
Curator ZooKeeper Distributed Lock

Add the following dependency to pom.xml:

<!-- https://mvnrepository.com/artifact/org.apache.curator/curator-recipes -->
<dependency>
  <groupId>org.apache.curator</groupId>
  <artifactId>curator-recipes</artifactId>
  <version>2.10.0</version>
</dependency>

ZooKeeper configuration

Download ZooKeeper and extract it to D:\java\zookeeper-3.4.6:

http://www.eu.apache.org/dist/zookeeper/zookeeper-3.4.6/zookeeper-3.4.6.tar.gz

ZooKeeper configuration file:

zoo-1.cfg

# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
dataDir=D:/java/zookeeper-3.4.6/data/1
# transaction log directory
dataLogDir=D:/java/zookeeper-3.4.6/log/1
# the port at which the clients will connect
clientPort=2181
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1
server.1=localhost:2887:3887
server.2=localhost:2888:3888
server.3=localhost:2889:3889

zoo-2.cfg and zoo-3.cfg are copies of zoo-1.cfg with the settings below changed and the corresponding directories created. The two extra ports in each server.N line are the quorum and leader-election ports and are identical in all three files; only clientPort, dataDir and dataLogDir differ per instance.

Change clientPort:

zoo-1.cfg:clientPort=2181

zoo-2.cfg:clientPort=2182

zoo-3.cfg:clientPort=2183

Create the directories:

zoo-1.cfg:D:/java/zookeeper-3.4.6/data/1

zoo-2.cfg:D:/java/zookeeper-3.4.6/data/2

zoo-3.cfg:D:/java/zookeeper-3.4.6/data/3

In each data directory, create a file named myid whose content is that instance's id: 1, 2 and 3 respectively (a helper sketch for creating these files follows this list):

D:/java/zookeeper-3.4.6/data/1/myid:1

D:/java/zookeeper-3.4.6/data/2/myid:2

D:/java/zookeeper-3.4.6/data/3/myid:3
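The directories and myid files above can also be created with a small program. Below is a minimal sketch, assuming the D:/java/zookeeper-3.4.6 layout used in this article; the class name PrepareZkDirs is illustrative, so adjust the base path and names as needed.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class PrepareZkDirs {
    public static void main(String[] args) throws IOException {
        // Base directory assumed from this article; change if your install path differs.
        Path base = Paths.get("D:/java/zookeeper-3.4.6");
        for (int id = 1; id <= 3; id++) {
            Path dataDir = base.resolve("data/" + id);
            Path logDir = base.resolve("log/" + id);
            Files.createDirectories(dataDir);   // dataDir of instance <id>
            Files.createDirectories(logDir);    // dataLogDir of instance <id>
            // myid contains only the numeric server id.
            Files.write(dataDir.resolve("myid"),
                    String.valueOf(id).getBytes(StandardCharsets.UTF_8));
        }
        System.out.println("data/log directories and myid files created.");
    }
}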

Start each of the three ZooKeeper instances, each with its own configuration file.

Code test

The program below starts five threads that contend for the same distributed lock; each thread holds the lock for about 10 seconds before releasing it.

import org.apache.curator.RetryPolicy;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.recipes.locks.InterProcessMutex;
import org.apache.curator.retry.ExponentialBackoffRetry;

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class CuratorLockTest {

    public static void main(String[] args) throws InterruptedException {
        CountDownLatch latch = new CountDownLatch(5);
        String zookeeperConnectionString = "localhost:2181,localhost:2182,localhost:2183";
        // Retry the connection with exponential backoff: 1s base sleep, at most 3 retries.
        RetryPolicy retryPolicy = new ExponentialBackoffRetry(1000, 3);
        CuratorFramework client = CuratorFrameworkFactory.newClient(
                zookeeperConnectionString, retryPolicy);
        client.start();
        System.out.println("Client started....");

        ExecutorService exec = Executors.newCachedThreadPool();
        for (int i = 0; i < 5; i++) {
            exec.submit(new MyLock("client" + i, client, latch));
        }
        exec.shutdown();

        latch.await();
        System.out.println("All tasks finished");
        client.close();
        System.out.println("Client closed....");
    }

    static class MyLock implements Runnable {

        private String name;
        private CuratorFramework client;
        private CountDownLatch latch;

        public MyLock(String name, CuratorFramework client, CountDownLatch latch) {
            this.name = name;
            this.client = client;
            this.latch = latch;
        }

        public String getName() {
            return name;
        }

        public void setName(String name) {
            this.name = name;
        }

        @Override
        public void run() {
            // All five tasks contend for the same lock path on the ensemble.
            InterProcessMutex lock = new InterProcessMutex(client, "/test_group");
            try {
                System.out.println("------" + this.name + "---------waiting for the lock--------");
                // Wait up to 120 seconds to acquire the lock.
                if (lock.acquire(120, TimeUnit.SECONDS)) {
                    try {
                        System.out.println("----------" + this.name + " acquired the resource----------");
                        System.out.println("----------" + this.name + " is processing the resource----------");
                        Thread.sleep(10 * 1000);
                        System.out.println("----------" + this.name + " finished with the resource----------");
                        latch.countDown();
                    } finally {
                        // Always release in finally so a failure cannot leave the lock held.
                        lock.release();
                        System.out.println("----------" + this.name + " released the lock----------");
                    }
                }
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    }
}

Run output (because each task counts down the latch before releasing the lock, "All tasks finished" may be printed before the last release message):

Client started....
------client1---------waiting for the lock--------
------client2---------waiting for the lock--------
------client0---------waiting for the lock--------
------client4---------waiting for the lock--------
------client3---------waiting for the lock--------
----------client1 acquired the resource----------
----------client1 is processing the resource----------
----------client1 finished with the resource----------
----------client1 released the lock----------
----------client3 acquired the resource----------
----------client3 is processing the resource----------
----------client3 finished with the resource----------
----------client3 released the lock----------
----------client0 acquired the resource----------
----------client0 is processing the resource----------
----------client0 finished with the resource----------
----------client0 released the lock----------
----------client4 acquired the resource----------
----------client4 is processing the resource----------
----------client4 finished with the resource----------
----------client4 released the lock----------
----------client2 acquired the resource----------
----------client2 is processing the resource----------
----------client2 finished with the resource----------
All tasks finished
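The test above runs five threads inside one JVM, but the same lock can be contended for from a separate process, which is what makes it a distributed lock. Below is a minimal sketch, assuming the same ensemble and the same "/test_group" lock path as above; the class name SecondProcessLock is illustrative.

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.recipes.locks.InterProcessMutex;
import org.apache.curator.retry.ExponentialBackoffRetry;

import java.util.concurrent.TimeUnit;

// Illustrative second process: it competes for the same "/test_group" path,
// so only one JVM (or thread) holds the critical section at a time.
public class SecondProcessLock {
    public static void main(String[] args) throws Exception {
        CuratorFramework client = CuratorFrameworkFactory.newClient(
                "localhost:2181,localhost:2182,localhost:2183",
                new ExponentialBackoffRetry(1000, 3));
        client.start();
        InterProcessMutex lock = new InterProcessMutex(client, "/test_group");
        if (lock.acquire(120, TimeUnit.SECONDS)) {
            try {
                System.out.println("second process acquired the lock");
                Thread.sleep(5 * 1000);
            } finally {
                lock.release();
            }
        }
        client.close();
    }
}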

This article was reposted from 秋楓's cnblogs blog. Original link: http://www.cnblogs.com/rwxwsblog/p/6112141.html. Please contact the original author before reprinting.