Commit 4234d57c authored by caiyanming

remove hbase-kubernetes

Change-Id: Iad928c94a1f320bf18464f6ae1ed8aa82f520a0e
parent 2dafedd8
FROM registry-vpc.cn-hangzhou.aliyuncs.com/schbrain/jdk:8u172-1.0
MAINTAINER iocanel@gmail.com
USER root
ENV ZOOKEEPER_VERSION 3.6.0
EXPOSE 2181 2888 3888
ADD apache-zookeeper-${ZOOKEEPER_VERSION}-bin.tar.gz /opt/
RUN mv /opt/apache-zookeeper-${ZOOKEEPER_VERSION}-bin /opt/zookeeper \
&& cp /opt/zookeeper/conf/zoo_sample.cfg /opt/zookeeper/conf/zoo.cfg \
&& mkdir -p /opt/zookeeper/{data,log}
WORKDIR /opt/zookeeper
VOLUME ["/opt/zookeeper/conf", "/opt/zookeeper/data", "/opt/zookeeper/log"]
COPY config-and-run.sh ./bin/
COPY zoo.cfg ./conf/
CMD ["/opt/zookeeper/bin/config-and-run.sh"]
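The ADD instruction expects the ZooKeeper release tarball to already be present in the build context; the Dockerfile does not download it. A hedged example of fetching it first (the Apache archive URL pattern is assumed):

    curl -LO https://archive.apache.org/dist/zookeeper/zookeeper-3.6.0/apache-zookeeper-3.6.0-bin.tar.gz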
all: push
NAME=$(IMAGE_BASE_URL)/zookeeper
build:
	docker build -t $(NAME):$(IMAGE_TAG) .
push: build
	docker push $(NAME):$(IMAGE_TAG)
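IMAGE_BASE_URL and IMAGE_TAG are not defined in the Makefile and are expected from the caller; the registry and tag below are placeholders only:

    make push IMAGE_BASE_URL=registry.example.com/myteam IMAGE_TAG=3.6.0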
Fabric8 - ZooKeeper Docker Image
================================
A ZooKeeper Docker Image for use with Kubernetes.
The image supports the following ZooKeeper modes:
* Standalone
* Clustered
# Standalone Mode
To start the image in standalone mode you can simply use:
docker run fabric8/zookeeper
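For a quick smoke test you could publish the client port and ask the server for its status with the bundled script (container name and port mapping below are illustrative):

    docker run -d --name zk -p 2181:2181 fabric8/zookeeper
    docker exec zk /opt/zookeeper/bin/zkServer.sh status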
# Clustered Mode
To start the image in clustered mode you need to specify a couple of environment variables for the container.
| Environment Variable | Description                                                                     |
| -------------------- | ------------------------------------------------------------------------------- |
| SERVER_ID            | The ID of this server in the ensemble                                            |
| MAX_SERVERS          | The number of servers in the ensemble                                             |
| ZOOKEEPER_MEMBER     | Hostname prefix of the other ensemble members (server N is addressed as ZOOKEEPER_MEMBER-N) |
Each container started with the variables above will generate the following server list in its zoo.cfg:
server.1=zookeeper-1:2888:3888
server.2=zookeeper-2:2888:3888
server.3=zookeeper-3:2888:3888
...
server.N=zookeeper-N:2888:3888
Ensuring that zookeeper-1, zookeeper-2 ... zookeeper-N can be resolved is beyond the scope of this image.
You can use DNS, Kubernetes Services, etc., depending on your environment (see below).
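Outside Kubernetes, one way to make the names resolvable is a user-defined Docker network, where container names double as DNS names; the network name below is arbitrary, and ZOOKEEPER_MEMBER matches the container name prefix:

    docker network create zk-net
    for i in 1 2 3; do
      docker run -d --name zookeeper-$i --network zk-net \
        -e SERVER_ID=$i -e MAX_SERVERS=3 -e ZOOKEEPER_MEMBER=zookeeper \
        fabric8/zookeeper
    done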
## Inside Kubernetes
Inside Kubernetes you can use a pod setup that looks like:
{
  "kind": "Pod",
  "apiVersion": "v1beta3",
  "metadata": {
    "name": "zookeeper-1",
    "labels": {
      "name": "zookeeper",
      "server-id": "1"
    }
  },
  "spec": {
    "containers": [
      {
        "name": "server",
        "image": "fabric8/zookeeper",
        "env": [
          { "name": "SERVER_ID", "value": "1" },
          { "name": "MAX_SERVERS", "value": "3" }
        ],
        "ports": [
          { "containerPort": 2181 },
          { "containerPort": 2888 },
          { "containerPort": 3888 }
        ]
      }
    ]
  }
}
In the example above we create a pod that runs a container from this image. The container is configured with the environment variables required for a clustered setup.
Last but not least, the pod is carefully named (zookeeper-${SERVER_ID}) so that the other ZooKeeper servers can easily find it by hostname.
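To make the zookeeper-N hostnames resolvable inside the cluster, each pod is typically paired with a headless Service of the same name (see the manifests further down). A sketch of creating and inspecting them, with assumed file names:

    kubectl create -f zookeeper-services.yaml
    kubectl create -f zookeeper-pods.yaml
    kubectl get pods -l name=zookeeper-1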
#!/bin/bash
echo "$SERVER_ID / $MAX_SERVERS"
if [ ! -z "$SERVER_ID" ] && [ ! -z "$MAX_SERVERS" ]; then
  echo "Starting up in clustered mode"
  echo "" >> /opt/zookeeper/conf/zoo.cfg
  echo "#Server List" >> /opt/zookeeper/conf/zoo.cfg
  # Append one server.N entry per ensemble member: the local instance binds to
  # 0.0.0.0, the other members are addressed as ${ZOOKEEPER_MEMBER}-N.
  for i in $( eval echo {1..$MAX_SERVERS});do
    if [ "$SERVER_ID" = "$i" ];then
      echo "server.$i=0.0.0.0:2888:3888" >> /opt/zookeeper/conf/zoo.cfg
    else
      echo "server.$i=${ZOOKEEPER_MEMBER}-${i}:2888:3888" >> /opt/zookeeper/conf/zoo.cfg
    fi
  done
  cat /opt/zookeeper/conf/zoo.cfg
  # Persist the ID of the current instance of ZooKeeper
  echo ${SERVER_ID} > /opt/zookeeper/data/myid
else
  echo "Starting up in standalone mode"
fi
exec /opt/zookeeper/bin/zkServer.sh start-foreground
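As a concrete illustration, a container started with SERVER_ID=2, MAX_SERVERS=3 and ZOOKEEPER_MEMBER=zookeeper would append the following server list to /opt/zookeeper/conf/zoo.cfg and write 2 to /opt/zookeeper/data/myid:

    #Server List
    server.1=zookeeper-1:2888:3888
    server.2=0.0.0.0:2888:3888
    server.3=zookeeper-3:2888:3888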
apiVersion: apps/v1alpha1
kind: PetSet
metadata:
  name: zookeeper
spec:
  serviceName: "zookeeper-cluster"
  replicas: 3
  template:
    metadata:
      labels:
        app: zookeeper
    spec:
      hostname: "zookeeper"
      containers:
      - name: zookeeper
        image: index.caicloud.io/caicloud/zookeeper:sysinfra2
        ports:
        - containerPort: 2181
        - containerPort: 2888
        - containerPort: 3888
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: MAX_SERVERS
          value: '3'
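The serviceName field refers to a governing headless Service (zookeeper-cluster) that has to exist before the PetSet is created. A minimal sketch of applying the pieces, with assumed file names:

    kubectl create -f zookeeper-cluster-service.yaml
    kubectl create -f zookeeper-petset.yaml
    kubectl get pods -l app=zookeeper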
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
dataDir=/opt/zookeeper/data
# This option will direct the machine to write the transaction log to the dataLogDir rather than the dataDir. This allows a dedicated log device to be used, and helps avoid competition between logging and snapshots.
dataLogDir=/opt/zookeeper/log
# the port at which the clients will connect
clientPort=2181
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
autopurge.purgeInterval=1
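As a worked example of the values above: with tickTime=2000, initLimit=10 allows 10 x 2000 ms = 20 s for a follower's initial synchronization with the leader, and syncLimit=5 allows 5 x 2000 ms = 10 s between a request and its acknowledgement.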
apiVersion: v1
kind: Service
metadata:
  name: zookeeper-3
  labels:
    name: zookeeper-3
spec:
  clusterIP: None
  ports:
  - name: client
    port: 2181
    targetPort: 2181
  - name: followers
    port: 2888
    targetPort: 2888
  - name: election
    port: 3888
    targetPort: 3888
  selector:
    name: zookeeper-3
---
apiVersion: v1
kind: Service
metadata:
  name: zookeeper-2
  labels:
    name: zookeeper-2
spec:
  clusterIP: None
  ports:
  - name: client
    port: 2181
    targetPort: 2181
  - name: followers
    port: 2888
    targetPort: 2888
  - name: election
    port: 3888
    targetPort: 3888
  selector:
    name: zookeeper-2
---
apiVersion: v1
kind: Service
metadata:
  name: zookeeper-1
  labels:
    name: zookeeper-1
spec:
  clusterIP: None
  ports:
  - name: client
    port: 2181
    targetPort: 2181
  - name: followers
    port: 2888
    targetPort: 2888
  - name: election
    port: 3888
    targetPort: 3888
  selector:
    name: zookeeper-1
---
apiVersion: v1
kind: Pod
metadata:
  name: zookeeper-3
  labels:
    name: zookeeper-3
spec:
  containers:
  - name: server
    image: cargo.caicloudprivatetest.com/caicloud/zookeeper:sysinfra
    imagePullPolicy: Always
    env:
    - name: POD_NAMESPACE
      valueFrom:
        fieldRef:
          fieldPath: metadata.namespace
    - name: POD_NAME
      valueFrom:
        fieldRef:
          fieldPath: metadata.name
    - name: SERVER_ID
      value: '3'
    - name: MAX_SERVERS
      value: '3'
    - name: ZOOKEEPER_MEMBER
      value: 'zookeeper'
    ports:
    - containerPort: 2181
    - containerPort: 2888
    - containerPort: 3888
---
apiVersion: v1
kind: Pod
metadata:
  name: zookeeper-2
  labels:
    name: zookeeper-2
spec:
  containers:
  - name: server
    image: cargo.caicloudprivatetest.com/caicloud/zookeeper:sysinfra
    imagePullPolicy: Always
    env:
    - name: POD_NAMESPACE
      valueFrom:
        fieldRef:
          fieldPath: metadata.namespace
    - name: POD_NAME
      valueFrom:
        fieldRef:
          fieldPath: metadata.name
    - name: SERVER_ID
      value: '2'
    - name: MAX_SERVERS
      value: '3'
    - name: ZOOKEEPER_MEMBER
      value: 'zookeeper'
    ports:
    - containerPort: 2181
    - containerPort: 2888
    - containerPort: 3888
---
apiVersion: v1
kind: Pod
metadata:
  name: zookeeper-1
  labels:
    name: zookeeper-1
spec:
  containers:
  - name: server
    image: cargo.caicloudprivatetest.com/caicloud/zookeeper:sysinfra
    imagePullPolicy: Always
    env:
    - name: POD_NAMESPACE
      valueFrom:
        fieldRef:
          fieldPath: metadata.namespace
    - name: POD_NAME
      valueFrom:
        fieldRef:
          fieldPath: metadata.name
    - name: SERVER_ID
      value: '1'
    - name: MAX_SERVERS
      value: '3'
    - name: ZOOKEEPER_MEMBER
      value: 'zookeeper'
    ports:
    - containerPort: 2181
    - containerPort: 2888
    - containerPort: 3888
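Once the three Services and three Pods above are created and Running, one way to verify the ensemble is to ask each server for its role (the script path comes from the image; the pod name is one of those defined above):

    kubectl exec zookeeper-1 -- /opt/zookeeper/bin/zkServer.sh status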