yaml InfluxDB on Kubernetes locally

InfluxDB on a local Kubernetes cluster (Minikube + Helm)

1_virtualbox.sh
sudo apt-get update && sudo apt-get install -y apt-transport-https
wget -q https://www.virtualbox.org/download/oracle_vbox_2016.asc -O- | sudo apt-key add -
wget -q https://www.virtualbox.org/download/oracle_vbox.asc -O- | sudo apt-key add -
sudo add-apt-repository "deb http://download.virtualbox.org/virtualbox/debian bionic contrib"
sudo apt update
sudo apt install virtualbox-6.0
2_kubectl.sh
sudo apt-get update && sudo apt-get install -y apt-transport-https
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee -a /etc/apt/sources.list.d/kubernetes.list
sudo apt-get update
sudo apt-get install -y kubectl
3_minikube.sh
curl -Lo minikube https://storage.googleapis.com/minikube/releases/v1.0.0/minikube-linux-amd64 && chmod +x minikube && sudo cp minikube /usr/local/bin/ && rm minikube
4_run.sh
minikube start
# with settings: minikube start --cpus 4 --memory 8192
kubectl api-versions
kubectl cluster-info
kubectl get nodes
kubectl describe node
5_helm.sh
curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get > get_helm.sh
chmod 700 get_helm.sh
./get_helm.sh
6_clusterrole.yaml
# filename: clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: cluster-admin
rules:
- apiGroups:
  - '*'
  resources:
  - '*'
  verbs:
  - '*'
- nonResourceURLs:
  - '*'
  verbs:
  - '*'
7_helm_tiller.sh
kubectl create -f clusterrole.yaml
kubectl create serviceaccount -n kube-system tiller
kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
helm init --service-account tiller
helm init --upgrade --service-account tiller
kubectl --namespace kube-system get pods | grep tiller
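Once the Tiller pod is Running, an optional sanity check is to ask Helm for both client and server versions; the Server line only appears when Tiller is reachable:
helm version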
8_InfluxDB_helm_chart.sh
helm install --name my-release \
  --set persistence.enabled=true,persistence.size=5Gi \
  stable/influxdb
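A quick way to reach the database afterwards (a sketch; it assumes the chart's default release-derived service name my-release-influxdb and the default ClusterIP service type) is to port-forward the HTTP port and hit the query API:
# hypothetical quick check of the freshly installed release
kubectl port-forward svc/my-release-influxdb 8086:8086 &
curl -G 'http://localhost:8086/query' --data-urlencode 'q=SHOW DATABASES'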
9_values.yaml
## influxdb image version
## ref: https://hub.docker.com/r/library/influxdb/tags/
image:
  repo: "influxdb"
  tag: "1.7.3-alpine"
  pullPolicy: IfNotPresent

## Specify a service type
## NodePort is default
## ref: http://kubernetes.io/docs/user-guide/services/
##
service:
  ## Add annotations to service
  # annotations: {}
  type: ClusterIP
  ## Add IP Cluster
  # clusterIP: ""
  ## Add external IPs that route to one or more cluster nodes
  # externalIPs: []
  ## Specify LoadBalancer IP (only allow on some cloud provider)
  # loadBalancerIP: ""
  ## Allow source IPs to access on service (if empty, any access allow)
  # loadBalancerSourceRanges: []

## Persist data to a persistent volume
##
persistence:
  enabled: false
  ## If true will use an existing PVC instead of creating one
  # useExisting: false
  ## Name of existing PVC to be used in the influx deployment
  # name:
  ## influxdb data Persistent Volume Storage Class
  ## If defined, storageClassName: <storageClass>
  ## If set to "-", storageClassName: "", which disables dynamic provisioning
  ## If undefined (the default) or set to null, no storageClassName spec is
  ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
  ##   GKE, AWS & OpenStack)
  ##
  # storageClass: "-"
  accessMode: ReadWriteOnce
  size: 8Gi

## Create default user through Kubernetes job
## Defaults indicated below
##
setDefaultUser:
  enabled: false

  ## Image of the container used for job
  ## Default: appropriate/curl:latest
  ##
  image: appropriate/curl:latest

  ## Deadline for job so it does not retry forever.
  ## Default: activeDeadline: 300
  ##
  activeDeadline: 300

  ## Restart policy for job
  ## Default: OnFailure
  restartPolicy: OnFailure

  user:

    ## The user name
    ## Default: "admin"
    username: "admin"

    ## User password
    ## single quotes must be escaped (\')
    ## Default: (Randomly generated 10 characters of AlphaNum)
    # password:

    ## User privileges
    ## Default: "WITH ALL PRIVILEGES"
    privileges: "WITH ALL PRIVILEGES"

## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
resources:
  requests:
    memory: 256Mi
    cpu: 0.1
  limits:
    memory: 16Gi
    cpu: 8

ingress:
  enabled: false
  tls: false
  # secretName: my-tls-cert # only needed if tls above is true
  hostname: influxdb.foobar.com
  annotations:
    # kubernetes.io/ingress.class: "nginx"
    # kubernetes.io/tls-acme: "true"

## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName:

## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}

## Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}

## Tolerations for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
# - key: "key"
#   operator: "Equal|Exists"
#   value: "value"
#   effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"

## The InfluxDB image uses several environment variables to automatically
## configure certain parts of the server.
## Ref: https://hub.docker.com/_/influxdb/
env:
  # - name: INFLUXDB_DB
  #   value: "demo"

## Change InfluxDB configuration parameters below:
## Defaults are indicated
## ref: https://docs.influxdata.com/influxdb/v1.1/administration/config/
config:
  reporting_disabled: false
  storage_directory: /var/lib/influxdb
  rpc:
    enabled: true
    bind_address: 8088
  meta:
    retention_autocreate: true
    logging_enabled: true
  data:
    query_log_enabled: true
    cache_max_memory_size: 1073741824
    cache_snapshot_memory_size: 26214400
    cache_snapshot_write_cold_duration: 10m0s
    compact_full_write_cold_duration: 4h0m0s
    max_series_per_database: 1000000
    max_values_per_tag: 100000
    trace_logging_enabled: false
  coordinator:
    write_timeout: 10s
    max_concurrent_queries: 0
    query_timeout: 0s
    log_queries_after: 0s
    max_select_point: 0
    max_select_series: 0
    max_select_buckets: 0
  retention:
    enabled: true
    check_interval: 30m0s
  shard_precreation:
    enabled: true
    check_interval: 10m0s
    advance_period: 30m0s
  admin:
    enabled: false
    bind_address: 8083
    https_enabled: false
    https_certificate: /etc/ssl/influxdb.pem
  monitor:
    store_enabled: true
    store_database: _internal
    store_interval: 10s
  subscriber:
    enabled: true
    http_timeout: 30s
    insecure_skip_verify: false
    ca_certs: ""
    write_concurrency: 40
    write_buffer_size: 1000
  http:
    enabled: true
    bind_address: 8086
    auth_enabled: false
    log_enabled: true
    write_tracing: false
    pprof_enabled: true
    https_enabled: false
    https_certificate: /etc/ssl/influxdb.pem
    https_private_key: ""
    max_row_limit: 10000
    max_connection_limit: 0
    shared_secret: "beetlejuicebeetlejuicebeetlejuice"
    realm: InfluxDB
    unix_socket_enabled: false
    bind_socket: /var/run/influxdb.sock
  graphite:
    enabled: false
    bind_address: 2003
    database: graphite
    retention_policy: autogen
    protocol: tcp
    batch_size: 5000
    batch_pending: 10
    batch_timeout: 1s
    consistency_level: one
    separator: .
    udp_read_buffer: 0
    # Uncomment to define graphite templates
    # templates:
    #   - "graphite.metric.*.*.* measurement.run"
  collectd:
    enabled: false
    bind_address: 25826
    database: collectd
    retention_policy: autogen
    batch_size: 5000
    batch_pending: 10
    batch_timeout: 10s
    read_buffer: 0
    typesdb: /usr/share/collectd/types.db
    security_level: none
    auth_file: /etc/collectd/auth_file
  opentsdb:
    enabled: false
    bind_address: 4242
    database: opentsdb
    retention_policy: autogen
    consistency_level: one
    tls_enabled: false
    certificate: /etc/ssl/influxdb.pem
    batch_size: 1000
    batch_pending: 5
    batch_timeout: 1s
    log_point_errors: true
  udp:
    enabled: false
    bind_address: 8089
    database: udp
    retention_policy: autogen
    batch_size: 5000
    batch_pending: 10
    read_buffer: 0
    batch_timeout: 1s
    precision: "ns"
  continuous_queries:
    log_enabled: true
    enabled: true
    run_interval: 1s
  logging:
    format: auto
    level: info
    supress_logo: false
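
After adjusting these defaults, the running release from step 8 can be updated with them; a minimal sketch using the Helm 2 CLI installed above:
helm upgrade my-release stable/influxdb -f values.yaml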

yaml Spring Flyway migration config

## Configuration pointing Flyway at the package with the SQL migrations

application.yml
spring.flyway.locations: classpath:db/migrations
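
Flyway then picks up scripts from src/main/resources/db/migrations using its V<version>__<description>.sql naming convention. The same setting in nested YAML form, plus one optional flag (an assumption about typical usage, not part of the original config):
spring:
  flyway:
    locations: classpath:db/migrations
    # lets Flyway start versioning an already-populated schema
    baseline-on-migrate: true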

yaml Spring DB datasource config

## DB access (*datasource*) configuration in Spring Boot via **application.yml** properties

application.yml
spring:
  datasource:
    url: ${GAT_DRIER_DS_URL:jdbc:postgresql://localhost:5432/drier_moscow}
    username: ${GAT_DRIER_DS_USER:gat_drier}
    password: ${GAT_DRIER_DS_PASSWORD:gat_drier}
    driver-class-name: org.postgresql.Driver # only needed for plain JDBC
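
The ${VAR:default} placeholders mean every value can be overridden from the environment at launch time; a minimal sketch (app.jar is a placeholder for your Spring Boot jar):
# hypothetical launch with overridden credentials
GAT_DRIER_DS_URL=jdbc:postgresql://db-host:5432/drier_moscow \
GAT_DRIER_DS_PASSWORD=real_password \
java -jar app.jar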

yaml Playbook

playbook
---
- hosts: all
  tasks:
   - name: Get jboss process
     shell: ps -ef | grep -v grep | grep -w java | awk '{print $2}'
     register: running_processes

   - name: Kill java processes
     command: "kill {{ item }} "
     with_items: "{{ running_processes.stdout_lines }}"
     become: yes
     become_user: jboss

   - wait_for:
       path: "/proc/{{ item }}/status"
       state: absent
     with_items: "{{ running_processes.stdout_lines }}"
     ignore_errors: yes
     register: killed_processes
     become: yes

   - name: jboss cleaning
     file:
       path: "{{ item }}"
       state: absent
     with_items:  
       - /opt/jboss-eap-7.1/indices
       - /opt/jboss-eap-7.1/bin/indices
       - /opt/jboss-eap-7.1/standalone/tmp
       - /opt/jboss-eap-7.1/standalone/data
       - /opt/jboss-eap-7.1/standalone/deployments/*
     become: yes
     become_user: jboss
     tags:
       - cleaning

   - name: Copy artifact
     copy:
       src: /var/lib/jenkins/workspace/{{pje_build}}/pje-web/target/pje.war
       dest: /opt/jboss-eap-7.1/standalone/deployments/{{ pje_artefato }}
       owner: jboss
       group: jboss
       mode: 0755
     become: yes
     become_user: jboss
     tags:
       - copy

   - name: Start jboss
     shell: ./standalone.sh &
     args:
       chdir: /opt/jboss-eap-7.1/bin
     environment:
        JAVA_HOME: /opt/jdk
        ENV_EUREKA_CLIENT_HOSTNAME: "{{ ansible_default_ipv4.address }}"
     become: yes
     become_user: jboss
     async: 45
     poll: 0
     tags: 
       - start

   - debug:
        msg: "IP: {{ ansible_default_ipv4.address }}"
     tags:
       - deb

### Main commands

# Run the playbook only on specific hosts, optionally limiting tags and passing extra vars
ansible-playbook -i hosts pje-21-provision.yml -l quartzs -t "stop-jboss,delay-jboss,start-jboss" -e use_servicos=true
ansible-playbook -i hosts teste.yml -l quartz-1g
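
A minimal sketch of the inventory file referenced by -i hosts, assuming the quartzs and quartz-1g group names used above (hostnames are placeholders):
# hosts
[quartzs]
quartz-1.example.com
quartz-2.example.com

[quartz-1g]
quartz-1g.example.com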

yaml Drone pipeline (S3 cache)

Each file/directory in the `mount` list is archived separately and cached in the specified S3 bucket for the repository.

Drone Pipeline S3 Caching
pipeline:
  # drone-pipeline-cache-restore: download previously cached files from S3
  cache-restore:
    image: meltwater/drone-cache
    bucket: __AWS_bucket__
    restore: true
    region: eu-west-1
    mount:
      - some_workspace_dir

  # ... regular build steps go here ...

  # drone-pipeline-cache-rebuild: push the updated cache back up to S3
  cache-rebuild:
    image: meltwater/drone-cache
    bucket: __AWS_bucket__
    rebuild: true
    region: eu-west-1
    mount:
      - some_workspace_dir
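
The cache plugin also needs AWS credentials at runtime; one possibility (an assumption — check the meltwater/drone-cache documentation for the exact settings it reads) is to expose them to both steps as Drone secrets, which Drone injects as environment variables:
  cache-restore:
    # hypothetical: these secret names map to AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY env vars
    secrets: [ aws_access_key_id, aws_secret_access_key ]
  cache-rebuild:
    secrets: [ aws_access_key_id, aws_secret_access_key ]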

yaml Kubernetes on GKE with Nginx

Kubernetes on GKE with an Nginx ingress controller

ReadMe.md
# Kubernetes Configuration

I have included a .env file, ingress-nginx.yaml, controller-nginx.yaml, and a bash script that builds your cluster and deploys everything from the folder you pass to the script as a parameter.

To deploy from your cloud-config/staging folder and create the cluster, do this:
```
./deploy.sh staging y
```

If you want to upgrade your existing app deployment, simply leave off the y:
```
./deploy.sh staging
```

# Here is an explanation in a more readable format

## Set Project
``` 
gcloud config set project kube-tutorial
```

## Get Compute Zones
```
gcloud compute zones list
```

## Set Compute Zones and Region
```
gcloud config set compute/zone us-east1-b
gcloud config set compute/region us-east1
```
## Create Clusters
```
gcloud container --project "kube-tutorial" clusters create "my-app-staging-cluster" --zone "us-east1-b" --username "admin" --cluster-version "1.11.7-gke.4" --machine-type "custom-2-8192" --image-type "COS" --disk-type "pd-standard" --disk-size "100" --scopes "https://www.googleapis.com/auth/cloud-platform" --num-nodes "3" --enable-cloud-logging --enable-cloud-monitoring --no-enable-ip-alias --network "projects/kube-tutorial/global/networks/default" --subnetwork "projects/kube-tutorial/regions/us-east1/subnetworks/default" --addons HorizontalPodAutoscaling,HttpLoadBalancing --enable-autoupgrade --enable-autorepair
```

## Install Helm

Helm is a package manager for Kubernetes; its packages are called "charts". We use it to install the nginx ingress controller.
```
curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get > get_helm.sh
chmod 700 get_helm.sh
./get_helm.sh
helm init
```
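
Before installing anything with it, you can sanity-check that Helm sees the chart and peek at its default values (standard Helm 2 commands):
```
helm search nginx-ingress
helm inspect values stable/nginx-ingress
```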

## Configure Tiller for Helm
```
kubectl create namespace staging
helm init --tiller-namespace staging
kubectl create serviceaccount --namespace staging tiller
kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=staging:tiller
helm init --service-account tiller --upgrade
```
## Verify Tiller Deployment Ready
```
kubectl get deployments -n staging
```

## Install an App and Expose it (Cluster IP only)

In this example I'm using an app-deploy.yaml, but you can use any image you want (see app-deploy.yaml).
```
kubectl create -f app-deploy.yaml            
kubectl expose deployment my-app-staging --port=8087 --namespace staging
```
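
To confirm the deployment and its ClusterIP service came up (the names follow from the commands above):
```
kubectl get deployment my-app-staging -n staging
kubectl get service my-app-staging -n staging
```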

## Install Nginx Load Balancer using Helm and then update the settings with a config yaml
```
helm install --name nginx-ingress-my-app-staging stable/nginx-ingress --set rbac.create=true --tiller-namespace staging
kubectl apply -f cloud-config/$ENV/controller-nginx.yaml
```
## Verify Controller is Live (external ip)
```
kubectl get service nginx-ingress-my-app-staging-controller
```

## Install Certificate - If you want ssl
```
kubectl create secret tls tls-secret-staging --key my-app.com.key --cert my-app.com.crt -n staging   
```

## Create Ingress-Resource for Traffic

Create an ingress-nginx.yaml file that has your paths (see yaml file)

```
kubectl apply -f ingress-nginx.yaml
```
Browse to: http://external-ip-of-ingress-controller/

# TADA


---

## *If the cluster is already created and you are ONLY updating your image, then do this:*

```
gcloud container clusters get-credentials my-app-staging-cluster
kubectl set image deployment/my-app-staging  my-app-staging=gcr.io/my-repo-01/my-app-staging:<TAG HERE/> --namespace staging
```
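
You can then watch the rollout finish before calling it done:
```
kubectl rollout status deployment/my-app-staging -n staging
```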

---

app-deploy.yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "1"
  generation: 1
  labels:
    app: my-app-staging
  name: my-app-staging
  namespace: staging
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: my-app-staging
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: my-app-staging
    spec:
      containers:
      - image: gcr.io/my-repo-01/my-app-staging:latest
        imagePullPolicy: IfNotPresent
        name: my-app-staging
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
controller-nginx.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  labels:
    app: nginx-ingress
    chart: nginx-ingress-1.3.1
    component: controller
    heritage: Tiller
    release: nginx-ingress-my-app-staging
  name: nginx-ingress-my-app-controller
  namespace: default
data:
  enable-vts-status: "false"
  hsts: "true"
  hsts-include-subdomains: "false"
  hsts-max-age: "31536000"
  hsts-preload: "false"
  proxy-body-size: "25M"  # ingress-nginx ConfigMap key that sets nginx client_max_body_size
deploy.sh
#!/bin/bash

#Setup a cloud-config directory with all of my yamls for each environment
source cloud-config/$1/.env

#A simple way to increment my version number
increment_version ()
{
  declare -a part=( ${1//\-/ } )
  declare    new
  declare -i carry=1

  for (( CNTR=${#part[@]}-1; CNTR>=0; CNTR-=1 )); do
    len=${#part[CNTR]}
    new=$((part[CNTR]+carry))
    [ ${#new} -gt $len ] && carry=1 || carry=0
    [ $CNTR -gt 0 ] && part[CNTR]=${new: -len} || part[CNTR]=${new}
  done
  new="${part[*]}"
  newversion="${new// /-}"
} 

#My Versions
newversion=''
version=$(cat "version.txt") 
increment_version $version

#This is important to break if the docker build breaks
set -xe

#Determine if I need to create the cluster or update the deployment image
export createcluster=${2:-"N"}
export TAG=$newversion

#Writes out where it is about to go
echo Project: $PROJECTID
echo Environment: $ENV
echo TAG: $TAG

# Are YOU SURE?!
read -p "Do you wish to continue? [y/N]" -n 1 -r
echo    # (optional) move to a new line

if [[ $REPLY =~ ^[Yy]$ ]]
then
	#Docker Build  - I don't use cloud build
    echo Docker Build
	docker build --build-arg docker_env=$ENV --build-arg tag=$TAG-$ENV -t my-app .

	#Docker Tag latest and new version number
	docker tag my-app:latest gcr.io/$PROJECTID/my-app-$ENV:$TAG
	docker tag my-app:latest gcr.io/$PROJECTID/my-app-$ENV:latest
	
	#Docker Push to gcr repo
	docker push gcr.io/$PROJECTID/my-app-$ENV:$TAG
	docker push gcr.io/$PROJECTID/my-app-$ENV:latest
	
	#Make sure the project and zone and region are configured correctly
	gcloud config set project $PROJECTID
	gcloud config set compute/zone $ZONE
	gcloud config set compute/region $REGION
	
	#If you need to create a new cluster, then TADA
	if [[ $createcluster =~ [Yy]$ ]]
	then
		if [[ $ENV == "staging" ]]
		then
			gcloud container --project "$PROJECTID" clusters create "my-app-cluster-$ENV" --zone "$ZONE" --username "admin" --cluster-version "1.11.7-gke.12" --machine-type "n1-standard-2" --image-type "COS" --disk-type "pd-standard" --disk-size "100" --scopes "https://www.googleapis.com/auth/cloud-platform" --num-nodes "3" --enable-cloud-logging --enable-cloud-monitoring --no-enable-ip-alias --network "projects/$PROJECTID/global/networks/default" --subnetwork "projects/$PROJECTID/regions/$REGION/subnetworks/default" --addons HorizontalPodAutoscaling,HttpLoadBalancing --enable-autoupgrade --enable-autorepair --enable-autoscaling --min-nodes 3 --max-nodes 6
		else
			gcloud container --project "$PROJECTID" clusters create "my-app-cluster-$ENV" --zone "$ZONE" --username "admin" --cluster-version "1.11.7-gke.12" --machine-type "n1-standard-1" --image-type "COS" --disk-type "pd-standard" --disk-size "100" --scopes "https://www.googleapis.com/auth/cloud-platform" --num-nodes "2" --enable-cloud-logging --enable-cloud-monitoring --no-enable-ip-alias --network "projects/$PROJECTID/global/networks/default" --subnetwork "projects/$PROJECTID/regions/$REGION/subnetworks/default" --addons HorizontalPodAutoscaling,HttpLoadBalancing --enable-autorepair --enable-autoscaling --min-nodes 1 --max-nodes 4
		fi
		curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get > get_helm.sh
		chmod 700 get_helm.sh
		./get_helm.sh
		kubectl create namespace $ENV
		helm init --tiller-namespace $ENV
		kubectl create serviceaccount --namespace $ENV tiller
		kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=$ENV:tiller
		helm init --service-account tiller --tiller-namespace $ENV --upgrade
		kubectl create -f cloud-config/$ENV/app-deploy.yaml  
		kubectl expose deployment my-app-app-$ENV --port=8087 --namespace $ENV
		kubectl autoscale deployment my-app-app-$ENV --max 6 --min 2 --cpu-percent 50 -n $ENV
		kubectl create secret tls tls-secret-$ENV --key your-domain.com.key --cert your-domain.com.crt -n $ENV   
		kubectl apply -f cloud-config/$ENV/ingress-nginx.yaml
		echo $newversion > version.txt
		echo waiting for tiller to become live
		kubectl get pods -n $ENV
		sleep 20
		helm install --name nginx-ingress-my-app-$ENV stable/nginx-ingress --set rbac.create=true --tiller-namespace $ENV
		kubectl apply -f cloud-config/$ENV/controller-nginx.yaml
	else # You are only updating with the new deployment
		gcloud container clusters get-credentials my-app-cluster-$ENV
		kubectl set image deployment/my-app-app-$ENV my-app-app-$ENV=gcr.io/$PROJECTID/my-app-$ENV:$TAG --namespace $ENV
		echo $newversion > version.txt
	fi
fi

env settings
#You should put this in a folder: cloud-config/YourEnvironmentHere/.env
export ENV=staging
export PROJECTID=my-project-id
export REGION=us-east1
export ZONE=us-east1-b
ingress-nginx.yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: ingress-resource-staging
  namespace: staging
  annotations:
    kubernetes.io/ingress.class: nginx
    ingress.kubernetes.io/ssl-redirect: "true"
    nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  rules:
  - host: staging.my-app.com
    http:
      paths:
      - backend:
          serviceName: my-app-staging
          servicePort: 8080
        path: /
  tls:
  - hosts:
    - staging.my-app.com
    secretName: tls-secret-staging

yaml Hotlink protection (block direct linking)

untitled
#htaccess
<IfModule mod_rewrite.c>
RewriteEngine on
RewriteCond %{HTTP_REFERER} !^http(s)?://(www.)?localhost [NC]
RewriteRule .(pdf|zip)$ - [NC,F,L]
</IfModule>

yaml Set the hostname on AWS with cloud-init

cloud-init.yml
#cloud-config
preserve_hostname: false
hostname: myhostname
fqdn: myhostname.example.com
manage_etc_hosts: true
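
One way to apply this on AWS (a sketch; the AMI ID and instance type are placeholders) is to pass the file as EC2 user data, which cloud-init reads on first boot:
# hypothetical launch command
aws ec2 run-instances --image-id ami-xxxxxxxx --instance-type t3.micro \
  --user-data file://cloud-init.yml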

yaml Bitbucket Pipelines (bitbucket-pipelines.yml)

bitbucket-pipelines.yml
image: itobuz/itobuz-aws
pipelines:
  branches:
    master:
    - step:
        script:
          - ./deploy.sh

yaml Docker Compose config for setting up Elasticsearch with Docker

Docker Compose config for setting up a two-node Elasticsearch cluster with Docker.

elastic-search-docker-compose.yml
version: '2'
services:
  elasticsearch1:
    image: docker.elastic.co/elasticsearch/elasticsearch:5.4.0
    container_name: elasticsearch1
    restart: always
    environment:
      - cluster.name=docker-cluster
      - bootstrap.memory_lock=true
      - xpack.security.enabled=false
      - ES_JAVA_OPTS=-Xms512m -Xmx512m
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    mem_limit: 1g
    cap_add:
      - IPC_LOCK
    volumes:
      - esdata1:/usr/share/elasticsearch/data
    ports:
      - 9200:9200
    networks:
      - esnet
  elasticsearch2:
    image: docker.elastic.co/elasticsearch/elasticsearch:5.4.0
    restart: always
    environment:
      - cluster.name=docker-cluster
      - bootstrap.memory_lock=true
      - xpack.security.enabled=false
      - ES_JAVA_OPTS=-Xms512m -Xmx512m
      - "discovery.zen.ping.unicast.hosts=elasticsearch1"
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    mem_limit: 1g
    cap_add:
      - IPC_LOCK
    volumes:
      - esdata2:/usr/share/elasticsearch/data
    networks:
      - esnet

volumes:
  esdata1:
    driver: local
  esdata2:
    driver: local

networks:
  esnet:
    driver: bridge
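
To bring the cluster up and check that both nodes joined (Elasticsearch 5.x also requires the host setting vm.max_map_count to be at least 262144):
sudo sysctl -w vm.max_map_count=262144
docker-compose -f elastic-search-docker-compose.yml up -d
curl 'http://localhost:9200/_cluster/health?pretty'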