
Docker: Development-Related Images

Images commonly used during development.

rabbitmq

#Set the default username and password

#If you want to change the default username and password of guest / guest, you can do so with the RABBITMQ_DEFAULT_USER and RABBITMQ_DEFAULT_PASS environment variables. These variables used to be handled by a Docker-specific entrypoint shell script but are now supported directly by RabbitMQ.
docker run -d -p 5672:5672 -p 15672:15672 --name some-rabbit -e RABBITMQ_DEFAULT_USER=admin -e RABBITMQ_DEFAULT_PASS=password rabbitmq:3-management

Set the default virtual host
docker run -d  --name some-rabbit -e RABBITMQ_DEFAULT_VHOST=my_vhost rabbitmq:3-management

mariadb

docker run -d \
    -p 3308:3306 \
    -v /data/mariadb/data:/var/lib/mysql \
    --name some-mariadb \
    --env MARIADB_USER=mc5l \
    --env MARIADB_PASSWORD=EHk5PYsaewq!@3ff \
    --env MARIADB_ROOT_PASSWORD=EHk5PYFbFs%3e  \
    mariadb:latest
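
As a quick sanity check (assuming the container above), connect with the client bundled in the image:

docker exec -it some-mariadb mariadb -u mc5l -p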

seaweedfs

wget https://raw.githubusercontent.com/chrislusf/seaweedfs/master/docker/seaweedfs-compose.yml

docker-compose -f seaweedfs-compose.yml -p seaweedfs up
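
Assuming the default ports from the official compose file (master on 9333), cluster health can be checked with:

curl http://localhost:9333/cluster/status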

mongodb

docker run -d --name mongodb -v /data/mongodb/datadb:/data/db -p 27017:27017 -e MONGO_INITDB_ROOT_USERNAME=admin -e MONGO_INITDB_ROOT_PASSWORD=admin --privileged=true  mongo:latest
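
A quick connectivity check (mongosh ships with recent mongo images; credentials from the command above):

docker exec -it mongodb mongosh -u admin -p admin --authenticationDatabase admin --eval 'db.runCommand({ ping: 1 })'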

postgresql

docker run -d  \
--network my_network  \
-e POSTGRES_PASSWORD=secret  \
-e POSTGRES_USER=redmine  \
--name some-postgres \
--restart always \
postgres

New version (docker-compose)

version: '2'
services:
  postgres:
    restart: always
    image: postgres:14.4
    container_name: postgres14.4
    ports:
      - "5432:5432"
    volumes:
      - "/data/postgres14_4:/var/lib/postgresql/data"
      - "$PWD/pgsql/postgresql.conf:/etc/postgresql/postgresql.conf"
    environment:
      PGDATA: /var/lib/postgresql/data/pgdata
      POSTGRES_PASSWORD: dev@123,
      POSTGRES_DB: postgres
      TZ: Asia/Shanghai
    networks:
      default:
        ipv4_address: 172.25.0.103
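
Note: a static ipv4_address on the default network only takes effect if the compose file also defines that network with a matching subnet. A minimal sketch of the assumed top-level block:

networks:
  default:
    ipam:
      config:
        - subnet: 172.25.0.0/24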

SQL Server 2019

#Create the host directory for the bind mount
mkdir -p /data/sqlserver2019_data
#Change ownership of the directory (the mssql user in the container is UID 10001)
chown -R 10001:0 /data/sqlserver2019_data

docker run \
--name sqlserver2019 \
-e "ACCEPT_EULA=Y" \
-e "SA_PASSWORD=dev@123," \
-e 'MSSQL_PID=HMWJ3-KY3J2-NMVD7-KG4JR-X2G8G' \
-p 1433:1433 \
-v /data/sqlserver2019_data:/var/opt/mssql  \
-d mcr.microsoft.com/mssql/server:2019-latest



SQL Server 2019 product keys
Enterprise: HMWJ3-KY3J2-NMVD7-KG4JR-X2G8G
Standard:   PMBDC-FXVM3-T777P-N4FY8-PKFF4


#Enable SQL Server Agent
-e MSSQL_AGENT_ENABLED=True

docker run \
--name sqlserver2019 \
-e "ACCEPT_EULA=Y" \
-e "SA_PASSWORD=To1m@kluas&123" \
-e 'MSSQL_PID=HMWJ3-KY3J2-NMVD7-KG4JR-X2G8G' \
-e 'MSSQL_AGENT_ENABLED=True' \
-p 1433:1433 \
-v /data/sqlserver2019_data:/var/opt/mssql  \
-d mcr.microsoft.com/mssql/server:2019-latest


Note: the sa password must be a strong password (more than 8 characters); otherwise the container errors out and fails to start.
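
To verify the instance is up, sqlcmd inside the container can be used. The path below assumes the 2019 image; newer builds ship /opt/mssql-tools18/bin/sqlcmd instead and need -C to trust the self-signed certificate:

docker exec -it sqlserver2019 /opt/mssql-tools/bin/sqlcmd -S localhost -U sa -P 'dev@123,' -Q 'SELECT @@VERSION'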

InfluxDB2

Optional: attach to a custom network with a fixed address using --network my_network --ip 172.25.1.12 (used in the last command below).

The automated setup process bootstraps the system's initial admin user, organization, and bucket. Additional environment variables configure the setup logic (a full example follows the list):

    DOCKER_INFLUXDB_INIT_USERNAME: username for the system's initial super-user (required).
    DOCKER_INFLUXDB_INIT_PASSWORD: password for the system's initial super-user (required).
    DOCKER_INFLUXDB_INIT_ORG: name of the system's initial organization (required).
    DOCKER_INFLUXDB_INIT_BUCKET: name of the system's initial bucket (required).
    DOCKER_INFLUXDB_INIT_RETENTION: how long the initial bucket should retain data. If unset, the initial bucket retains data forever.
    DOCKER_INFLUXDB_INIT_ADMIN_TOKEN: authentication token associated with the initial super-user. If unset, a token is auto-generated.
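
A sketch of a bootstrap run using these variables; the username, password, org, and bucket values are placeholders, and DOCKER_INFLUXDB_INIT_MODE=setup triggers the automated setup:

docker run -d --name influxdb2 -p 8086:8086 \
    -e DOCKER_INFLUXDB_INIT_MODE=setup \
    -e DOCKER_INFLUXDB_INIT_USERNAME=admin \
    -e DOCKER_INFLUXDB_INIT_PASSWORD=ChangeMe123 \
    -e DOCKER_INFLUXDB_INIT_ORG=my-org \
    -e DOCKER_INFLUXDB_INIT_BUCKET=my-bucket \
    influxdb:2.3.0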


#Generate the default configuration file on the host filesystem:
docker run \
  --rm influxdb:2.3.0 \
  influxd print-config > config.yml

docker run \
    --name influxdb2 \
    -d \
    -p 8086:8086 \
    --volume /data/influx2/data:/var/lib/influxdb2 \
    -v /data/influx2/config.yml:/etc/influxdb2/config.yml \
    influxdb:2.3.0

docker run \
    --name influxdb2 \
    -p 8086:8086 \
    -d \
    -v /data/influx2/data:/var/lib/influxdb2 \
    -v /data/influx2/config.yml:/etc/influxdb2/config.yml \
    --network my_network --ip 172.25.1.12 \
    influxdb:2.3.0

redis

docker run -d --name redis -p 6379:6379 redis

docker run -d --name redis --network my_network --ip 172.25.1.11 redis

#Set a password
docker run -d --name redis -p 6379:6379 redis --requirepass "QkKpDbHgqD@R5@aes5s3"



#Several different persistence strategies are available. The following saves a snapshot of the database every 60 seconds if at least 1 write was performed (this also produces more logs, so the loglevel option may be desirable). With persistence enabled, data is stored under VOLUME /data, which can be mounted with -v /docker/host/dir:/data
docker run --name some-redis -d redis redis-server --save 60 1 --loglevel warning
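
A quick check against the password-protected instance above (redis-cli warns that -a on the command line is insecure):

docker exec -it redis redis-cli -a "QkKpDbHgqD@R5@aes5s3" ping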

mysql

#Method 1
#The official MySQL (5.7.19) Docker image maps its configuration directories differently from earlier releases: the default config file is /etc/mysql/my.cnf. For custom settings, create a .cnf file in the /etc/mysql/conf.d directory. The file can have any name as long as the extension is .cnf; its settings override those in /etc/mysql/my.cnf.
mkdir -p /data/mysql/data /data/mysql/logs /data/mysql/conf

cat > /data/mysql/conf/my.cnf <<EOF
[mysqld]
#skip-name-resolve
port = 3306
character-set-server=utf8mb4
collation-server=utf8mb4_unicode_ci
skip-character-set-client-handshake=1
default-storage-engine=INNODB
max_allowed_packet = 500M
explicit_defaults_for_timestamp=1
long_query_time = 10
lower_case_table_names=1
EOF

#Create the container
docker run -p 3306:3306 \
    -v /data/mysql/conf:/etc/mysql/conf.d  \
    -v /data/mysql/logs:/logs  \
    -v /data/mysql/data:/var/lib/mysql  \
    -e MYSQL_ROOT_PASSWORD=123456  \
    --network my_network  \
    --ip 172.25.1.10 \
    --name mysql5.7 \
    -d mysql:5.7

#Without port mapping
docker run \
    -v /data/mysql/conf:/etc/mysql/conf.d  \
    -v /data/mysql/logs:/logs  \
    -v /data/mysql/data:/var/lib/mysql  \
    -e MYSQL_ROOT_PASSWORD=amAM^EYz2av945Crw%N!  \
    --network my_network  \
    --ip 172.25.1.10 \
    --name mysql5.7 \
    -d mysql:5.7


#Create a *.cnf file (any name will do) in /data/mysql/conf
docker run --name mysql5.7 -p 3306:3306 -e MYSQL_ROOT_PASSWORD=123456 -d mysql:5.7



#Method 2
docker run -d -e MYSQL_ROOT_PASSWORD=root --name mysql5.7 mysql:5.7
#Copy the configuration out of the container
docker cp mysql5.7:/etc/mysql /data/mysql/conf
#Remove the container
docker rm -f mysql5.7
#Recreate the container
docker run -p 3306:3306 --name mysql-master \
-v /data/mysql/log:/var/log/mysql \
-v /data/mysql/data:/var/lib/mysql  \
-v /data/mysql/conf:/etc/mysql \
-e MYSQL_ROOT_PASSWORD=123456 \
-d mysql:5.7 

#Case-insensitive table names
Edit /etc/my.cnf and append lower_case_table_names=1 at the end:
[mysqld]
lower_case_table_names=1
Then restart MySQL (a verification sketch follows below).
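
To confirm the setting took effect, using the container name and password from the earlier run command (note that on MySQL 8 this variable can only be set when the data directory is first initialized):

docker exec -it mysql5.7 mysql -uroot -p123456 -e "SHOW VARIABLES LIKE 'lower_case_table_names';"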


#my.cnf base configuration
[client]
default-character-set=utf8mb4
[mysql]
default-character-set=utf8mb4
[mysqld]
init_connect='SET collation_connection = utf8_unicode_ci'
init_connect='SET NAMES utf8'
character-set-server=utf8
collation-server=utf8_unicode_ci
skip-character-set-client-handshake
skip-name-resolve

# Fix for garbled Chinese characters (MySQL 5.7 and above)
[mysqld]
character-set-server=utf8 
[client]
default-character-set=utf8 
[mysql]
default-character-set=utf8



docker run -p 33066:3306 --name mysql -v /data/mysql/conf:/etc/mysql/conf.d -v /data/mysql/logs:/logs -v /data/mysql/data:/var/lib/mysql -e MYSQL_ROOT_PASSWORD=root --network my_network --ip 172.25.1.10 -d mysql:5.7



#MySQL 8
#Start
docker run --name mysql8 -p 3306:3306 -e MYSQL_ROOT_PASSWORD=qweQWE123! -d mysql
#Enter the container
docker exec -it mysql8 bash
#Log in to MySQL
mysql -u root -p
ALTER USER 'root'@'localhost' IDENTIFIED BY 'qweQWE123!';
#Add a remote login user
CREATE USER 'liaozesong'@'%' IDENTIFIED WITH mysql_native_password BY 'qweQWE123!';
GRANT ALL PRIVILEGES ON *.* TO 'liaozesong'@'%';
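
Then, assuming a MySQL client is installed on the host, the remote account can be tested from outside the container:

mysql -h 127.0.0.1 -P 3306 -u liaozesong -p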

tdengine

docker run -d --hostname=debian -v /etc/taos:/etc/taos -p 6030:6030 -p 6035:6035 -p 6041:6041 -p 6030-6040:6030-6040/udp tdengine/tdengine:2.0.14.0

Installing Kafka with Docker / Docker Compose

docker run -d --name zookeeper -p 2181:2181 -e TZ="Asia/Shanghai" -v /opt/zookeeper/data:/data --restart always zookeeper:3.7

docker run -d --name kafka -p 9092:9092 -e KAFKA_BROKER_ID=0 -e KAFKA_ZOOKEEPER_CONNECT=Zookeeper-IP:2181 -e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://IP:9092 -e KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092 -e TZ="Asia/Shanghai" wurstmeister/kafka

# Environment variables explained (a smoke test follows the list)
KAFKA_BROKER_ID: each broker in a Kafka cluster has a BROKER_ID to identify itself
KAFKA_ADVERTISED_LISTENERS: Kafka's address and port, registered with ZooKeeper
KAFKA_ZOOKEEPER_CONNECT: ZooKeeper address
KAFKA_LISTENERS: the address Kafka listens on
TZ: container time zone
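
A topic smoke test, assuming the wurstmeister image (the Kafka CLI scripts are on its PATH):

docker exec -it kafka kafka-topics.sh --bootstrap-server localhost:9092 --create --topic smoke-test --partitions 1 --replication-factor 1
docker exec -it kafka kafka-topics.sh --bootstrap-server localhost:9092 --list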

docker run -d --name kfk-manager --restart always -p 9000:9000 -e ZK_HOSTS=<your ZooKeeper address and port> sheepkiller/kafka-manager

docker-compose.yml

version: '3.5'
services:
  zookeeper:
    image: wurstmeister/zookeeper   ## image
    container_name: zookeeper
    ports:
      - "2181:2181"                 ## exposed port
  kafka:
    image: wurstmeister/kafka       ## image
    container_name: kafka
    volumes: 
        - /etc/localtime:/etc/localtime ## mount so the container clock stays in sync with the host
    ports:
      - "9092:9092"
    environment:
      KAFKA_ADVERTISED_HOST_NAME: 127.0.0.1         ## change to the host IP
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181       ## Kafka runs on top of ZooKeeper
      KAFKA_ADVERTISED_PORT: 9092
      KAFKA_LOG_RETENTION_HOURS: 120
      KAFKA_MESSAGE_MAX_BYTES: 10000000
      KAFKA_REPLICA_FETCH_MAX_BYTES: 10000000
      KAFKA_GROUP_MAX_SESSION_TIMEOUT_MS: 60000
      KAFKA_NUM_PARTITIONS: 3
      KAFKA_DELETE_RETENTION_MS: 1000
  kafka-map:
    image: dushixiang/kafka-map               ## image: open-source web UI for managing Kafka clusters
    container_name: kafka-map
    environment:
        DEFAULT_USERNAME: admin
        DEFAULT_PASSWORD: admin                        ## default login credentials
    ports:  
      - "9009:8080"                                 ## exposed port (host port 9000 conflicts with too many services)
    volumes: 
      - ../kafka-map/data:/usr/local/kafka-map/data
  kafka-manager:
    image: sheepkiller/kafka-manager                ## image: open-source web UI for managing Kafka clusters
    container_name: kafka-manager
    environment:
        ZK_HOSTS: 192.168.110.30                         ## change to the host IP
    ports:  
      - "9010:9000"                                 ## exposed on 9010 (9009 is taken by kafka-map above; 9000 conflicts too often)

hbase

docker run -d -p 2182:2181 -p 18080:8080 -p 8085:8085 -p 9090:9090 -p 9095:9095 -p 16000:16000 -p 16010:16010 -p 16201:16201 -p 16301:16301  -p 16030:16030 -p 16020:16020 --name hbase001 harisekhon/hbase

clickhouse

docker run -d -p 18123:8123 -p 19000:9000 --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server

#Test
echo 'SELECT version()' | curl 'http://localhost:18123/' --data-binary @-

Connect with the local client

docker exec -it some-clickhouse-server clickhouse-client

Elasticsearch8

The kibana.yml and elasticsearch.yml files need to be placed in their corresponding directories.

elasticsearch.yml

mkdir -p /data/elasticsearch/config/
vi /data/elasticsearch/config/elasticsearch.yml
chown -R 1000:0 /data/elasticsearch/config/elasticsearch.yml

# the data directory needs write permissions
chmod 777 -R /data/elasticsearch/

# elasticsearch.yml content:
cluster.name: "docker-cluster"
network.host: 0.0.0.0
path.repo: ["/data/elasticsearch/backup"]

kibana.yml

mkdir -p /data/kibana/config/
vi /data/kibana/config/kibana.yml

# kibana.yml content:
#
# ** THIS IS AN AUTO-GENERATED FILE **
#

# Default Kibana configuration for docker target
server.name: kibana
server.host: "0"
elasticsearch.hosts: [ "http://elasticsearch:9200" ]
monitoring.ui.container.elasticsearch.enabled: true
i18n.locale: "zh-CN"

vi docker-compose.yml


Create the .env file

Passwords must be alphanumeric and must not contain special characters such as ! or @. The bash script embedded in the docker-compose.yml file only handles alphanumeric characters.

# Password for the 'elastic' user (at least 6 characters)
ELASTIC_PASSWORD=aa123456

# Password for the 'kibana_system' user (at least 6 characters)
KIBANA_PASSWORD=aa123456

# Version of Elastic products
STACK_VERSION=8.8.2

# Set the cluster name
CLUSTER_NAME=docker-cluster

# Set to 'basic' or 'trial' to automatically start a 30-day trial
LICENSE=basic
#LICENSE=trial

# Port to expose the Elasticsearch HTTP API to the host
ES_PORT=9200
#ES_PORT=127.0.0.1:9200

# Port to expose Kibana to the host
KIBANA_PORT=5601
#KIBANA_PORT=80

# Increase or decrease based on available host memory (in bytes). Gotcha: set a plain byte value here, not a unit suffix like 6g, or the containers will die outright.
#MEM_LIMIT=6g
MEM_LIMIT=1073741824
# Project namespace (defaults to the current folder name if not set)
#COMPOSE_PROJECT_NAME=myproject

Cluster version

version: "2.2"

services:
  setup:
    image: docker.elastic.co/elasticsearch/elasticsearch:${STACK_VERSION}
    volumes:
      - certs:/usr/share/elasticsearch/config/certs
    user: "0"
    command: >
      bash -c '
        if [ x${ELASTIC_PASSWORD} == x ]; then
          echo "Set the ELASTIC_PASSWORD environment variable in the .env file";
          exit 1;
        elif [ x${KIBANA_PASSWORD} == x ]; then
          echo "Set the KIBANA_PASSWORD environment variable in the .env file";
          exit 1;
        fi;
        if [ ! -f config/certs/ca.zip ]; then
          echo "Creating CA";
          bin/elasticsearch-certutil ca --silent --pem -out config/certs/ca.zip;
          unzip config/certs/ca.zip -d config/certs;
        fi;
        if [ ! -f config/certs/certs.zip ]; then
          echo "Creating certs";
          echo -ne \
          "instances:\n"\
          "  - name: es01\n"\
          "    dns:\n"\
          "      - es01\n"\
          "      - localhost\n"\
          "    ip:\n"\
          "      - 127.0.0.1\n"\
          "  - name: es02\n"\
          "    dns:\n"\
          "      - es02\n"\
          "      - localhost\n"\
          "    ip:\n"\
          "      - 127.0.0.1\n"\
          "  - name: es03\n"\
          "    dns:\n"\
          "      - es03\n"\
          "      - localhost\n"\
          "    ip:\n"\
          "      - 127.0.0.1\n"\
          > config/certs/instances.yml;
          bin/elasticsearch-certutil cert --silent --pem -out config/certs/certs.zip --in config/certs/instances.yml --ca-cert config/certs/ca/ca.crt --ca-key config/certs/ca/ca.key;
          unzip config/certs/certs.zip -d config/certs;
        fi;
        echo "Setting file permissions"
        chown -R root:root config/certs;
        find . -type d -exec chmod 750 \{\} \;;
        find . -type f -exec chmod 640 \{\} \;;
        echo "Waiting for Elasticsearch availability";
        until curl -s --cacert config/certs/ca/ca.crt https://es01:9200 | grep -q "missing authentication credentials"; do sleep 30; done;
        echo "Setting kibana_system password";
        until curl -s -X POST --cacert config/certs/ca/ca.crt -u "elastic:${ELASTIC_PASSWORD}" -H "Content-Type: application/json" https://es01:9200/_security/user/kibana_system/_password -d "{\"password\":\"${KIBANA_PASSWORD}\"}" | grep -q "^{}"; do sleep 10; done;
        echo "All done!";
      '
    healthcheck:
      test: ["CMD-SHELL", "[ -f config/certs/es01/es01.crt ]"]
      interval: 1s
      timeout: 5s
      retries: 120

  es01:
    depends_on:
      setup:
        condition: service_healthy
    image: docker.elastic.co/elasticsearch/elasticsearch:${STACK_VERSION}
    volumes:
      - certs:/usr/share/elasticsearch/config/certs
      - esdata01:/usr/share/elasticsearch/data
    ports:
      - ${ES_PORT}:9200
    environment:
      - node.name=es01
      - cluster.name=${CLUSTER_NAME}
      - cluster.initial_master_nodes=es01,es02,es03
      - discovery.seed_hosts=es02,es03
      - ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
      - bootstrap.memory_lock=true
      - xpack.security.enabled=true
      - xpack.security.http.ssl.enabled=true
      - xpack.security.http.ssl.key=certs/es01/es01.key
      - xpack.security.http.ssl.certificate=certs/es01/es01.crt
      - xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt
      - xpack.security.transport.ssl.enabled=true
      - xpack.security.transport.ssl.key=certs/es01/es01.key
      - xpack.security.transport.ssl.certificate=certs/es01/es01.crt
      - xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt
      - xpack.security.transport.ssl.verification_mode=certificate
      - xpack.license.self_generated.type=${LICENSE}
    mem_limit: ${MEM_LIMIT}
    ulimits:
      memlock:
        soft: -1
        hard: -1
    healthcheck:
      test:
        [
          "CMD-SHELL",
          "curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'",
        ]
      interval: 10s
      timeout: 10s
      retries: 120

  es02:
    depends_on:
      - es01
    image: docker.elastic.co/elasticsearch/elasticsearch:${STACK_VERSION}
    volumes:
      - certs:/usr/share/elasticsearch/config/certs
      - esdata02:/usr/share/elasticsearch/data
    environment:
      - node.name=es02
      - cluster.name=${CLUSTER_NAME}
      - cluster.initial_master_nodes=es01,es02,es03
      - discovery.seed_hosts=es01,es03
      - bootstrap.memory_lock=true
      - xpack.security.enabled=true
      - xpack.security.http.ssl.enabled=true
      - xpack.security.http.ssl.key=certs/es02/es02.key
      - xpack.security.http.ssl.certificate=certs/es02/es02.crt
      - xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt
      - xpack.security.transport.ssl.enabled=true
      - xpack.security.transport.ssl.key=certs/es02/es02.key
      - xpack.security.transport.ssl.certificate=certs/es02/es02.crt
      - xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt
      - xpack.security.transport.ssl.verification_mode=certificate
      - xpack.license.self_generated.type=${LICENSE}
    mem_limit: ${MEM_LIMIT}
    ulimits:
      memlock:
        soft: -1
        hard: -1
    healthcheck:
      test:
        [
          "CMD-SHELL",
          "curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'",
        ]
      interval: 10s
      timeout: 10s
      retries: 120

  es03:
    depends_on:
      - es02
    image: docker.elastic.co/elasticsearch/elasticsearch:${STACK_VERSION}
    volumes:
      - certs:/usr/share/elasticsearch/config/certs
      - esdata03:/usr/share/elasticsearch/data
    environment:
      - node.name=es03
      - cluster.name=${CLUSTER_NAME}
      - cluster.initial_master_nodes=es01,es02,es03
      - discovery.seed_hosts=es01,es02
      - bootstrap.memory_lock=true
      - xpack.security.enabled=true
      - xpack.security.http.ssl.enabled=true
      - xpack.security.http.ssl.key=certs/es03/es03.key
      - xpack.security.http.ssl.certificate=certs/es03/es03.crt
      - xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt
      - xpack.security.transport.ssl.enabled=true
      - xpack.security.transport.ssl.key=certs/es03/es03.key
      - xpack.security.transport.ssl.certificate=certs/es03/es03.crt
      - xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt
      - xpack.security.transport.ssl.verification_mode=certificate
      - xpack.license.self_generated.type=${LICENSE}
    mem_limit: ${MEM_LIMIT}
    ulimits:
      memlock:
        soft: -1
        hard: -1
    healthcheck:
      test:
        [
          "CMD-SHELL",
          "curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'",
        ]
      interval: 10s
      timeout: 10s
      retries: 120

  kibana:
    depends_on:
      es01:
        condition: service_healthy
      es02:
        condition: service_healthy
      es03:
        condition: service_healthy
    image: docker.elastic.co/kibana/kibana:${STACK_VERSION}
    volumes:
      - certs:/usr/share/kibana/config/certs
      - kibanadata:/usr/share/kibana/data
    ports:
      - ${KIBANA_PORT}:5601
    environment:
      - SERVERNAME=kibana
      - ELASTICSEARCH_HOSTS=https://es01:9200
      - ELASTICSEARCH_USERNAME=kibana_system
      - ELASTICSEARCH_PASSWORD=${KIBANA_PASSWORD}
      - ELASTICSEARCH_SSL_CERTIFICATEAUTHORITIES=config/certs/ca/ca.crt
    mem_limit: ${MEM_LIMIT}
    healthcheck:
      test:
        [
          "CMD-SHELL",
          "curl -s -I http://localhost:5601 | grep -q 'HTTP/1.1 302 Found'",
        ]
      interval: 10s
      timeout: 10s
      retries: 120

volumes:
  certs:
    driver: local
  esdata01:
    driver: local
  esdata02:
    driver: local
  esdata03:
    driver: local
  kibanadata:
    driver: local

networks:
  es-test:
    driver: bridge
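
A minimal sketch for bringing the cluster up and checking TLS from the host; the container name below is an assumption (it depends on your compose project name), and the password comes from the .env file above:

docker compose up -d
# copy the generated CA certificate out of the es01 container
docker cp <project>-es01-1:/usr/share/elasticsearch/config/certs/ca/ca.crt ./ca.crt
curl --cacert ca.crt -u elastic:aa123456 https://localhost:9200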

Single-node version

version: "2.2"

services:
  setup:
    image: docker.elastic.co/elasticsearch/elasticsearch:${STACK_VERSION}
    volumes:
      - certs:/usr/share/elasticsearch/config/certs
    user: "0"
    command: >
      bash -c '
        if [ x${ELASTIC_PASSWORD} == x ]; then
          echo "Set the ELASTIC_PASSWORD environment variable in the .env file";
          exit 1;
        elif [ x${KIBANA_PASSWORD} == x ]; then
          echo "Set the KIBANA_PASSWORD environment variable in the .env file";
          exit 1;
        fi;
        if [ ! -f config/certs/ca.zip ]; then
          echo "Creating CA";
          bin/elasticsearch-certutil ca --silent --pem -out config/certs/ca.zip;
          unzip config/certs/ca.zip -d config/certs;
        fi;
        if [ ! -f config/certs/certs.zip ]; then
          echo "Creating certs";
          echo -ne \
          "instances:\n"\
          "  - name: es01\n"\
          "    dns:\n"\
          "      - es01\n"\
          "      - localhost\n"\
          "    ip:\n"\
          "      - 127.0.0.1\n"\
          "  - name: es02\n"\
          "    dns:\n"\
          "      - es02\n"\
          "      - localhost\n"\
          "    ip:\n"\
          "      - 127.0.0.1\n"\
          "  - name: es03\n"\
          "    dns:\n"\
          "      - es03\n"\
          "      - localhost\n"\
          "    ip:\n"\
          "      - 127.0.0.1\n"\
          > config/certs/instances.yml;
          bin/elasticsearch-certutil cert --silent --pem -out config/certs/certs.zip --in config/certs/instances.yml --ca-cert config/certs/ca/ca.crt --ca-key config/certs/ca/ca.key;
          unzip config/certs/certs.zip -d config/certs;
        fi;
        echo "Setting file permissions"
        chown -R root:root config/certs;
        find . -type d -exec chmod 750 \{\} \;;
        find . -type f -exec chmod 640 \{\} \;;
        echo "Waiting for Elasticsearch availability";
        until curl -s --cacert config/certs/ca/ca.crt https://es01:9200 | grep -q "missing authentication credentials"; do sleep 30; done;
        echo "Setting kibana_system password";
        until curl -s -X POST --cacert config/certs/ca/ca.crt -u "elastic:${ELASTIC_PASSWORD}" -H "Content-Type: application/json" https://es01:9200/_security/user/kibana_system/_password -d "{\"password\":\"${KIBANA_PASSWORD}\"}" | grep -q "^{}"; do sleep 10; done;
        echo "All done!";
      '
    healthcheck:
      test: ["CMD-SHELL", "[ -f config/certs/es01/es01.crt ]"]
      interval: 1s
      timeout: 5s
      retries: 120

  es01:
    depends_on:
      setup:
        condition: service_healthy
    image: docker.elastic.co/elasticsearch/elasticsearch:${STACK_VERSION}
    volumes:
      - certs:/usr/share/elasticsearch/config/certs
      - esdata01:/usr/share/elasticsearch/data
    ports:
      - ${ES_PORT}:9200
    environment:
      - node.name=es01
      - cluster.name=${CLUSTER_NAME}
      - cluster.initial_master_nodes=es01
      - ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
      - bootstrap.memory_lock=true
      - xpack.security.enabled=true
      - xpack.security.http.ssl.enabled=true
      - xpack.security.http.ssl.key=certs/es01/es01.key
      - xpack.security.http.ssl.certificate=certs/es01/es01.crt
      - xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt
      - xpack.security.transport.ssl.enabled=true
      - xpack.security.transport.ssl.key=certs/es01/es01.key
      - xpack.security.transport.ssl.certificate=certs/es01/es01.crt
      - xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt
      - xpack.security.transport.ssl.verification_mode=certificate
      - xpack.license.self_generated.type=${LICENSE}
    mem_limit: ${MEM_LIMIT}
    ulimits:
      memlock:
        soft: -1
        hard: -1
    healthcheck:
      test:
        [
          "CMD-SHELL",
          "curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'",
        ]
      interval: 10s
      timeout: 10s
      retries: 120


  kibana:
    depends_on:
      es01:
        condition: service_healthy
    image: docker.elastic.co/kibana/kibana:${STACK_VERSION}
    volumes:
      - certs:/usr/share/kibana/config/certs
      - kibanadata:/usr/share/kibana/data
    ports:
      - ${KIBANA_PORT}:5601
    environment:
      - SERVERNAME=kibana
      - ELASTICSEARCH_HOSTS=https://es01:9200
      - ELASTICSEARCH_USERNAME=kibana_system
      - ELASTICSEARCH_PASSWORD=${KIBANA_PASSWORD}
      - ELASTICSEARCH_SSL_CERTIFICATEAUTHORITIES=config/certs/ca/ca.crt
    mem_limit: ${MEM_LIMIT}
    healthcheck:
      test:
        [
          "CMD-SHELL",
          "curl -s -I http://localhost:5601 | grep -q 'HTTP/1.1 302 Found'",
        ]
      interval: 10s
      timeout: 10s
      retries: 120

volumes:
  certs:
    driver: local
  esdata01:
    driver: local
  kibanadata:
    driver: local

networks:
  es-test:
    driver: bridge

Switching Kibana to Chinese

Just edit the Kibana config file and add one line: i18n.locale: "zh-CN"
Default Kibana config file path: /usr/share/kibana/config/kibana.yml

#docker exec -it es-8x-kibana-1  /bin/bash
 
docker cp es-8x-kibana-1:/usr/share/kibana/config/kibana.yml kibana.yml
# edit kibana.yml locally (add the i18n.locale line), then copy it back
docker cp kibana.yml es-8x-kibana-1:/usr/share/kibana/config/kibana.yml
docker restart es-8x-kibana-1

debezium

#Official command
docker run -it -d --name connect \
-p 8083:8083 \
-e GROUP_ID=1 \
-e CONFIG_STORAGE_TOPIC=my-connect-configs \
-e OFFSET_STORAGE_TOPIC=my-connect-offsets \
-e ADVERTISED_HOST_NAME=$(echo $DOCKER_HOST | cut -f3 -d '/' | cut -f1 -d':') \
--link zookeeper:zookeeper \
--link kafka:kafka \
debezium/connect


# Without container links (point BOOTSTRAP_SERVERS at an existing Kafka)
docker run -it -d --name connect \
-p 8183:8083 \
-e GROUP_ID=1 \
-e CONFIG_STORAGE_TOPIC=my-connect-configs \
-e OFFSET_STORAGE_TOPIC=my-connect-offsets \
-e ADVERTISED_HOST_NAME=$(echo $DOCKER_HOST | cut -f3 -d '/' | cut -f1 -d':') \
-e BOOTSTRAP_SERVERS=192.168.110.30:9094 \
debezium/connect:2.4
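
Once Connect is up, the standard Kafka Connect REST API on the mapped port can be used to inspect installed plugins and registered connectors:

curl -s http://localhost:8183/connector-plugins
curl -s http://localhost:8183/connectors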


docker run -it -d --name connect \
-p 8183:8083 \
-e GROUP_ID=1 \
-e CONFIG_STORAGE_TOPIC=my-connect-configs \
-e OFFSET_STORAGE_TOPIC=my-connect-offsets \
-e ADVERTISED_HOST_NAME=$(echo $DOCKER_HOST | cut -f3 -d '/' | cut -f1 -d':') \
debezium/connect

flink

version: "2.2"
services:
  jobmanager:
    image: flink:1.17-java8
    ports:
      - "8081:8081"
    command: jobmanager
    environment:
      - |
        FLINK_PROPERTIES=
        jobmanager.rpc.address: jobmanager
      - TZ=Asia/Shanghai

  taskmanager:
    image: flink:1.17-java8
    depends_on:
      - jobmanager
    command: taskmanager
    scale: 1
    environment:
      - |
        FLINK_PROPERTIES=
        jobmanager.rpc.address: jobmanager
        taskmanager.numberOfTaskSlots: 2
      - TZ=Asia/Shanghai

Persistence

version: "2.2"
services:
  jobmanager:
    image: flink:1.17-java8
    ports:
      - "8081:8081"
    command: jobmanager
    restart: always
    environment:
      FLINK_PROPERTIES: |
        jobmanager.rpc.address: jobmanager    
      TZ: Asia/Shanghai        
    volumes:
      - jobmanager_data:/opt/flink/data

  taskmanager:
    image: flink:1.17-java8
    depends_on:
      - jobmanager
    command: taskmanager
    restart: always
    scale: 1
    environment:
      FLINK_PROPERTIES: |
        jobmanager.rpc.address: jobmanager
        taskmanager.numberOfTaskSlots: 2
      TZ: Asia/Shanghai
    volumes:
      - taskmanager_data:/opt/flink/data

volumes:
  jobmanager_data:
  taskmanager_data:
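
To smoke-test the deployment, the example jobs bundled with the Flink image can be submitted (the examples path is assumed for flink:1.17):

docker compose up -d
docker compose exec jobmanager flink run /opt/flink/examples/streaming/WordCount.jar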

Kafka: single-node

bitnami version

chown -R 1001:1001 /data/kafka-persistence

version: "3"
services:
  kafka:
    image: 'bitnami/kafka:latest'
    restart: always
    ports:
      - '9093:9092'
      - '9094:9094'
    environment:
      - KAFKA_CFG_NODE_ID=0
      - KAFKA_CFG_PROCESS_ROLES=controller,broker
      - KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=0@kafka:9093
      - KAFKA_CFG_CONTROLLER_LISTENER_NAMES=CONTROLLER
      - KAFKA_CFG_LISTENERS=PLAINTEXT://:9092,CONTROLLER://:9093,EXTERNAL://:9094
      - KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://kafka:9092,EXTERNAL://xxxxxxxxxxxxxxx:9094
      - KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=CONTROLLER:PLAINTEXT,EXTERNAL:PLAINTEXT,PLAINTEXT:PLAINTEXT
      - TZ=Asia/Shanghai   
    volumes:
      - /data/kafka-persistence:/bitnami/kafka
  
  kafka-map:
    image: dushixiang/kafka-map               ## image: open-source web UI for managing Kafka clusters
    container_name: kafka-map
    restart: always
    environment:
        DEFAULT_USERNAME: admin
        DEFAULT_PASSWORD: CeF4f332PfSQPvjQfy                        ## login credentials
    ports:  
      - "9011:8080"                                 ## exposed port (9000 conflicts with too many services)
    volumes: 
      - ../kafka-map/data:/usr/local/kafka-map/data
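
A quick check that the broker answers (the Bitnami image puts the Kafka scripts on PATH; the internal PLAINTEXT listener from the config above is on 9092):

docker compose exec kafka kafka-topics.sh --bootstrap-server localhost:9092 --list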

MQTT (emqx)

Default dashboard credentials: admin / public

# Mirror image available at m.daocloud.io/docker.io/emqx:latest
docker run -d --name emqx -p 18083:18083 -p 1883:1883 emqx:latest

docker run -d --name emqx -p 1883:1883 -p 8083:8083 -p 8084:8084 -p 8883:8883 -p 18083:18083 m.daocloud.io/docker.io/emqx:latest



#Path mappings (without a config file)
docker run -d --name emqx -p 1883:1883 -p 8083:8083 -p 8084:8084 -p 8883:8883 -p 18083:18083 -v $PWD/emqx/data:/opt/emqx/data -v $PWD/emqx/log:/opt/emqx/log m.daocloud.io/emqx/emqx:latest


#Copy the config file out of the container
docker cp emqx:/opt/emqx/etc/emqx.conf $PWD/emqx/config/emqx.conf
#With the config file mounted
docker run -d --name emqx -p 1883:1883 -p 8083:8083 -p 8084:8084 -p 8883:8883 -p 18083:18083 -v $PWD/emqx/data:/opt/emqx/data -v $PWD/emqx/log:/opt/emqx/log -v $PWD/emqx/config/emqx.conf:/opt/emqx/etc/emqx.conf m.daocloud.io/emqx/emqx:latest
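
A publish/subscribe smoke test, assuming the mosquitto clients are installed on the host (EMQX allows anonymous MQTT connections by default):

mosquitto_sub -h 127.0.0.1 -p 1883 -t 'demo/#' -v &
mosquitto_pub -h 127.0.0.1 -p 1883 -t demo/hello -m 'hi from docker'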

docker-compose

version: '3'
services:
  emqx:
    image: emqx:latest
    restart: always
    environment:
      EMQX_NODE__NAME: foo_emqx@172.16.0.169
    volumes:
      - vol-emqx-data:/opt/emqx/data
      - vol-emqx-log:/opt/emqx/log
    ports:  
      - "1883:1883"
      - "8083:8083"
      - "18083:18083"

# Or run a pinned release directly:
docker run -d --name emqx -p 1883:1883 -p 8083:8083 -p 8084:8084 -p 8883:8883 -p 18083:18083 emqx/emqx:5.8.1