测试常用中间件快速启动
MySQL 5.7
version: '3.3'
services:
  mysql:
    image: 'mysql:5.7'
    container_name: mysql
    # network_mode: host
    restart: always
    environment:
      - TZ=Asia/Shanghai
      - MYSQL_ROOT_PASSWORD=ROOT_PASSWORD
      - MYSQL_DATABASE=DB1
      - MYSQL_USER=DB_USERNAME
      # fixed placeholder typo (was DB_PASSWROD)
      - MYSQL_PASSWORD=DB_PASSWORD
      - MYSQL_ROOT_HOST=%
    command: --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci
    volumes:
      - '${PWD}/data/mysql-data:/var/lib/mysql'
    expose:
      - 3306
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "5"
    healthcheck:
      # password must match MYSQL_ROOT_PASSWORD above (was the unrelated -p123456);
      # also removed the trailing comma, which is invalid in a YAML flow sequence
      test: ["CMD", "mysqladmin", "ping", "-h", "localhost", "-uroot", "-pROOT_PASSWORD"]
      interval: 30s
      timeout: 10s
MySQL 8.X
version: '3.8'
services:
  mysql:
    image: mysql:8.0
    restart: always
    environment:
      MYSQL_ROOT_PASSWORD: your_root_password
      MYSQL_DATABASE: your_database
      MYSQL_USER: your_username
      MYSQL_PASSWORD: your_password
      MYSQL_ROOT_HOST: '%'
      # NOTE(review): the official mysql image does not read this variable — the
      # auth plugin is selected via the command flag below; kept here for reference
      MYSQL_DEFAULT_AUTHENTICATION_PLUGIN: mysql_native_password
    command:
      #- --default-authentication-plugin=caching_sha2_password
      # use the MySQL 5.7 auth plugin so old clients can connect
      - --default-authentication-plugin=mysql_native_password
      - --character-set-server=utf8mb4
      - --collation-server=utf8mb4_general_ci
      - --explicit_defaults_for_timestamp=true
    volumes:
      - ./data:/var/lib/mysql
    ports:
      - "3306:3306"  # fixed: closing quote was missing
    healthcheck:
      test: ["CMD", "mysqladmin", "ping", "-h", "127.0.0.1", "--silent"]
      interval: 3s
      retries: 5
      start_period: 30s
Redis
version: '3'
services:
  redis:
    image: redis:latest
    container_name: redis
    command: ["redis-server", "--requirepass", "123456"]
    ports:
      - "6379:6379"
    volumes:
      - ./data:/data
    environment:
      - TZ=Asia/Shanghai
Kafka
version: '3.8'
services:
  zookeeper:
    image: bitnami/zookeeper:latest
    container_name: zookeeper
    ports:
      - "2181:2181"
    environment:
      ALLOW_ANONYMOUS_LOGIN: "yes"
      TZ: "Asia/Shanghai"
    user: "root"  # works around Permission denied on the bind-mounted dirs
    volumes:
      - ./zookeeper/data:/bitnami/zookeeper/data
      - ./zookeeper/logs:/bitnami/zookeeper/logs
  kafka:
    image: bitnami/kafka:latest
    container_name: kafka
    depends_on:
      - zookeeper
    ports:
      - "9092:9092"
    environment:
      TZ: "Asia/Shanghai"
      # bitnami/kafka refuses to start a plaintext listener without this flag
      ALLOW_PLAINTEXT_LISTENER: "yes"
      KAFKA_CFG_ZOOKEEPER_CONNECT: zookeeper:2181
      # NOTE(review): KAFKA_CREATE_TOPICS is a wurstmeister/kafka feature; the
      # bitnami image likely ignores it — confirm, or create topics at startup
      KAFKA_CREATE_TOPICS: "topic1:1:1,topic2:2:1,topic3:3:1"
      # bitnami expects broker settings under the KAFKA_CFG_ prefix
      # (was KAFKA_LOG_RETENTION_HOURS, which the image does not map)
      KAFKA_CFG_LOG_RETENTION_HOURS: "24"
    volumes:
      - ./kafka/data:/bitnami/kafka/data
    user: "root"  # avoids "Disk error while locking directory /bitnami/kafka/data"
RabbitMQ
version: '3.8'
services:
  rabbitmq:
    image: rabbitmq:3.9-management
    container_name: rabbitmq
    hostname: rabbitmq
    ports:
      - "5672:5672"    # RabbitMQ default AMQP port
      - "15672:15672"  # RabbitMQ web UI port
    volumes:
      - ./rabbitmq_data:/var/lib/rabbitmq
    environment:
      - TZ=Asia/Shanghai             # timezone: Shanghai
      - RABBITMQ_DEFAULT_USER=admin  # default username
      - RABBITMQ_DEFAULT_PASS=password  # default password
打开 web management ui
通过
ip a
查看 ubuntu 的 IP,打开浏览器,输入
http://[ip]:15672
访问
Mongo
version: '3.8'
services:
  mongodb:
    image: mongo:latest
    container_name: mongodb
    ports:
      - "27017:27017"
    volumes:
      - ./mongodb_data:/data/db
    environment:
      TZ: Asia/Shanghai
      MONGO_INITDB_ROOT_USERNAME: root
      MONGO_INITDB_ROOT_PASSWORD: example
    command: mongod --auth  # require authentication
    restart: always
InfluxDB
# docker -v requires an absolute host path; a bare "./influxdb" is rejected
# as an invalid local volume name — anchor it with $PWD
docker run -d --name influxdb -p 8086:8086 -v "$PWD/influxdb:/var/lib/influxdb" influxdb:1.8
MinIO
# docker -v requires an absolute host path ("./data" is rejected) — use $PWD
docker run -d --name minio \
  -p 9000:9000 -p 9001:9001 \
  -v "$PWD/data:/data" \
  -e "TZ=Asia/Shanghai" \
  -e "MINIO_ROOT_USER=access_key" \
  -e "MINIO_ROOT_PASSWORD=secret_key" \
  quay.io/minio/minio server --console-address ":9001" /data
RabbitMQ 单节点或集群
version: '3'
services:
  rabbit:
    container_name: rabbit
    image: rabbitmq:3.7-management-alpine
    restart: always
    network_mode: host
    hostname: rabbit1
    extra_hosts:
      - "rabbit1:192.168.182.129"  # node 1
      - "rabbit2:192.168.182.130"  # node 2
      - "rabbit3:192.168.182.131"  # node 3
    environment:
      - TZ=Asia/Shanghai
      # read from the sibling .env file; fall back to the literal default
      - RABBITMQ_ERLANG_COOKIE=${MY_COOKIE:-MY_COOKIE}
      - RABBITMQ_DEFAULT_USER=${MY_USER:-MY_USER}
      - RABBITMQ_DEFAULT_PASS=${MY_PASS:-MY_PASS}
      # - RABBITMQ_LOGS=
      # - RABBITMQ_LOG_BASE=/var/log/rabbitmq
    #volumes:
    #  - ./rabbitmq/log:/var/log/rabbitmq
    #ports:
    #  - "5672:5672"
    #  - "15672:15672"
nacos2.3.0单节点或集群
支持集群|单节点
节点一: 192.168.182.129
节点二: 192.168.182.130
节点三: 192.168.182.131
注意: 数据库需要提前准备好;
单机模式(standalone) - 用于测试和单机试用。
集群模式 - 用于生产环境,确保高可用。
多集群模式 - 用于多数据中心场景。
version: '3'
services:
  nacos:
    container_name: nacos-server
    image: nacos/nacos-server:v2.3.0
    restart: always
    hostname: nacos-server
    network_mode: host
    #ports:
    #  - 8845:8848
    #  - 9845:9848
    #  - 9841:9849
    environment:
      - MODE=cluster  # standalone for a single node, cluster for cluster mode
      #- PREFER_HOST_MODE=hostname  # register with hostname instead of the default IP
      - NACOS_AUTH_ENABLE=true
      - NACOS_AUTH_IDENTITY_KEY=nacos
      - NACOS_AUTH_IDENTITY_VALUE=nacos
      - NACOS_AUTH_TOKEN=SecretKey012345678901234567890123456789012345678901234567890123456789
      # single node: keep one IP or remove this line
      - NACOS_SERVERS=192.168.182.129:8848 192.168.182.130:8848 192.168.182.131:8848
      - SPRING_DATASOURCE_PLATFORM=mysql
      - MYSQL_SERVICE_HOST=192.168.182.129
      - MYSQL_SERVICE_PORT=3306
      - MYSQL_SERVICE_USER=nacos
      - MYSQL_SERVICE_PASSWORD=nacos
      - MYSQL_SERVICE_DB_NAME=nacos
      - JVM_XMS=1024m
      - JVM_XMX=1024m
      - JVM_XMN=128m
ELK
目录结构
# create the data directories and open their permissions so the
# containers (which run as non-root users) can write to the bind mounts
mkdir elasticsearch kibana logstash
chmod -R 777 elasticsearch
chmod -R 777 kibana
chmod -R 777 logstash
logstash 配置
cat logstash/pipeline/logstash.conf
input {
  beats {
    host => "0.0.0.0"
    port => "4560"
  }
}
filter {
  # strip ANSI color-code remnants and drop metadata fields
  mutate {
    gsub => ["message", "\[\d+m",""]
    remove_field => ["_index","_type","_id","_version","@version","_score","host","agent","tags","log","input","ecs"]
  }
  # only nginx messages are JSON — parse them and take their timestamp
  if [server_type] in ["nginx","nginx-proxy"] {
    json {
      source => "message"
    }
    date {
      match => ["@timestamp", "yyyy-MM-dd HH:mm:ss.SSS"]
      target => "@timestamp"
    }
  }
}
output {
  elasticsearch {
    hosts => ["elasticsearch:9200"]
    # one index per server_type per day
    index => "%{[server_type]}-%{+YYYY.MM.dd}"
    action => "index"
    codec => "json_lines"
  }
  #stdout { codec => "rubydebug" }
}
yaml 文件
version: '3'
services:
  elasticsearch:
    image: elasticsearch:7.8.0
    container_name: elasticsearch
    ports:
      - "9200:9200"
    environment:
      - cluster.name=elasticsearch  # cluster name
      - discovery.type=single-node  # start as a single node
      - ES_JAVA_OPTS=-Xms2g -Xmx2g  # JVM heap size
      - http.max_content_length=1gb  # max request body size
      - cluster.max_shards_per_node=3000  # max shards per node
      - TZ=Asia/Shanghai  # container timezone (Beijing time)
      # extra tuning
      - indices.fielddata.cache.size=20%  # share of heap for the field-data cache
      - indices.breaker.total.use_real_memory=false  # breakers track estimated, not real, memory
      - indices.breaker.fielddata.limit=40%  # field-data circuit-breaker limit
      - indices.breaker.request.limit=40%  # per-request breaker limit (default 60%)
      - indices.breaker.total.limit=75%  # parent breaker limit (default 70%)
    volumes:
      - ./elasticsearch/data:/usr/share/elasticsearch/data
      - ./elasticsearch/plugins:/usr/share/elasticsearch/plugins
    networks:
      - elk-network
    deploy:
      resources:
        limits:
          memory: 2g  # cap container memory at 2GB
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "5"
  logstash:
    image: logstash:7.8.0
    container_name: logstash
    ports:
      - "4560:4560"
    environment:
      - LANG=zh_CN.UTF-8
      - LC_ALL=zh_CN.UTF-8
      - TZ=Asia/Shanghai
      #- xpack.monitoring.elasticsearch.hosts=['http://127.0.0.1:9200']  # point at an external ES when using network_mode: host
    depends_on:
      - elasticsearch
    volumes:
      - ./logstash/data:/usr/share/logstash/data
      - ./logstash/plugins:/usr/share/logstash/plugins
      - ./logstash/pipeline/logstash.conf:/usr/share/logstash/pipeline/logstash.conf
    networks:
      - elk-network
    deploy:
      resources:
        limits:
          memory: 1g  # cap container memory at 1GB
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "5"
  kibana:
    image: kibana:7.8.0
    container_name: kibana
    ports:
      - "5601:5601"
    depends_on:
      - elasticsearch
    environment:
      - ELASTICSEARCH_HOSTS=http://elasticsearch:9200
      - I18N_LOCALE=zh-CN
    volumes:
      - ./kibana/data:/usr/share/kibana/data
      - ./kibana/plugins:/usr/share/kibana/plugins
    networks:
      - elk-network
    deploy:
      resources:
        limits:
          memory: 1g  # cap container memory at 1GB
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "5"
networks:
  elk-network:
    driver: bridge
Last updated