# Cucumber/docker-compose.yml

networks:
  vpn:
    name: kafkaVPN
    driver: bridge
    ipam:
      config:
        - subnet: "192.168.2.0/24"
          gateway: "192.168.2.1"
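# The fixed subnet lets services on kafkaVPN pin static addresses
# (e.g. the currently commented-out kafka broker at 192.168.2.10 below).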
services:
  # Below: what runs in the cloud
  # cloud:
  #   networks:
  #     - vpn
  #   build: ./Cloud/
  #   ports:
  #     - "5124:5124"
  #   environment:
  #     ASPNETCORE_ENVIRONMENT: Development
  #     DB_CONNECTION_STRING: ${DB_CONNECTION_STRING}
  #     REDDIS_URL: redis:6379
  #     KAFKA_URL: kafka:29092
  #     # Add when the frontend is ready!
  #     # FRONT_URL: front:3000
  #   depends_on:
  #     - postgres
  #     - redis
  # postgres:
  #   image: postgres:14
  #   container_name: cucumber_database
  #   environment:
  #     POSTGRES_USER: ${POSTGRES_USER}
  #     POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
  #     POSTGRES_DB: ${POSTGRES_DB}
  #   ports:
  #     - "5438:5432"
  #   volumes:
  #     - postgres_data:/var/lib/postgresql/data
  # redis:
  #   image: 'redis:latest'
  #   ports:
  #     - '6379:6379'
  #   volumes:
  #     - 'cloud-redis:/data'
  #   healthcheck:
  #     test:
  #       - CMD
  #       - redis-cli
  #       - ping
  #     retries: 3
  #     timeout: 5s
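  # The ${...} values above are substituted from a .env file next to this
  # compose file (or from the shell environment). Hypothetical example,
  # variable names taken from this file, values are placeholders only:
  #   POSTGRES_USER=cucumber
  #   POSTGRES_PASSWORD=change-me
  #   POSTGRES_DB=cucumber
  #   DB_CONNECTION_STRING=Host=postgres;Port=5432;Database=cucumber;Username=cucumber;Password=change-me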
  # ------------------------------------------------
  # Below: what runs on the farm
  # zookeeper:
  #   networks:
  #     - vpn
  #   image: confluentinc/cp-zookeeper:7.4.0
  #   environment:
  #     ZOOKEEPER_CLIENT_PORT: 2181
  #     ZOOKEEPER_TICK_TIME: 2000
  #   ports:
  #     - 2181:2181
  # kafka:
  #   networks:
  #     vpn:
  #       ipv4_address: 192.168.2.10
  #   image: confluentinc/cp-kafka:7.4.0
  #   ports:
  #     - 9092:9092
  #     - 9997:9997
  #   expose:
  #     - 29092
  #   environment:
  #     KAFKA_BROKER_ID: 1
  #     KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
  #     KAFKA_LISTENERS: HOST://0.0.0.0:9092,DOCKER://0.0.0.0:29092
  #     KAFKA_ADVERTISED_LISTENERS: HOST://192.168.1.5:9092,DOCKER://kafka:29092
  #     KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: DOCKER:PLAINTEXT,HOST:PLAINTEXT
  #     KAFKA_INTER_BROKER_LISTENER_NAME: DOCKER
  #     KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
  #     KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
  #     KAFKA_LOG_FLUSH_INTERVAL_MESSAGES: 10000
  #     KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
  #   depends_on:
  #     - zookeeper
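  # The broker defines two listeners: DOCKER (kafka:29092) for containers on
  # the vpn network, and HOST (advertised as 192.168.1.5:9092, presumably the
  # farm host's LAN address) for external clients; inter-broker traffic uses
  # the DOCKER listener (KAFKA_INTER_BROKER_LISTENER_NAME).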
  # init-kafka:
  #   networks:
  #     - vpn
  #   image: confluentinc/cp-kafka:7.4.0
  #   depends_on:
  #     - kafka
  #   entrypoint: [ '/bin/sh', '-c' ]
  #   command: |
  #     "
  #     # blocks until kafka is reachable
  #     kafka-topics --bootstrap-server kafka:29092 --list
  #     echo -e 'Creating kafka topics'
  #     kafka-topics --bootstrap-server kafka:29092 --create --if-not-exists --topic commands --replication-factor 1 --partitions 1
  #     kafka-topics --bootstrap-server kafka:29092 --create --if-not-exists --topic data --replication-factor 1 --partitions 1
  #     kafka-topics --bootstrap-server kafka:29092 --create --if-not-exists --topic response --replication-factor 1 --partitions 1
  #     echo -e 'Successfully created the following topics:'
  #     kafka-topics --bootstrap-server kafka:29092 --list
  #     "
  # kafka-ui:
  #   networks:
  #     - vpn
  #   container_name: kafka-ui
  #   image: provectuslabs/kafka-ui:latest
  #   ports:
  #     - 8080:8080
  #   depends_on:
  #     - kafka
  #   environment:
  #     KAFKA_CLUSTERS_0_NAME: local
  #     KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:29092
  #     KAFKA_CLUSTERS_0_METRICS_PORT: 9997
  redis-farm:
    networks:
      - vpn
    image: 'redis:latest'
    ports:
      - '6380:6379'
    volumes:
      - 'farm-redis:/data'
    healthcheck:
      test:
        - CMD
        - redis-cli
        - ping
      retries: 3
      timeout: 5s
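  # The healthcheck runs `redis-cli ping` inside the container; the service is
  # marked unhealthy after 3 failed attempts (5s timeout per attempt).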
  redis-insight:
    networks:
      - vpn
    image: redis/redisinsight:latest
    restart: always
    ports:
      - "5540:5540"
    volumes:
      - redis-insight:/data
  manager:
    networks:
      - vpn
    build:
      context: .
      dockerfile: ./GreenhouseManager/Dockerfile
    environment:
      KAFKA_URL: kafka:29092
    depends_on:
      - kafka
    expose:
      - 20002
  detector:
    networks:
      - vpn
    build:
      context: .
      dockerfile: ./GreenhouseDetector/Dockerfile
    environment:
      MANAGER_URL: http://manager:20002
    depends_on:
      - manager
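  # detector reaches manager over the vpn network at http://manager:20002;
  # `expose` makes port 20002 visible to other containers only, not the host.
  # Note that manager's depends_on/KAFKA_URL refer to the kafka service that
  # is currently commented out above, so the broker must be uncommented (or
  # otherwise reachable) for this stack to start.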
volumes:
  postgres_data:
    driver: local
  cloud-redis:
    driver: local
  farm-redis:
    driver: local
  redis-insight:
    driver: local
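
# Typical usage (from the Cucumber/ directory):
#   docker compose up -d --build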