# Docker Compose stack for the greenhouse project.
# Two halves: "cloud" services (currently disabled/commented) and "farm"
# services (redis-farm, redis-insight, manager, detector), all attached to
# a dedicated bridge network with a fixed subnet so kafka can be pinned to
# a static address when re-enabled.
networks:
  vpn:
    name: kafkaVPN
    driver: bridge
    ipam:
      config:
        - subnet: "192.168.2.0/24"
          gateway: "192.168.2.1"

services:
  # Below: services that run in the cloud
  # cloud:
  #   networks:
  #     - vpn
  #   build: ./Cloud/
  #   ports:
  #     - "5124:5124"
  #   environment:
  #     ASPNETCORE_ENVIRONMENT: Development
  #     DB_CONNECTION_STRING: ${DB_CONNECTION_STRING}
  #     REDDIS_URL: redis:6379
  #     KAFKA_URL: kafka:29092
  #     # Add when the frontend is ready!
  #     # FRONT_URL: front:3000
  #   depends_on:
  #     - postgres
  #     - redis

  # postgres:
  #   image: postgres:14
  #   container_name: cucumber_database
  #   environment:
  #     POSTGRES_USER: ${POSTGRES_USER}
  #     POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
  #     POSTGRES_DB: ${POSTGRES_DB}
  #   ports:
  #     - "5438:5432"
  #   volumes:
  #     - postgres_data:/var/lib/postgresql/data

  # redis:
  #   image: 'redis:latest'
  #   ports:
  #     - '6379:6379'
  #   volumes:
  #     - 'cloud-redis:/data'
  #   healthcheck:
  #     test:
  #       - CMD
  #       - redis-cli
  #       - ping
  #     retries: 3
  #     timeout: 5s

  # ------------------------------------------------
  # Below: services that run on the farm

  # zookeeper:
  #   networks:
  #     - vpn
  #   image: confluentinc/cp-zookeeper:7.4.0
  #   environment:
  #     ZOOKEEPER_CLIENT_PORT: 2181
  #     ZOOKEEPER_TICK_TIME: 2000
  #   ports:
  #     - 2181:2181

  # kafka:
  #   networks:
  #     vpn:
  #       ipv4_address: 192.168.2.10
  #   image: confluentinc/cp-kafka:7.4.0
  #   ports:
  #     - 9092:9092
  #     - 9997:9997
  #   expose:
  #     - 29092:29092
  #   environment:
  #     KAFKA_BROKER_ID: 1
  #     KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
  #     KAFKA_LISTENERS: HOST://0.0.0.0:9092,DOCKER://0.0.0.0:29092
  #     KAFKA_ADVERTISED_LISTENERS: HOST://192.168.1.5:9092,DOCKER://kafka:29092
  #     KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: DOCKER:PLAINTEXT,HOST:PLAINTEXT
  #     KAFKA_INTER_BROKER_LISTENER_NAME: DOCKER
  #     KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
  #     KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
  #     KAFKA_LOG_FLUSH_INTERVAL_MESSAGES: 10000
  #     KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
  #   depends_on:
  #     - zookeeper

  # init-kafka:
  #   networks:
  #     - vpn
  #   image: confluentinc/cp-kafka:7.4.0
  #   depends_on:
  #     - kafka
  #   entrypoint: [ '/bin/sh', '-c' ]
  #   command: |
  #     "
  #     # blocks until kafka is reachable
  #     kafka-topics --bootstrap-server kafka:29092 --list
  #     echo -e 'Creating kafka topics'
  #     kafka-topics --bootstrap-server kafka:29092 --create --if-not-exists --topic commands --replication-factor 1 --partitions 1
  #     kafka-topics --bootstrap-server kafka:29092 --create --if-not-exists --topic data --replication-factor 1 --partitions 1
  #     kafka-topics --bootstrap-server kafka:29092 --create --if-not-exists --topic response --replication-factor 1 --partitions 1
  #     echo -e 'Successfully created the following topics:'
  #     kafka-topics --bootstrap-server kafka:29092 --list
  #     "

  # kafka-ui:
  #   networks:
  #     - vpn
  #   container_name: kafka-ui
  #   image: provectuslabs/kafka-ui:latest
  #   ports:
  #     - 8080:8080
  #   depends_on:
  #     - kafka
  #   environment:
  #     KAFKA_CLUSTERS_0_NAME: local
  #     KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:29092
  #     KAFKA_CLUSTERS_0_METRICS_PORT: 9997

  # Farm-side Redis instance; host port 6380 to avoid clashing with the
  # (currently disabled) cloud redis on 6379.
  redis-farm:
    networks:
      - vpn
    image: 'redis:latest'
    ports:
      - '6380:6379'
    volumes:
      - 'farm-redis:/data'
    healthcheck:
      test:
        - CMD
        - redis-cli
        - ping
      retries: 3
      timeout: 5s

  # Web UI for inspecting Redis data.
  redis-insight:
    networks:
      - vpn
    image: redis/redisinsight:latest
    restart: always
    ports:
      - "5540:5540"
    volumes:
      - redis-insight:/data

  # Greenhouse manager; reachable by other services on the vpn network
  # at manager:20002 (exposed, not published to the host).
  manager:
    networks:
      - vpn
    build:
      context: .
      dockerfile: ./GreenhouseManager/Dockerfile
    environment:
      KAFKA_URL: kafka:29092
    # NOTE(review): the 'kafka' service is commented out above — an active
    # depends_on to an undefined service makes `docker compose up` fail
    # validation. Re-enable this together with the kafka service.
    # depends_on:
    #   - kafka
    expose:
      - "20002"

  # Greenhouse detector; talks to the manager over the vpn network.
  detector:
    networks:
      - vpn
    build:
      context: .
      dockerfile: ./GreenhouseDetector/Dockerfile
    environment:
      MANAGER_URL: http://manager:20002
    depends_on:
      - manager

volumes:
  postgres_data:
    driver: local
  cloud-redis:
    driver: local
  farm-redis:
    driver: local
  redis-insight:
    driver: local