# Tiresias/docker-compose.yml
# (file-viewer metadata from the original paste: 92 lines, 2.1 KiB, YAML)
# Compose stack: a Redis broker plus four app services (reader -> preprocess ->
# inference -> web) that communicate through Redis streams. Indentation below
# restores the nesting the flattened original clearly intended.
services:
  # Message broker shared by all services.
  redis:
    image: redis:7
    container_name: redis_server
    ports:
      - "6379:6379"

  # Reads the RTSP camera stream (or a local video file) and publishes frames.
  stream_reader:
    build:
      dockerfile: Dockerfile
      context: reader
    container_name: stream_reader
    depends_on:
      - redis
    environment:
      # NOTE(review): plaintext credentials committed in the URL — consider an
      # env file or secret store; left unchanged to preserve behavior.
      stream_url: "rtsp://admin:labvision2019@10.1.8.182:554"
      # stream_url: "/videos/video.mp4"
      stream_label: "pelea3"
      redis_host: "redis"
      redis_port: "6379"
      redis_db: "0"
    restart: unless-stopped
    # Mount a local test clip when using the file-based stream_url above.
    # volumes:
    #   - /home/ifiguero/DIA/dia3/DIA0205 - VISIÓN COMPUTACIONAL APLICADA/Profesor Cristian Aguilera/dataset/pelea3.mp4:/videos/video.mp4:ro

  # Prepares frames from the reader before they reach the inference service.
  stream_preprocess:
    build:
      dockerfile: Dockerfile
      context: preprocess
    container_name: stream_preprocess
    depends_on:
      - redis
      - stream_reader
      - stream_inference
    environment:
      stream_label: "pelea3"
      redis_host: "redis"
      redis_port: "6379"
      redis_db: "0"
    restart: unless-stopped

  # Runs the model on preprocessed frames; writes results to ./rt_out.
  stream_inference:
    build:
      dockerfile: Dockerfile
      context: inference
    container_name: stream_inference
    # Uncomment to reserve an NVIDIA GPU for inference (requires the
    # NVIDIA container toolkit on the host).
    # deploy:
    #   resources:
    #     reservations:
    #       devices:
    #         - driver: nvidia
    #           count: all
    #           capabilities: [gpu]
    depends_on:
      - redis
    environment:
      model_folder: "/app/models"
      model_name: "tuned"
      out_folder: "/app/out_folder"
      stream_label: "pelea3"
      threshold: "0.75"
      redis_host: "redis"
      redis_port: "6379"
      redis_db: "0"
    restart: unless-stopped
    volumes:
      - ./rt_out:/app/out_folder
      - ./models:/app/models

  # Web UI exposing inference output on host port 8080.
  web_inference:
    build:
      dockerfile: Dockerfile
      context: web
    container_name: web_inference
    depends_on:
      - redis
      - stream_inference
    environment:
      # NOTE(review): same embedded credentials as stream_reader — see note there.
      stream_url: "rtsp://admin:labvision2019@10.1.8.182:554"
      # stream_url: "/videos/video.mp4"
      out_folder: "/app/out_folder"
      stream_label: "pelea3"
      redis_host: "redis"
      redis_port: "6379"
      redis_db: "0"
    restart: unless-stopped
    volumes:
      - ./rt_out:/app/out_folder
    ports:
      - "8080:8080"