blob: 4d35faa576729d8bf76fa1bcc2088c21655ef310 (
plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
|
# Docker Compose stack: an Ollama LLM server (GPU-backed), the Ollama web UI,
# and a GPU-enabled Whisper ASR web service. Named volumes persist Ollama
# models and the Whisper service's pip/poetry/model caches across restarts.
version: '3.6'
services:
  ollama:
    # Reserve one NVIDIA GPU for model inference.
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities:
                - gpu
    volumes:
      - ollama:/root/.ollama
    # Exposes the Ollama API on the host. Remove this mapping to keep the
    # API reachable only from other services on the compose network.
    ports:
      - "11434:11434"  # quoted: port mappings must be strings (YAML 1.1 sexagesimal trap)
    pull_policy: always
    tty: true
    restart: "no"  # quoted deliberately: bare `no` parses as boolean false in YAML 1.1
    image: ollama/ollama:0.1.44
  ollama-webui:
    build:
      context: ./ollama-webui
      args:
        # Baked into the frontend at build time; runtime value is overridden
        # by the OLLAMA_API_BASE_URL environment variable below.
        OLLAMA_API_BASE_URL: '/ollama/api'
      dockerfile: Dockerfile
    image: reg.reaweb.uk/ollama-webui
    depends_on:
      - ollama
    ports:
      - "3000:8080"  # quoted: port mappings must be strings
    environment:
      # Talks to the ollama service over the compose network by service name.
      - "OLLAMA_API_BASE_URL=http://ollama:11434/api"
    extra_hosts:
      - host.docker.internal:host-gateway
    restart: "no"  # quoted deliberately: bare `no` parses as boolean false in YAML 1.1
  whisper-asr-webservice-gpu:
    build:
      context: ./whisper-asr-webservice
      dockerfile: Dockerfile.gpu
    image: reg.reaweb.uk/whisper-web
    # Reserve one NVIDIA GPU for ASR inference.
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities:
                - gpu
    environment:
      - ASR_MODEL=base.en
      - ASR_ENGINE=openai_whisper
    ports:
      - "9000:9000"
    volumes:
      # Bind-mount the app source for live development; cache volumes avoid
      # re-downloading dependencies and models on every container rebuild.
      - ./whisper-asr-webservice/app:/app/app
      - cache-pip:/root/.cache/pip
      - cache-poetry:/root/.cache/poetry
      - cache-whisper:/root/.cache/whisper
    restart: "no"  # quoted deliberately: bare `no` parses as boolean false in YAML 1.1
volumes:
  ollama:
  cache-pip:
  cache-poetry:
  cache-whisper:
  # NOTE(review): not mounted by any service above — presumably used when the
  # ASR engine is switched to faster_whisper; confirm before removing.
  cache-faster-whisper:
|