Skip to content

Commit 4491a89

Browse files
committed
add AudioQnA for openEuler
Signed-off-by: zhihang <zhihangdeng@link.cuhk.edu.cn>
1 parent d9cd9ee commit 4491a89

File tree

5 files changed

+264
-0
lines changed

5 files changed

+264
-0
lines changed

AudioQnA/Dockerfile.openEuler

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,10 @@
1+
# Copyright (C) 2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: Apache-2.0

# Build on the openEuler flavor of the OPEA comps base image; both the
# repository and the tag are overridable at build time.
ARG IMAGE_REPO=opea
ARG BASE_TAG=latest
FROM $IMAGE_REPO/comps-base:$BASE_TAG-openeuler

# Megaservice entrypoint source, placed in the base image's $HOME.
COPY ./audioqna.py $HOME/audioqna.py

ENTRYPOINT ["python", "audioqna.py"]
Lines changed: 92 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,92 @@
1+
# Copyright (C) 2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: Apache-2.0

services:
  whisper-service:
    image: ${REGISTRY:-opea}/whisper:${TAG:-latest}-openeuler
    container_name: whisper-service
    ports:
      # Port mappings are quoted to avoid YAML implicit-typing surprises.
      - "${WHISPER_SERVER_PORT:-7066}:7066"
    ipc: host
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
    restart: unless-stopped
  speecht5-service:
    image: ${REGISTRY:-opea}/speecht5:${TAG:-latest}-openeuler
    container_name: speecht5-service
    ports:
      - "${SPEECHT5_SERVER_PORT:-7055}:7055"
    ipc: host
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
    restart: unless-stopped
  vllm-service:
    image: openeuler/vllm-cpu:0.9.1-oe2403lts
    container_name: vllm-service
    ports:
      - "${LLM_SERVER_PORT:-3006}:80"
    volumes:
      - "${MODEL_CACHE:-./data}:/root/.cache/huggingface/hub"
    shm_size: 128g
    privileged: true
    environment:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      HF_TOKEN: ${HF_TOKEN}
      LLM_MODEL_ID: ${LLM_MODEL_ID}
      VLLM_TORCH_PROFILER_DIR: "/mnt"
      LLM_SERVER_PORT: ${LLM_SERVER_PORT}
      VLLM_CPU_OMP_THREADS_BIND: all
      VLLM_CPU_KVCACHE_SPACE: 30
    healthcheck:
      # Probe the server's own listener (port 80 inside the container) instead
      # of $host_ip:$LLM_SERVER_PORT: the old form breaks when host_ip is unset
      # at compose-interpolation time and relies on the container being able to
      # reach the host-published port.
      test: ["CMD-SHELL", "curl -f http://localhost:80/health || exit 1"]
      interval: 10s
      timeout: 10s
      retries: 100
    command: --model ${LLM_MODEL_ID} --host 0.0.0.0 --port 80
  audioqna-xeon-backend-server:
    image: ${REGISTRY:-opea}/audioqna:${TAG:-latest}-openeuler
    container_name: audioqna-xeon-backend-server
    depends_on:
      - whisper-service
      - vllm-service
      - speecht5-service
    ports:
      - "3008:8888"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
      - MEGA_SERVICE_HOST_IP=${MEGA_SERVICE_HOST_IP}
      - WHISPER_SERVER_HOST_IP=${WHISPER_SERVER_HOST_IP}
      - WHISPER_SERVER_PORT=${WHISPER_SERVER_PORT}
      - LLM_SERVER_HOST_IP=${LLM_SERVER_HOST_IP}
      - LLM_SERVER_PORT=${LLM_SERVER_PORT}
      - LLM_MODEL_ID=${LLM_MODEL_ID}
      - SPEECHT5_SERVER_HOST_IP=${SPEECHT5_SERVER_HOST_IP}
      - SPEECHT5_SERVER_PORT=${SPEECHT5_SERVER_PORT}
    ipc: host
    restart: always
  audioqna-xeon-ui-server:
    image: ${REGISTRY:-opea}/audioqna-ui:${TAG:-latest}-openeuler
    container_name: audioqna-xeon-ui-server
    depends_on:
      - audioqna-xeon-backend-server
    ports:
      - "5173:5173"
    environment:
      - no_proxy=${no_proxy}
      - https_proxy=${https_proxy}
      - http_proxy=${http_proxy}
      - CHAT_URL=${BACKEND_SERVICE_ENDPOINT}
    ipc: host
    restart: always

networks:
  default:
    driver: bridge

AudioQnA/docker_image_build/build.yaml

Lines changed: 29 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -13,12 +13,29 @@ services:
1313
context: ../
1414
dockerfile: ./Dockerfile
1515
image: ${REGISTRY:-opea}/audioqna:${TAG:-latest}
16+
audioqna-openeuler:
17+
build:
18+
args:
19+
IMAGE_REPO: ${REGISTRY}
20+
BASE_TAG: ${TAG}
21+
http_proxy: ${http_proxy}
22+
https_proxy: ${https_proxy}
23+
no_proxy: ${no_proxy}
24+
context: ../
25+
dockerfile: ./Dockerfile.openEuler
26+
image: ${REGISTRY:-opea}/audioqna:${TAG:-latest}-openeuler
1627
audioqna-ui:
1728
build:
1829
context: ../ui
1930
dockerfile: ./docker/Dockerfile
2031
extends: audioqna
2132
image: ${REGISTRY:-opea}/audioqna-ui:${TAG:-latest}
33+
audioqna-ui-openeuler:
34+
build:
35+
context: ../ui
36+
dockerfile: ./docker/Dockerfile.openEuler
37+
extends: audioqna
38+
image: ${REGISTRY:-opea}/audioqna-ui:${TAG:-latest}-openeuler
2239
audioqna-multilang:
2340
build:
2441
context: ../
@@ -37,6 +54,12 @@ services:
3754
dockerfile: comps/third_parties/whisper/src/Dockerfile
3855
extends: audioqna
3956
image: ${REGISTRY:-opea}/whisper:${TAG:-latest}
57+
whisper-openeuler:
58+
build:
59+
context: GenAIComps
60+
dockerfile: comps/third_parties/whisper/src/Dockerfile.openEuler
61+
extends: audioqna
62+
image: ${REGISTRY:-opea}/whisper:${TAG:-latest}-openeuler
4063
asr:
4164
build:
4265
context: GenAIComps
@@ -61,6 +84,12 @@ services:
6184
dockerfile: comps/third_parties/speecht5/src/Dockerfile
6285
extends: audioqna
6386
image: ${REGISTRY:-opea}/speecht5:${TAG:-latest}
87+
speecht5-openeuler:
88+
build:
89+
context: GenAIComps
90+
dockerfile: comps/third_parties/speecht5/src/Dockerfile.openEuler
91+
extends: audioqna
92+
image: ${REGISTRY:-opea}/speecht5:${TAG:-latest}-openeuler
6493
tts:
6594
build:
6695
context: GenAIComps
Lines changed: 103 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,103 @@
1+
#!/bin/bash
# Copyright (C) 2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: Apache-2.0

set -e

# Image coordinates; overridable from the CI environment.
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
export REGISTRY=${IMAGE_REPO}
export TAG=${IMAGE_TAG}
# Host directory used as the model cache. NOTE(review): the source variable is
# lowercase `model_cache` — presumably a CI-provided knob; confirm spelling.
export MODEL_CACHE=${model_cache:-"./data"}

WORKPATH=$(dirname "$PWD")
LOG_PATH="$WORKPATH/tests"
# First IPv4 address of the host; used to reach the published service ports.
ip_address=$(hostname -I | awk '{print $1}')
18+
function build_docker_images() {
    # Build the openEuler base image plus all AudioQnA service images.
    opea_branch=${opea_branch:-"main"}

    cd $WORKPATH/docker_image_build
    # Make the clone idempotent: under `set -e`, `git clone` into a leftover
    # GenAIComps checkout from a previous run would abort the whole script.
    rm -rf GenAIComps
    git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git
    pushd GenAIComps
    echo "GenAIComps test commit is $(git rev-parse HEAD)"
    # The comps base image is built directly; the compose services below
    # extend it via their openEuler Dockerfiles.
    docker build --no-cache -t ${REGISTRY}/comps-base:${TAG}-openeuler --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile.openEuler .
    popd && sleep 1s

    echo "Build all the images with --no-cache, check docker_image_build.log for details..."
    service_list="audioqna-openeuler audioqna-ui-openeuler whisper-openeuler speecht5-openeuler"
    docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log

    docker images && sleep 1s
}
34+
35+
function start_services() {
    # Bring the stack up and block until the vLLM server reports readiness.
    cd $WORKPATH/docker_compose/intel/cpu/xeon/
    export host_ip=${ip_address}
    source set_env.sh
    # sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env

    # Start Docker Containers
    docker compose -f compose_openeuler.yaml up -d > ${LOG_PATH}/start_services_with_compose.log

    # Poll the vLLM log for its startup marker (up to 200 x 5s ~= 17 min).
    n=0
    until [[ "$n" -ge 200 ]]; do
        docker logs vllm-service > $LOG_PATH/vllm_service_start.log 2>&1
        if grep -q complete $LOG_PATH/vllm_service_start.log; then
            break
        fi
        sleep 5s
        n=$((n+1))
    done
    # Fail fast if vLLM never became ready instead of letting the later curl
    # validation fail with a confusing error.
    if [[ "$n" -ge 200 ]]; then
        echo "vllm-service did not become ready in time; see $LOG_PATH/vllm_service_start.log"
        exit 1
    fi
}
53+
54+
55+
function validate_megaservice() {
    # End-to-end check: POST a tiny base64-encoded WAV to the gateway; a
    # healthy pipeline answers with base64-encoded audio.
    local payload='{"audio": "UklGRigAAABXQVZFZm10IBIAAAABAAEARKwAAIhYAQACABAAAABkYXRhAgAAAAEA", "max_tokens":64}'
    response=$(http_proxy="" curl http://${ip_address}:3008/v1/audioqna -XPOST -d "$payload" -H 'Content-Type: application/json')

    # Always capture per-service logs, whether the check passes or not.
    docker logs whisper-service > $LOG_PATH/whisper-service.log
    docker logs speecht5-service > $LOG_PATH/tts-service.log
    docker logs vllm-service > $LOG_PATH/vllm-service.log
    docker logs audioqna-xeon-backend-server > $LOG_PATH/audioqna-xeon-backend-server.log

    # Strip the surrounding JSON quotes, then decode the audio payload.
    echo "$response" | sed 's/^"//;s/"$//' | base64 -d > speech.mp3

    # The decoded bytes are expected to carry a RIFF signature (WAV container).
    if [[ $(file speech.mp3) != *"RIFF"* ]]; then
        echo "Result wrong."
        exit 1
    fi
    echo "Result correct."
}
72+
73+
function stop_docker() {
    # Tear down any running stack so the test starts from a clean slate.
    cd $WORKPATH/docker_compose/intel/cpu/xeon/
    # Pass the same compose file to both commands: a bare `docker compose rm`
    # resolves the default compose file, not the project started from
    # compose_openeuler.yaml, so the containers would be missed.
    docker compose -f compose_openeuler.yaml stop && docker compose -f compose_openeuler.yaml rm -f
}
77+
78+
function main() {
    # Phases are wrapped in ::group::/::endgroup:: markers so the CI log
    # renders each step as a collapsible section.
    echo "::group::stop_docker"
    stop_docker
    echo "::endgroup::"

    echo "::group::build_docker_images"
    # Build locally only when targeting the default "opea" repo; other repos
    # are expected to provide pre-built images.
    if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
    echo "::endgroup::"

    echo "::group::start_services"
    start_services
    echo "::endgroup::"

    echo "::group::validate_megaservice"
    validate_megaservice
    echo "::endgroup::"

    echo "::group::stop_docker"
    stop_docker
    # Reclaim disk space on the CI runner after the test.
    docker system prune -f
    echo "::endgroup::"
}

main
Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,30 @@
1+
# Copyright (C) 2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: Apache-2.0

# Node 20.11.1 on openEuler 24.03 LTS as the base image.
FROM openeuler/node:20.11.1-oe2403lts

# Refresh packages, install Git, and trim the yum cache to keep the layer small.
RUN yum update -y && \
    yum install -y \
    git && \
    yum clean all && \
    rm -rf /var/cache/yum

# Bring in the Svelte front-end sources.
COPY svelte /home/user/svelte

WORKDIR /home/user/svelte

# Install dependencies.
RUN npm install

# Produce the production build.
RUN npm run build

# Port served by the preview server.
EXPOSE 5173

# Serve the built app in preview mode, reachable from outside the container.
CMD ["npm", "run", "preview", "--", "--port", "5173", "--host", "0.0.0.0"]

0 commit comments

Comments
 (0)