From ebe6b473e9d0641cd5fa8e988839fe215dd5b328 Mon Sep 17 00:00:00 2001 From: Sihan Chen <39623753+Spycsh@users.noreply.github.com> Date: Fri, 6 Sep 2024 18:49:28 +0800 Subject: [PATCH] Add megaservice definition without microservice wrappers (#700) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- ChatQnA/docker/Dockerfile_no_wrapper | 34 +++ ChatQnA/docker/chatqna_no_wrapper.py | 265 ++++++++++++++++++ ChatQnA/docker/docker_build_compose.yaml | 5 + ChatQnA/docker/gaudi/compose_no_wrapper.yaml | 197 +++++++++++++ ChatQnA/docker/xeon/compose_no_wrapper.yaml | 184 ++++++++++++ .../tests/test_chatqna_no_wrapper_on_gaudi.sh | 251 +++++++++++++++++ .../tests/test_chatqna_no_wrapper_on_xeon.sh | 256 +++++++++++++++++ 7 files changed, 1192 insertions(+) create mode 100644 ChatQnA/docker/Dockerfile_no_wrapper create mode 100644 ChatQnA/docker/chatqna_no_wrapper.py create mode 100644 ChatQnA/docker/gaudi/compose_no_wrapper.yaml create mode 100644 ChatQnA/docker/xeon/compose_no_wrapper.yaml create mode 100644 ChatQnA/tests/test_chatqna_no_wrapper_on_gaudi.sh create mode 100644 ChatQnA/tests/test_chatqna_no_wrapper_on_xeon.sh diff --git a/ChatQnA/docker/Dockerfile_no_wrapper b/ChatQnA/docker/Dockerfile_no_wrapper new file mode 100644 index 000000000..c6adacaee --- /dev/null +++ b/ChatQnA/docker/Dockerfile_no_wrapper @@ -0,0 +1,34 @@ + + +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +FROM python:3.11-slim + +RUN apt-get update -y && apt-get install -y --no-install-recommends --fix-missing \ + libgl1-mesa-glx \ + libjemalloc-dev \ + vim \ + git + +RUN useradd -m -s /bin/bash user && \ + mkdir -p /home/user && \ + chown -R user /home/user/ + +WORKDIR /home/user/ +RUN git clone https://github.com/opea-project/GenAIComps.git + +WORKDIR /home/user/GenAIComps +RUN pip install --no-cache-dir --upgrade pip && \ + pip install --no-cache-dir -r /home/user/GenAIComps/requirements.txt && \ + pip install --no-cache-dir langchain_core + +COPY ./chatqna_no_wrapper.py /home/user/chatqna_no_wrapper.py + +ENV PYTHONPATH=$PYTHONPATH:/home/user/GenAIComps + +USER user + +WORKDIR /home/user + +ENTRYPOINT ["python", "chatqna_no_wrapper.py"] diff --git a/ChatQnA/docker/chatqna_no_wrapper.py b/ChatQnA/docker/chatqna_no_wrapper.py new file mode 100644 index 000000000..a2d007999 --- /dev/null +++ b/ChatQnA/docker/chatqna_no_wrapper.py @@ -0,0 +1,265 @@ +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import json +import os +import re + +from comps import ChatQnAGateway, MicroService, ServiceOrchestrator, ServiceType +from langchain_core.prompts import PromptTemplate + + +class ChatTemplate: + @staticmethod + def generate_rag_prompt(question, documents): + context_str = "\n".join(documents) + if context_str and len(re.findall("[\u4E00-\u9FFF]", context_str)) / len(context_str) >= 0.3: + # chinese context + template = """ +### 你将扮演一个乐于助人、尊重他人并诚实的助手,你的目标是帮助用户解答问题。有效地利用来自本地知识库的搜索结果。确保你的回答中只包含相关信息。如果你不确定问题的答案,请避免分享不准确的信息。 +### 搜索结果:{context} +### 问题:{question} +### 回答: +""" + else: + template = """ +### You are a helpful, respectful and honest assistant to help the user with questions. \ +Please refer to the search results obtained from the local knowledge base. \ +But be careful to not incorporate the information that you think is not relevant to the question. \ +If you don't know the answer to a question, please don't share false information. 
\n +### Search results: {context} \n +### Question: {question} \n +### Answer: +""" + return template.format(context=context_str, question=question) + + +MEGA_SERVICE_HOST_IP = os.getenv("MEGA_SERVICE_HOST_IP", "0.0.0.0") +MEGA_SERVICE_PORT = int(os.getenv("MEGA_SERVICE_PORT", 8888)) +# EMBEDDING_SERVICE_HOST_IP = os.getenv("EMBEDDING_SERVICE_HOST_IP", "0.0.0.0") +# EMBEDDING_SERVICE_PORT = int(os.getenv("EMBEDDING_SERVICE_PORT", 6000)) +# RETRIEVER_SERVICE_HOST_IP = os.getenv("RETRIEVER_SERVICE_HOST_IP", "0.0.0.0") +# RETRIEVER_SERVICE_PORT = int(os.getenv("RETRIEVER_SERVICE_PORT", 7000)) +# RERANK_SERVICE_HOST_IP = os.getenv("RERANK_SERVICE_HOST_IP", "0.0.0.0") +# RERANK_SERVICE_PORT = int(os.getenv("RERANK_SERVICE_PORT", 8000)) +# LLM_SERVICE_HOST_IP = os.getenv("LLM_SERVICE_HOST_IP", "0.0.0.0") +# LLM_SERVICE_PORT = int(os.getenv("LLM_SERVICE_PORT", 9000)) +EMBEDDING_SERVER_HOST_IP = os.getenv("EMBEDDING_SERVER_HOST_IP", "0.0.0.0") +EMBEDDING_SERVER_PORT = int(os.getenv("EMBEDDING_SERVER_PORT", 6006)) +RETRIEVER_SERVICE_HOST_IP = os.getenv("RETRIEVER_SERVICE_HOST_IP", "0.0.0.0") +RETRIEVER_SERVICE_PORT = int(os.getenv("RETRIEVER_SERVICE_PORT", 7000)) +RERANK_SERVER_HOST_IP = os.getenv("RERANK_SERVER_HOST_IP", "0.0.0.0") +RERANK_SERVER_PORT = int(os.getenv("RERANK_SERVER_PORT", 8808)) +LLM_SERVER_HOST_IP = os.getenv("LLM_SERVER_HOST_IP", "0.0.0.0") +LLM_SERVER_PORT = int(os.getenv("LLM_SERVER_PORT", 9009)) + + +def align_inputs(self, inputs, cur_node, runtime_graph, llm_parameters_dict, **kwargs): + if self.services[cur_node].service_type == ServiceType.EMBEDDING: + inputs["inputs"] = inputs["text"] + del inputs["text"] + elif self.services[cur_node].service_type == ServiceType.RETRIEVER: + # prepare the retriever params + retriever_parameters = kwargs.get("retriever_parameters", None) + if retriever_parameters: + inputs.update(retriever_parameters.dict()) + elif self.services[cur_node].service_type == ServiceType.LLM: + # convert TGI/vLLM to unified OpenAI /v1/chat/completions format + next_inputs = {} + next_inputs["model"] = "tgi" # specifically clarify the fake model to make the format unified + next_inputs["messages"] = [{"role": "user", "content": inputs["inputs"]}] + next_inputs["max_tokens"] = llm_parameters_dict["max_new_tokens"] + next_inputs["top_p"] = llm_parameters_dict["top_p"] + next_inputs["stream"] = inputs["streaming"] + next_inputs["frequency_penalty"] = inputs["repetition_penalty"] + next_inputs["temperature"] = inputs["temperature"] + inputs = next_inputs + + return inputs + + +def align_outputs(self, data, cur_node, inputs, runtime_graph, llm_parameters_dict, **kwargs): + next_data = {} + if self.services[cur_node].service_type == ServiceType.EMBEDDING: + assert isinstance(data, list) + next_data = {"text": inputs["inputs"], "embedding": data[0]} + elif self.services[cur_node].service_type == ServiceType.RETRIEVER: + + docs = [doc["text"] for doc in data["retrieved_docs"]] + + with_rerank = runtime_graph.downstream(cur_node)[0].startswith("rerank") + if with_rerank and docs: + # forward to rerank + # prepare inputs for rerank + next_data["query"] = data["initial_query"] + next_data["texts"] = [doc["text"] for doc in data["retrieved_docs"]] + else: + # forward to llm + if not docs: + # delete the rerank from retriever -> rerank -> llm + for ds in reversed(runtime_graph.downstream(cur_node)): + for nds in runtime_graph.downstream(ds): + runtime_graph.add_edge(cur_node, nds) + runtime_graph.delete_node_if_exists(ds) + + # handle template + # if user provides template, 
then format the prompt with it
+            # otherwise, use the default template
+            prompt = data["initial_query"]
+            chat_template = llm_parameters_dict["chat_template"]
+            if chat_template:
+                prompt_template = PromptTemplate.from_template(chat_template)
+                input_variables = prompt_template.input_variables
+                if sorted(input_variables) == ["context", "question"]:
+                    prompt = prompt_template.format(question=data["initial_query"], context="\n".join(docs))
+                elif input_variables == ["question"]:
+                    prompt = prompt_template.format(question=data["initial_query"])
+                else:
+                    print(f"{prompt_template} not used, we only support 2 input variables ['question', 'context']")
+                    prompt = ChatTemplate.generate_rag_prompt(data["initial_query"], docs)
+            else:
+                prompt = ChatTemplate.generate_rag_prompt(data["initial_query"], docs)
+
+            next_data["inputs"] = prompt
+
+    elif self.services[cur_node].service_type == ServiceType.RERANK:
+        # rerank the inputs with the scores
+        reranker_parameters = kwargs.get("reranker_parameters", None)
+        top_n = reranker_parameters.top_n if reranker_parameters else 1
+        docs = inputs["texts"]
+        reranked_docs = []
+        for best_response in data[:top_n]:
+            reranked_docs.append(docs[best_response["index"]])
+
+        # handle template
+        # if user provides template, then format the prompt with it
+        # otherwise, use the default template
+        prompt = inputs["query"]
+        chat_template = llm_parameters_dict["chat_template"]
+        if chat_template:
+            prompt_template = PromptTemplate.from_template(chat_template)
+            input_variables = prompt_template.input_variables
+            if sorted(input_variables) == ["context", "question"]:
+                prompt = prompt_template.format(question=prompt, context="\n".join(docs))
+            elif input_variables == ["question"]:
+                prompt = prompt_template.format(question=prompt)
+            else:
+                print(f"{prompt_template} not used, we only support 2 input variables ['question', 'context']")
+                prompt = ChatTemplate.generate_rag_prompt(prompt, docs)
+        else:
+            prompt = ChatTemplate.generate_rag_prompt(prompt, docs)
+
+        next_data["inputs"] = prompt
+
+    return next_data
+
+
+def align_generator(self, gen, **kwargs):
+    # OpenAI-style streaming response format, e.g.:
+    # b'data:{"id":"","object":"text_completion","created":1725530204,"model":"meta-llama/Meta-Llama-3-8B-Instruct","system_fingerprint":"2.0.1-native","choices":[{"index":0,"delta":{"role":"assistant","content":"?"},"logprobs":null,"finish_reason":null}]}\n\n'
+    for line in gen:
+        line = line.decode("utf-8")
+        start = line.find("{")
+        end = line.rfind("}") + 1
+
+        json_str = line[start:end]
+        try:
+            # the stream occasionally yields empty or non-JSON chunks; fall back to forwarding the raw string
+            json_data = json.loads(json_str)
+            if json_data["choices"][0]["finish_reason"] != "eos_token":
+                yield f"data: {repr(json_data['choices'][0]['delta']['content'].encode('utf-8'))}\n\n"
+        except Exception as e:
+            yield f"data: {repr(json_str.encode('utf-8'))}\n\n"
+    yield "data: [DONE]\n\n"
+
+
+class ChatQnAService:
+    def __init__(self, host="0.0.0.0", port=8000):
+        self.host = host
+        self.port = port
+        ServiceOrchestrator.align_inputs = align_inputs
+        ServiceOrchestrator.align_outputs = align_outputs
+        ServiceOrchestrator.align_generator = align_generator
+        self.megaservice = ServiceOrchestrator()
+
+    def add_remote_service(self):
+
+        embedding = MicroService(
+            name="embedding",
+            host=EMBEDDING_SERVER_HOST_IP,
+            port=EMBEDDING_SERVER_PORT,
+            endpoint="/embed",
+            use_remote_service=True,
+            service_type=ServiceType.EMBEDDING,
+        )
+
+        retriever = MicroService(
+            name="retriever",
+            host=RETRIEVER_SERVICE_HOST_IP,
+            port=RETRIEVER_SERVICE_PORT,
+            
endpoint="/v1/retrieval", + use_remote_service=True, + service_type=ServiceType.RETRIEVER, + ) + + rerank = MicroService( + name="rerank", + host=RERANK_SERVER_HOST_IP, + port=RERANK_SERVER_PORT, + endpoint="/rerank", + use_remote_service=True, + service_type=ServiceType.RERANK, + ) + + llm = MicroService( + name="llm", + host=LLM_SERVER_HOST_IP, + port=LLM_SERVER_PORT, + endpoint="/v1/chat/completions", + use_remote_service=True, + service_type=ServiceType.LLM, + ) + self.megaservice.add(embedding).add(retriever).add(rerank).add(llm) + self.megaservice.flow_to(embedding, retriever) + self.megaservice.flow_to(retriever, rerank) + self.megaservice.flow_to(rerank, llm) + self.gateway = ChatQnAGateway(megaservice=self.megaservice, host="0.0.0.0", port=self.port) + + def add_remote_service_without_rerank(self): + + embedding = MicroService( + name="embedding", + host=EMBEDDING_SERVER_HOST_IP, + port=EMBEDDING_SERVER_PORT, + endpoint="/embed", + use_remote_service=True, + service_type=ServiceType.EMBEDDING, + ) + + retriever = MicroService( + name="retriever", + host=RETRIEVER_SERVICE_HOST_IP, + port=RETRIEVER_SERVICE_PORT, + endpoint="/v1/retrieval", + use_remote_service=True, + service_type=ServiceType.RETRIEVER, + ) + + llm = MicroService( + name="llm", + host=LLM_SERVER_HOST_IP, + port=LLM_SERVER_PORT, + endpoint="/v1/chat/completions", + use_remote_service=True, + service_type=ServiceType.LLM, + ) + self.megaservice.add(embedding).add(retriever).add(llm) + self.megaservice.flow_to(embedding, retriever) + self.megaservice.flow_to(retriever, llm) + self.gateway = ChatQnAGateway(megaservice=self.megaservice, host="0.0.0.0", port=self.port) + + +if __name__ == "__main__": + chatqna = ChatQnAService(host=MEGA_SERVICE_HOST_IP, port=MEGA_SERVICE_PORT) + chatqna.add_remote_service() + # chatqna.add_remote_service_without_rerank() diff --git a/ChatQnA/docker/docker_build_compose.yaml b/ChatQnA/docker/docker_build_compose.yaml index 7011167ad..0d9200653 100644 --- a/ChatQnA/docker/docker_build_compose.yaml +++ b/ChatQnA/docker/docker_build_compose.yaml @@ -20,6 +20,11 @@ services: dockerfile: ./Dockerfile_without_rerank extends: chatqna image: ${REGISTRY:-opea}/chatqna-without-rerank:${TAG:-latest} + chatqna-no-wrapper: + build: + dockerfile: ./Dockerfile_no_wrapper + extends: chatqna + image: ${REGISTRY:-opea}/chatqna-no-wrapper:${TAG:-latest} chatqna-ui: build: context: ui diff --git a/ChatQnA/docker/gaudi/compose_no_wrapper.yaml b/ChatQnA/docker/gaudi/compose_no_wrapper.yaml new file mode 100644 index 000000000..cad1adfdd --- /dev/null +++ b/ChatQnA/docker/gaudi/compose_no_wrapper.yaml @@ -0,0 +1,197 @@ +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +services: + redis-vector-db: + image: redis/redis-stack:7.2.0-v9 + container_name: redis-vector-db + ports: + - "6379:6379" + - "8001:8001" + dataprep-redis-service: + image: ${REGISTRY:-opea}/dataprep-redis:${TAG:-latest} + container_name: dataprep-redis-server + depends_on: + - redis-vector-db + - tei-embedding-service + ports: + - "6007:6007" + environment: + no_proxy: ${no_proxy} + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + REDIS_URL: ${REDIS_URL} + INDEX_NAME: ${INDEX_NAME} + TEI_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT} + HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN} + tei-embedding-service: + image: ${REGISTRY:-opea}/tei-gaudi:${TAG:-latest} + container_name: tei-embedding-gaudi-server + ports: + - "8090:80" + volumes: + - "./data:/data" + runtime: habana + cap_add: + - SYS_NICE + ipc: host + 
environment: + no_proxy: ${no_proxy} + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + HABANA_VISIBLE_DEVICES: all + OMPI_MCA_btl_vader_single_copy_mechanism: none + MAX_WARMUP_SEQUENCE_LENGTH: 512 + INIT_HCCL_ON_ACQUIRE: 0 + ENABLE_EXPERIMENTAL_FLAGS: true + command: --model-id ${EMBEDDING_MODEL_ID} --auto-truncate + # embedding: + # image: ${REGISTRY:-opea}/embedding-tei:${TAG:-latest} + # container_name: embedding-tei-server + # depends_on: + # - tei-embedding-service + # ports: + # - "6000:6000" + # ipc: host + # environment: + # no_proxy: ${no_proxy} + # http_proxy: ${http_proxy} + # https_proxy: ${https_proxy} + # TEI_EMBEDDING_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT} + # restart: unless-stopped + retriever: + image: ${REGISTRY:-opea}/retriever-redis:${TAG:-latest} + container_name: retriever-redis-server + depends_on: + - redis-vector-db + ports: + - "7000:7000" + ipc: host + environment: + no_proxy: ${no_proxy} + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + REDIS_URL: ${REDIS_URL} + INDEX_NAME: ${INDEX_NAME} + restart: unless-stopped + tei-reranking-service: + image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 + container_name: tei-reranking-gaudi-server + ports: + - "8808:80" + volumes: + - "./data:/data" + shm_size: 1g + environment: + no_proxy: ${no_proxy} + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN} + HF_HUB_DISABLE_PROGRESS_BARS: 1 + HF_HUB_ENABLE_HF_TRANSFER: 0 + command: --model-id ${RERANK_MODEL_ID} --auto-truncate + # reranking: + # image: ${REGISTRY:-opea}/reranking-tei:${TAG:-latest} + # container_name: reranking-tei-gaudi-server + # depends_on: + # - tei-reranking-service + # ports: + # - "8000:8000" + # ipc: host + # environment: + # no_proxy: ${no_proxy} + # http_proxy: ${http_proxy} + # https_proxy: ${https_proxy} + # TEI_RERANKING_ENDPOINT: ${TEI_RERANKING_ENDPOINT} + # HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN} + # HF_HUB_DISABLE_PROGRESS_BARS: 1 + # HF_HUB_ENABLE_HF_TRANSFER: 0 + # restart: unless-stopped + tgi-service: + image: ghcr.io/huggingface/tgi-gaudi:2.0.1 + container_name: tgi-gaudi-server + ports: + - "8005:80" + volumes: + - "./data:/data" + environment: + no_proxy: ${no_proxy} + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN} + HF_HUB_DISABLE_PROGRESS_BARS: 1 + HF_HUB_ENABLE_HF_TRANSFER: 0 + HABANA_VISIBLE_DEVICES: all + OMPI_MCA_btl_vader_single_copy_mechanism: none + runtime: habana + cap_add: + - SYS_NICE + ipc: host + command: --model-id ${LLM_MODEL_ID} --max-input-length 2048 --max-total-tokens 4096 + # llm: + # image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest} + # container_name: llm-tgi-gaudi-server + # depends_on: + # - tgi-service + # ports: + # - "9000:9000" + # ipc: host + # environment: + # no_proxy: ${no_proxy} + # http_proxy: ${http_proxy} + # https_proxy: ${https_proxy} + # TGI_LLM_ENDPOINT: ${TGI_LLM_ENDPOINT} + # HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN} + # HF_HUB_DISABLE_PROGRESS_BARS: 1 + # HF_HUB_ENABLE_HF_TRANSFER: 0 + # restart: unless-stopped + chaqna-gaudi-backend-server: + image: ${REGISTRY:-opea}/chatqna-no-wrapper:${TAG:-latest} + container_name: chatqna-gaudi-backend-server + depends_on: + - redis-vector-db + - tei-embedding-service + # - embedding + - retriever + - tei-reranking-service + # - reranking + - tgi-service + # - llm + ports: + - "8888:8888" + environment: + - no_proxy=${no_proxy} + - https_proxy=${https_proxy} + - http_proxy=${http_proxy} + - 
MEGA_SERVICE_HOST_IP=${MEGA_SERVICE_HOST_IP} + - EMBEDDING_SERVER_HOST_IP=${EMBEDDING_SERVER_HOST_IP} + - EMBEDDING_SERVER_PORT=${EMBEDDING_SERVER_PORT:-8090} + - RETRIEVER_SERVICE_HOST_IP=${RETRIEVER_SERVICE_HOST_IP} + - RERANK_SERVER_HOST_IP=${RERANK_SERVER_HOST_IP} + - RERANK_SERVER_PORT=${RERANK_SERVER_PORT:-8808} + - LLM_SERVER_HOST_IP=${LLM_SERVER_HOST_IP} + - LLM_SERVER_PORT=${LLM_SERVER_PORT:-8005} + - LOGFLAG=${LOGFLAG} + ipc: host + restart: always + chaqna-gaudi-ui-server: + image: ${REGISTRY:-opea}/chatqna-ui:${TAG:-latest} + container_name: chatqna-gaudi-ui-server + depends_on: + - chaqna-gaudi-backend-server + ports: + - "5173:5173" + environment: + - no_proxy=${no_proxy} + - https_proxy=${https_proxy} + - http_proxy=${http_proxy} + - CHAT_BASE_URL=${BACKEND_SERVICE_ENDPOINT} + - UPLOAD_FILE_BASE_URL=${DATAPREP_SERVICE_ENDPOINT} + - GET_FILE=${DATAPREP_GET_FILE_ENDPOINT} + - DELETE_FILE=${DATAPREP_DELETE_FILE_ENDPOINT} + ipc: host + restart: always + +networks: + default: + driver: bridge diff --git a/ChatQnA/docker/xeon/compose_no_wrapper.yaml b/ChatQnA/docker/xeon/compose_no_wrapper.yaml new file mode 100644 index 000000000..317a206fb --- /dev/null +++ b/ChatQnA/docker/xeon/compose_no_wrapper.yaml @@ -0,0 +1,184 @@ +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +services: + redis-vector-db: + image: redis/redis-stack:7.2.0-v9 + container_name: redis-vector-db + ports: + - "6379:6379" + - "8001:8001" + dataprep-redis-service: + image: ${REGISTRY:-opea}/dataprep-redis:${TAG:-latest} + container_name: dataprep-redis-server + depends_on: + - redis-vector-db + - tei-embedding-service + ports: + - "6007:6007" + environment: + no_proxy: ${no_proxy} + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + REDIS_URL: ${REDIS_URL} + REDIS_HOST: ${REDIS_HOST} + INDEX_NAME: ${INDEX_NAME} + TEI_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT} + HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN} + tei-embedding-service: + image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 + container_name: tei-embedding-server + ports: + - "6006:80" + volumes: + - "./data:/data" + shm_size: 1g + environment: + no_proxy: ${no_proxy} + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + command: --model-id ${EMBEDDING_MODEL_ID} --auto-truncate + # embedding: + # image: ${REGISTRY:-opea}/embedding-tei:${TAG:-latest} + # container_name: embedding-tei-server + # depends_on: + # - tei-embedding-service + # ports: + # - "6000:6000" + # ipc: host + # environment: + # no_proxy: ${no_proxy} + # http_proxy: ${http_proxy} + # https_proxy: ${https_proxy} + # TEI_EMBEDDING_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT} + # restart: unless-stopped + retriever: + image: ${REGISTRY:-opea}/retriever-redis:${TAG:-latest} + container_name: retriever-redis-server + depends_on: + - redis-vector-db + ports: + - "7000:7000" + ipc: host + environment: + no_proxy: ${no_proxy} + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + REDIS_URL: ${REDIS_URL} + INDEX_NAME: ${INDEX_NAME} + TEI_EMBEDDING_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT} + HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN} + restart: unless-stopped + tei-reranking-service: + image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 + container_name: tei-reranking-server + ports: + - "8808:80" + volumes: + - "./data:/data" + shm_size: 1g + environment: + no_proxy: ${no_proxy} + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN} + HF_HUB_DISABLE_PROGRESS_BARS: 1 + 
HF_HUB_ENABLE_HF_TRANSFER: 0 + command: --model-id ${RERANK_MODEL_ID} --auto-truncate + # reranking: + # image: ${REGISTRY:-opea}/reranking-tei:${TAG:-latest} + # container_name: reranking-tei-xeon-server + # depends_on: + # - tei-reranking-service + # ports: + # - "8000:8000" + # ipc: host + # environment: + # no_proxy: ${no_proxy} + # http_proxy: ${http_proxy} + # https_proxy: ${https_proxy} + # TEI_RERANKING_ENDPOINT: ${TEI_RERANKING_ENDPOINT} + # HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN} + # HF_HUB_DISABLE_PROGRESS_BARS: 1 + # HF_HUB_ENABLE_HF_TRANSFER: 0 + # restart: unless-stopped + tgi-service: + image: ghcr.io/huggingface/text-generation-inference:sha-e4201f4-intel-cpu + container_name: tgi-service + ports: + - "9009:80" + volumes: + - "./data:/data" + shm_size: 1g + environment: + no_proxy: ${no_proxy} + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN} + HF_HUB_DISABLE_PROGRESS_BARS: 1 + HF_HUB_ENABLE_HF_TRANSFER: 0 + command: --model-id ${LLM_MODEL_ID} --cuda-graphs 0 + # llm: + # image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest} + # container_name: llm-tgi-server + # depends_on: + # - tgi-service + # ports: + # - "9000:9000" + # ipc: host + # environment: + # no_proxy: ${no_proxy} + # http_proxy: ${http_proxy} + # https_proxy: ${https_proxy} + # TGI_LLM_ENDPOINT: ${TGI_LLM_ENDPOINT} + # HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN} + # HF_HUB_DISABLE_PROGRESS_BARS: 1 + # HF_HUB_ENABLE_HF_TRANSFER: 0 + # restart: unless-stopped + chaqna-xeon-backend-server: + image: ${REGISTRY:-opea}/chatqna-no-wrapper:${TAG:-latest} + container_name: chatqna-xeon-backend-server + depends_on: + - redis-vector-db + - tei-embedding-service + # - embedding + - dataprep-redis-service + - retriever + - tei-reranking-service + # - reranking + - tgi-service + # - llm + ports: + - "8888:8888" + environment: + - no_proxy=${no_proxy} + - https_proxy=${https_proxy} + - http_proxy=${http_proxy} + - MEGA_SERVICE_HOST_IP=${MEGA_SERVICE_HOST_IP} + - EMBEDDING_SERVER_HOST_IP=${EMBEDDING_SERVER_HOST_IP} + - RETRIEVER_SERVICE_HOST_IP=${RETRIEVER_SERVICE_HOST_IP} + - RERANK_SERVER_HOST_IP=${RERANK_SERVER_HOST_IP} + - LLM_SERVER_HOST_IP=${LLM_SERVER_HOST_IP} + ipc: host + restart: always + chaqna-xeon-ui-server: + image: ${REGISTRY:-opea}/chatqna-ui:${TAG:-latest} + container_name: chatqna-xeon-ui-server + depends_on: + - chaqna-xeon-backend-server + ports: + - "5173:5173" + environment: + - no_proxy=${no_proxy} + - https_proxy=${https_proxy} + - http_proxy=${http_proxy} + - CHAT_BASE_URL=${BACKEND_SERVICE_ENDPOINT} + - UPLOAD_FILE_BASE_URL=${DATAPREP_SERVICE_ENDPOINT} + - GET_FILE=${DATAPREP_GET_FILE_ENDPOINT} + - DELETE_FILE=${DATAPREP_DELETE_FILE_ENDPOINT} + ipc: host + restart: always + +networks: + default: + driver: bridge diff --git a/ChatQnA/tests/test_chatqna_no_wrapper_on_gaudi.sh b/ChatQnA/tests/test_chatqna_no_wrapper_on_gaudi.sh new file mode 100644 index 000000000..2bc2e7d89 --- /dev/null +++ b/ChatQnA/tests/test_chatqna_no_wrapper_on_gaudi.sh @@ -0,0 +1,251 @@ +#!/bin/bash +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +set -e +IMAGE_REPO=${IMAGE_REPO:-"opea"} +IMAGE_TAG=${IMAGE_TAG:-"latest"} +echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}" +echo "TAG=IMAGE_TAG=${IMAGE_TAG}" +export REGISTRY=${IMAGE_REPO} +export TAG=${IMAGE_TAG} + +WORKPATH=$(dirname "$PWD") +LOG_PATH="$WORKPATH/tests" +ip_address=$(hostname -I | awk '{print $1}') + +function build_docker_images() { + cd $WORKPATH/docker + git clone 
https://github.com/opea-project/GenAIComps.git + git clone https://github.com/huggingface/tei-gaudi + + echo "Build all the images with --no-cache, check docker_image_build.log for details..." + service_list="chatqna-no-wrapper chatqna-ui dataprep-redis retriever-redis tei-gaudi" + docker compose -f docker_build_compose.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log + + docker pull ghcr.io/huggingface/tgi-gaudi:2.0.1 + docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 + + docker images && sleep 1s +} + +function start_services() { + cd $WORKPATH/docker/gaudi + export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5" + export RERANK_MODEL_ID="BAAI/bge-reranker-base" + export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct" + export TEI_EMBEDDING_ENDPOINT="http://${ip_address}:8090" + export TEI_RERANKING_ENDPOINT="http://${ip_address}:8808" + export TGI_LLM_ENDPOINT="http://${ip_address}:8005" + export REDIS_URL="redis://${ip_address}:6379" + export REDIS_HOST=${ip_address} + export INDEX_NAME="rag-redis" + export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} + export MEGA_SERVICE_HOST_IP=${ip_address} + export EMBEDDING_SERVER_HOST_IP=${ip_address} + export RETRIEVER_SERVICE_HOST_IP=${ip_address} + export RERANK_SERVER_HOST_IP=${ip_address} + export LLM_SERVER_HOST_IP=${ip_address} + export EMBEDDING_SERVER_PORT=8090 + export RERANK_SERVER_PORT=8808 + export LLM_SERVER_PORT=8005 + export BACKEND_SERVICE_ENDPOINT="http://${ip_address}:8888/v1/chatqna" + export DATAPREP_SERVICE_ENDPOINT="http://${ip_address}:6007/v1/dataprep" + export DATAPREP_GET_FILE_ENDPOINT="http://${ip_address}:6008/v1/dataprep/get_file" + export DATAPREP_DELETE_FILE_ENDPOINT="http://${ip_address}:6009/v1/dataprep/delete_file" + + sed -i "s/backend_address/$ip_address/g" $WORKPATH/docker/ui/svelte/.env + + # Start Docker Containers + docker compose -f compose_no_wrapper.yaml up -d > ${LOG_PATH}/start_services_with_compose.log + + n=0 + until [[ "$n" -ge 500 ]]; do + docker logs tgi-gaudi-server > ${LOG_PATH}/tgi_service_start.log + if grep -q Connected ${LOG_PATH}/tgi_service_start.log; then + break + fi + sleep 1s + n=$((n+1)) + done +} + +function validate_service() { + local URL="$1" + local EXPECTED_RESULT="$2" + local SERVICE_NAME="$3" + local DOCKER_NAME="$4" + local INPUT_DATA="$5" + + if [[ $SERVICE_NAME == *"dataprep_upload_file"* ]]; then + cd $LOG_PATH + HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -F 'files=@./dataprep_file.txt' -H 'Content-Type: multipart/form-data' "$URL") + elif [[ $SERVICE_NAME == *"dataprep_upload_link"* ]]; then + HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -F 'link_list=["https://www.ces.tech/"]' "$URL") + elif [[ $SERVICE_NAME == *"dataprep_get"* ]]; then + HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -H 'Content-Type: application/json' "$URL") + elif [[ $SERVICE_NAME == *"dataprep_del"* ]]; then + HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -d '{"file_path": "all"}' -H 'Content-Type: application/json' "$URL") + else + HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL") + fi + HTTP_STATUS=$(echo $HTTP_RESPONSE | tr -d '\n' | sed -e 's/.*HTTPSTATUS://') + RESPONSE_BODY=$(echo $HTTP_RESPONSE | sed -e 's/HTTPSTATUS\:.*//g') + + docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log + + # check response status + if [ 
"$HTTP_STATUS" -ne "200" ]; then + echo "[ $SERVICE_NAME ] HTTP status is not 200. Received status was $HTTP_STATUS" + exit 1 + else + echo "[ $SERVICE_NAME ] HTTP status is 200. Checking content..." + fi + # check response body + if [[ "$RESPONSE_BODY" != *"$EXPECTED_RESULT"* ]]; then + echo "[ $SERVICE_NAME ] Content does not match the expected result: $RESPONSE_BODY" + exit 1 + else + echo "[ $SERVICE_NAME ] Content is as expected." + fi + + sleep 1s +} + +function validate_microservices() { + # Check if the microservices are running correctly. + + # tei for embedding service + validate_service \ + "${ip_address}:8090/embed" \ + "[[" \ + "tei-embedding" \ + "tei-embedding-gaudi-server" \ + '{"inputs":"What is Deep Learning?"}' + + sleep 1m # retrieval can't curl as expected, try to wait for more time + + # test /v1/dataprep upload file + echo "Deep learning is a subset of machine learning that utilizes neural networks with multiple layers to analyze various levels of abstract data representations. It enables computers to identify patterns and make decisions with minimal human intervention by learning from large amounts of data." > $LOG_PATH/dataprep_file.txt + validate_service \ + "http://${ip_address}:6007/v1/dataprep" \ + "Data preparation succeeded" \ + "dataprep_upload_file" \ + "dataprep-redis-server" + + # test /v1/dataprep upload link + validate_service \ + "http://${ip_address}:6007/v1/dataprep" \ + "Data preparation succeeded" \ + "dataprep_upload_link" \ + "dataprep-redis-server" + + # test /v1/dataprep/get_file + validate_service \ + "http://${ip_address}:6007/v1/dataprep/get_file" \ + '{"name":' \ + "dataprep_get" \ + "dataprep-redis-server" + + # test /v1/dataprep/delete_file + validate_service \ + "http://${ip_address}:6007/v1/dataprep/delete_file" \ + '{"status":true}' \ + "dataprep_del" \ + "dataprep-redis-server" + + # retrieval microservice + test_embedding=$(python3 -c "import random; embedding = [random.uniform(-1, 1) for _ in range(768)]; print(embedding)") + validate_service \ + "${ip_address}:7000/v1/retrieval" \ + "retrieved_docs" \ + "retrieval-microservice" \ + "retriever-redis-server" \ + "{\"text\":\"What is the revenue of Nike in 2023?\",\"embedding\":${test_embedding}}" + + # tei for rerank microservice + validate_service \ + "${ip_address}:8808/rerank" \ + '{"index":1,"score":' \ + "tei-rerank" \ + "tei-reranking-gaudi-server" \ + '{"query":"What is Deep Learning?", "texts": ["Deep Learning is not...", "Deep learning is..."]}' + + # tgi for llm service + validate_service \ + "${ip_address}:8005/generate" \ + "generated_text" \ + "tgi-llm" \ + "tgi-gaudi-server" \ + '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":17, "do_sample": true}}' + +} + +function validate_megaservice() { + # Curl the Mega Service + validate_service \ + "${ip_address}:8888/v1/chatqna" \ + "data: " \ + "chatqna-megaservice" \ + "chatqna-gaudi-backend-server" \ + '{"messages": "What is the revenue of Nike in 2023?"}' + +} + +function validate_frontend() { + cd $WORKPATH/docker/ui/svelte + local conda_env_name="OPEA_e2e" + export PATH=${HOME}/miniforge3/bin/:$PATH + if conda info --envs | grep -q "$conda_env_name"; then + echo "$conda_env_name exist!" 
+ else + conda create -n ${conda_env_name} python=3.12 -y + fi + source activate ${conda_env_name} + + sed -i "s/localhost/$ip_address/g" playwright.config.ts + + conda install -c conda-forge nodejs -y + npm install && npm ci && npx playwright install --with-deps + node -v && npm -v && pip list + + exit_status=0 + npx playwright test || exit_status=$? + + if [ $exit_status -ne 0 ]; then + echo "[TEST INFO]: ---------frontend test failed---------" + exit $exit_status + else + echo "[TEST INFO]: ---------frontend test passed---------" + fi +} + +function stop_docker() { + cd $WORKPATH/docker/gaudi + docker compose stop && docker compose rm -f +} + +function main() { + + stop_docker + if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi + start_time=$(date +%s) + start_services + end_time=$(date +%s) + duration=$((end_time-start_time)) + echo "Mega service start duration is $duration s" + + if [ "${mode}" == "perf" ]; then + python3 $WORKPATH/tests/chatqna_benchmark.py + elif [ "${mode}" == "" ]; then + validate_microservices + validate_megaservice + # validate_frontend + fi + + stop_docker + echo y | docker system prune + +} + +main diff --git a/ChatQnA/tests/test_chatqna_no_wrapper_on_xeon.sh b/ChatQnA/tests/test_chatqna_no_wrapper_on_xeon.sh new file mode 100644 index 000000000..e0d669f9b --- /dev/null +++ b/ChatQnA/tests/test_chatqna_no_wrapper_on_xeon.sh @@ -0,0 +1,256 @@ +#!/bin/bash +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +set -e +IMAGE_REPO=${IMAGE_REPO:-"opea"} +IMAGE_TAG=${IMAGE_TAG:-"latest"} +echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}" +echo "TAG=IMAGE_TAG=${IMAGE_TAG}" +export REGISTRY=${IMAGE_REPO} +export TAG=${IMAGE_TAG} + +WORKPATH=$(dirname "$PWD") +LOG_PATH="$WORKPATH/tests" +ip_address=$(hostname -I | awk '{print $1}') + +function build_docker_images() { + cd $WORKPATH/docker + git clone https://github.com/opea-project/GenAIComps.git + + echo "Build all the images with --no-cache, check docker_image_build.log for details..." 
+ service_list="chatqna-no-wrapper chatqna-ui chatqna-conversation-ui dataprep-redis retriever-redis" + docker compose -f docker_build_compose.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log + + docker pull ghcr.io/huggingface/tgi-gaudi:2.0.1 + docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 + + docker images && sleep 1s +} + +function start_services() { + cd $WORKPATH/docker/xeon + + export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5" + export RERANK_MODEL_ID="BAAI/bge-reranker-base" + export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct" + export TEI_EMBEDDING_ENDPOINT="http://${ip_address}:6006" + export TEI_RERANKING_ENDPOINT="http://${ip_address}:8808" + export TGI_LLM_ENDPOINT="http://${ip_address}:9009" + export REDIS_URL="redis://${ip_address}:6379" + export REDIS_HOST=${ip_address} + export INDEX_NAME="rag-redis" + export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} + export MEGA_SERVICE_HOST_IP=${ip_address} + export EMBEDDING_SERVER_HOST_IP=${ip_address} + export RETRIEVER_SERVICE_HOST_IP=${ip_address} + export RERANK_SERVER_HOST_IP=${ip_address} + export LLM_SERVER_HOST_IP=${ip_address} + export EMBEDDING_SERVER_PORT=6006 + export RERANK_SERVER_PORT=8808 + export LLM_SERVER_PORT=9009 + export BACKEND_SERVICE_ENDPOINT="http://${ip_address}:8888/v1/chatqna" + export DATAPREP_SERVICE_ENDPOINT="http://${ip_address}:6007/v1/dataprep" + export DATAPREP_GET_FILE_ENDPOINT="http://${ip_address}:6007/v1/dataprep/get_file" + export DATAPREP_DELETE_FILE_ENDPOINT="http://${ip_address}:6007/v1/dataprep/delete_file" + + sed -i "s/backend_address/$ip_address/g" $WORKPATH/docker/ui/svelte/.env + + # Start Docker Containers + docker compose -f compose_no_wrapper.yaml up -d > ${LOG_PATH}/start_services_with_compose.log + + n=0 + until [[ "$n" -ge 500 ]]; do + docker logs tgi-service > ${LOG_PATH}/tgi_service_start.log + if grep -q Connected ${LOG_PATH}/tgi_service_start.log; then + break + fi + sleep 1s + n=$((n+1)) + done +} + +function validate_service() { + local URL="$1" + local EXPECTED_RESULT="$2" + local SERVICE_NAME="$3" + local DOCKER_NAME="$4" + local INPUT_DATA="$5" + + if [[ $SERVICE_NAME == *"dataprep_upload_file"* ]]; then + cd $LOG_PATH + HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -F 'files=@./dataprep_file.txt' -H 'Content-Type: multipart/form-data' "$URL") + elif [[ $SERVICE_NAME == *"dataprep_upload_link"* ]]; then + HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -F 'link_list=["https://www.ces.tech/"]' "$URL") + elif [[ $SERVICE_NAME == *"dataprep_get"* ]]; then + HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -H 'Content-Type: application/json' "$URL") + elif [[ $SERVICE_NAME == *"dataprep_del"* ]]; then + HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -d '{"file_path": "all"}' -H 'Content-Type: application/json' "$URL") + else + HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL") + fi + HTTP_STATUS=$(echo $HTTP_RESPONSE | tr -d '\n' | sed -e 's/.*HTTPSTATUS://') + RESPONSE_BODY=$(echo $HTTP_RESPONSE | sed -e 's/HTTPSTATUS\:.*//g') + + docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log + + # check response status + if [ "$HTTP_STATUS" -ne "200" ]; then + echo "[ $SERVICE_NAME ] HTTP status is not 200. Received status was $HTTP_STATUS" + exit 1 + else + echo "[ $SERVICE_NAME ] HTTP status is 200. 
Checking content..." + fi + # check response body + if [[ "$RESPONSE_BODY" != *"$EXPECTED_RESULT"* ]]; then + echo "[ $SERVICE_NAME ] Content does not match the expected result: $RESPONSE_BODY" + exit 1 + else + echo "[ $SERVICE_NAME ] Content is as expected." + fi + + sleep 1s +} + +function validate_microservices() { + # Check if the microservices are running correctly. + + # tei for embedding service + validate_service \ + "${ip_address}:6006/embed" \ + "[[" \ + "tei-embedding" \ + "tei-embedding-server" \ + '{"inputs":"What is Deep Learning?"}' + + sleep 1m # retrieval can't curl as expected, try to wait for more time + + # test /v1/dataprep upload file + echo "Deep learning is a subset of machine learning that utilizes neural networks with multiple layers to analyze various levels of abstract data representations. It enables computers to identify patterns and make decisions with minimal human intervention by learning from large amounts of data." > $LOG_PATH/dataprep_file.txt + validate_service \ + "http://${ip_address}:6007/v1/dataprep" \ + "Data preparation succeeded" \ + "dataprep_upload_file" \ + "dataprep-redis-server" + + # test /v1/dataprep upload link + validate_service \ + "http://${ip_address}:6007/v1/dataprep" \ + "Data preparation succeeded" \ + "dataprep_upload_link" \ + "dataprep-redis-server" + + # test /v1/dataprep/get_file + validate_service \ + "http://${ip_address}:6007/v1/dataprep/get_file" \ + '{"name":' \ + "dataprep_get" \ + "dataprep-redis-server" + + # test /v1/dataprep/delete_file + validate_service \ + "http://${ip_address}:6007/v1/dataprep/delete_file" \ + '{"status":true}' \ + "dataprep_del" \ + "dataprep-redis-server" + + # retrieval microservice + test_embedding=$(python3 -c "import random; embedding = [random.uniform(-1, 1) for _ in range(768)]; print(embedding)") + validate_service \ + "${ip_address}:7000/v1/retrieval" \ + "retrieved_docs" \ + "retrieval-microservice" \ + "retriever-redis-server" \ + "{\"text\":\"What is the revenue of Nike in 2023?\",\"embedding\":${test_embedding}}" + + # tei for rerank microservice + validate_service \ + "${ip_address}:8808/rerank" \ + '{"index":1,"score":' \ + "tei-rerank" \ + "tei-reranking-server" \ + '{"query":"What is Deep Learning?", "texts": ["Deep Learning is not...", "Deep learning is..."]}' + + # tgi for llm service + validate_service \ + "${ip_address}:9009/generate" \ + "generated_text" \ + "tgi-llm" \ + "tgi-service" \ + '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":17, "do_sample": true}}' + +} + +function validate_megaservice() { + # Curl the Mega Service + validate_service \ + "${ip_address}:8888/v1/chatqna" \ + "data: " \ + "chatqna-megaservice" \ + "chatqna-xeon-backend-server" \ + '{"messages": "What is the revenue of Nike in 2023?"}' + +} + +function validate_frontend() { + echo "[ TEST INFO ]: --------- frontend test started ---------" + cd $WORKPATH/docker/ui/svelte + local conda_env_name="OPEA_e2e" + export PATH=${HOME}/miniforge3/bin/:$PATH + if conda info --envs | grep -q "$conda_env_name"; then + echo "$conda_env_name exist!" + else + conda create -n ${conda_env_name} python=3.12 -y + fi + source activate ${conda_env_name} + echo "[ TEST INFO ]: --------- conda env activated ---------" + + sed -i "s/localhost/$ip_address/g" playwright.config.ts + + conda install -c conda-forge nodejs -y + npm install && npm ci && npx playwright install --with-deps + node -v && npm -v && pip list + + exit_status=0 + npx playwright test || exit_status=$? 
+ + if [ $exit_status -ne 0 ]; then + echo "[TEST INFO]: ---------frontend test failed---------" + exit $exit_status + else + echo "[TEST INFO]: ---------frontend test passed---------" + fi +} + +function stop_docker() { + cd $WORKPATH/docker/xeon + docker compose stop && docker compose rm -f +} + +function main() { + + stop_docker + if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi + start_time=$(date +%s) + start_services + end_time=$(date +%s) + duration=$((end_time-start_time)) + echo "Mega service start duration is $duration s" && sleep 1s + + if [ "${mode}" == "perf" ]; then + python3 $WORKPATH/tests/chatqna_benchmark.py + elif [ "${mode}" == "" ]; then + validate_microservices + echo "==== microservices validated ====" + validate_megaservice + echo "==== megaservice validated ====" + # validate_frontend + # echo "==== frontend validated ====" + fi + + stop_docker + echo y | docker system prune + +} + +main
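
Manual smoke test (a minimal sketch, assuming the stack was started from one of the compose_no_wrapper.yaml files above with the default port mappings and is reachable on the local host): the same requests the test scripts issue can be sent by hand to verify the wrapper-free megaservice end to end. Here dataprep_file.txt stands for any small text file to ingest.

# ingest a document through the dataprep service (port 6007)
curl -X POST http://localhost:6007/v1/dataprep \
    -H 'Content-Type: multipart/form-data' \
    -F 'files=@./dataprep_file.txt'

# query the ChatQnA megaservice (port 8888); the reply is a "data: ..." SSE stream
curl -X POST http://localhost:8888/v1/chatqna \
    -H 'Content-Type: application/json' \
    -d '{"messages": "What is the revenue of Nike in 2023?"}'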
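
align_generator returns the answer as server-sent events: one "data: ..." line per token chunk, terminated by "data: [DONE]". A small consumer sketch follows (it assumes the backend is reachable at localhost:8888; curl -N disables output buffering so chunks print as they arrive):

curl -sS -N -X POST http://localhost:8888/v1/chatqna \
    -H 'Content-Type: application/json' \
    -d '{"messages": "What is Deep Learning?"}' \
| while IFS= read -r line; do
    # stop at the end-of-stream marker emitted by align_generator
    [ "$line" = "data: [DONE]" ] && break
    case "$line" in
        # each payload is the repr of a UTF-8 encoded token chunk, e.g. b'Hello'
        "data: "*) printf '%s\n' "${line#data: }" ;;
    esac
done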