Skip to content

Commit

Permalink
Fix Reorg Issues (#657)
Browse files Browse the repository at this point in the history
Signed-off-by: letonghan <letong.han@intel.com>
  • Loading branch information
letonghan authored Sep 10, 2024
1 parent bea9bb0 commit a3da7c1
Show file tree
Hide file tree
Showing 5 changed files with 13 additions and 4 deletions.
1 change: 0 additions & 1 deletion comps/intent_detection/langchain/intent_detection.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,6 @@

from langchain import LLMChain, PromptTemplate
from langchain_community.llms import HuggingFaceEndpoint
from template import IntentTemplate

from comps import GeneratedDoc, LLMParamsDoc, ServiceType, opea_microservices, register_microservice

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ fi

# Build the docker image for vLLM based on the hardware mode
if [ "$hw_mode" = "hpu" ]; then
docker build -f docker/Dockerfile.Intel_HPU -t opea/vllm:hpu --shm-size=128g . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
docker build -f docker/Dockerfile.intel_hpu -t opea/vllm:hpu --shm-size=128g . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
else
git clone https://github.com/vllm-project/vllm.git
cd ./vllm/
Expand Down
10 changes: 10 additions & 0 deletions comps/lvms/tgi-llava/template.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0


class ChatTemplate:
    """Collection of prompt templates used by the LVM microservice."""

    @staticmethod
    def generate_multimodal_rag_on_videos_prompt(question: str, context: str):
        """Build a prompt that pairs a video-frame transcript with the user's question.

        Args:
            question: The user's query about the image/video frame.
            context: The transcript text associated with the image.

        Returns:
            The formatted prompt string combining transcript and question.
        """
        # f-string produces the identical text the old .format() call emitted.
        return f"The transcript associated with the image is '{context}'. {question}"
2 changes: 1 addition & 1 deletion tests/embeddings/test_embeddings_tei_langchain.sh
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ function build_docker_images() {
}

function start_service() {
tei_endpoint=5001=
tei_endpoint=5001
model="BAAI/bge-base-en-v1.5"
unset http_proxy
docker run -d --name="test-comps-embedding-tei-endpoint" -p $tei_endpoint:80 -v ./data:/data --pull always ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 --model-id $model
Expand Down
2 changes: 1 addition & 1 deletion tests/intent_detection/test_intent_detection_langchain.sh
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ function start_service() {

function validate_microservice() {
intent_port=5043
result=$(http_proxy="" curl http://${ip_address}:${intent_port}/v1/chat/intent\
result=$(http_proxy="" curl http://localhost:${intent_port}/v1/chat/intent\
-X POST \
-d '{"query":"What is Deep Learning?","max_new_tokens":10,"top_k":1,"temperature":0.001,"streaming":false}' \
-H 'Content-Type: application/json')
Expand Down

0 comments on commit a3da7c1

Please sign in to comment.