[Bug]: gptcache does not seem to support gpt-35-turbo-16k chat generations

fhity93d · posted 5 months ago in Other

Current Behavior

When I try to use gptcache as the langchain cache, I get the following error:

File "/Users/xxx/Library/Python/3.9/lib/python/site-packages/langchain/chains/base.py", line 361, in acall
    raise e
  File "/Users/xxx/Library/Python/3.9/lib/python/site-packages/langchain/chains/base.py", line 355, in acall
    await self._acall(inputs, run_manager=run_manager)
  File "/Users/xxx/Library/Python/3.9/lib/python/site-packages/langchain/agents/agent.py", line 1088, in _acall
    next_step_output = await self._atake_next_step(
  File "/Users/xxx/Library/Python/3.9/lib/python/site-packages/langchain/agents/agent.py", line 932, in _atake_next_step
    output = await self.agent.aplan(
  File "/Users/xxx/Library/Python/3.9/lib/python/site-packages/langchain/agents/agent.py", line 477, in aplan
    full_output = await self.llm_chain.apredict(callbacks=callbacks, **full_inputs)
  File "/Users/xxx/Library/Python/3.9/lib/python/site-packages/langchain/chains/llm.py", line 272, in apredict
    return (await self.acall(kwargs, callbacks=callbacks))[self.output_key]
  File "/Users/xxx/Library/Python/3.9/lib/python/site-packages/langchain/chains/base.py", line 361, in acall
    raise e
  File "/Users/xxx/Library/Python/3.9/lib/python/site-packages/langchain/chains/base.py", line 355, in acall
    await self._acall(inputs, run_manager=run_manager)
  File "/Users/xxx/Library/Python/3.9/lib/python/site-packages/langchain/chains/llm.py", line 237, in _acall
    response = await self.agenerate([inputs], run_manager=run_manager)
  File "/Users/xxx/Library/Python/3.9/lib/python/site-packages/langchain/chains/llm.py", line 115, in agenerate
    return await self.llm.agenerate_prompt(
  File "/Users/xxx/Library/Python/3.9/lib/python/site-packages/langchain/chat_models/base.py", line 424, in agenerate_prompt
    return await self.agenerate(
  File "/Users/xxx/Library/Python/3.9/lib/python/site-packages/langchain/chat_models/base.py", line 384, in agenerate
    raise exceptions[0]
  File "/Users/xxx/Library/Python/3.9/lib/python/site-packages/langchain/chat_models/base.py", line 495, in _agenerate_with_cache
    return ChatResult(generations=cache_val)
  File "pydantic/main.py", line 339, in pydantic.main.BaseModel.__init__
  File "pydantic/main.py", line 1076, in pydantic.main.validate_model
  File "pydantic/fields.py", line 895, in pydantic.fields.ModelField.validate
  File "pydantic/fields.py", line 928, in pydantic.fields.ModelField._validate_sequence_like
  File "pydantic/fields.py", line 1094, in pydantic.fields.ModelField._validate_singleton
  File "pydantic/fields.py", line 884, in pydantic.fields.ModelField.validate
  File "pydantic/fields.py", line 1101, in pydantic.fields.ModelField._validate_singleton
  File "pydantic/fields.py", line 1157, in pydantic.fields.ModelField._apply_validators
  File "pydantic/class_validators.py", line 337, in pydantic.class_validators._generic_validator_basic.lambda13
  File "pydantic/main.py", line 719, in pydantic.main.BaseModel.validate
  File "/Users/xxx/Library/Python/3.9/lib/python/site-packages/langchain/load/serializable.py", line 75, in __init__
    super().__init__(**kwargs)
  File "pydantic/main.py", line 339, in pydantic.main.BaseModel.__init__
  File "pydantic/main.py", line 1102, in pydantic.main.validate_model
  File "/Users/xxx/Library/Python/3.9/lib/python/site-packages/langchain/schema/output.py", line 61, in set_text
    values["text"] = values["message"].content
KeyError: 'message'

It seems that the current gptcache does not support the chat-generation-related APIs. I'd like to confirm how to handle and fix this.

Environment

gptcache==0.1.39.1
langchain==0.0.281
  • CacheBase --> postgresql
  • VectorBase --> pgvector
  • Azure OpenAI model --> gpt-35-turbo-16k

Example code to initialize the cache

import hashlib

import langchain
from langchain.cache import GPTCache
from gptcache import Cache
from gptcache.adapter.api import init_similar_cache

def init_llm_cache(cache_obj: Cache, llm: str):
    hashed_llm = hashlib.sha256(llm.encode()).hexdigest()
    init_similar_cache(
        cache_obj=cache_obj,
        data_dir=f"similar_cache_{hashed_llm}",
        embedding=openai_embedding,   # embedding instance, defined elsewhere
        data_manager=data_manager,    # data manager, defined elsewhere
    )

langchain.llm_cache = GPTCache(init_llm_cache)
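
For reference, a minimal call pattern that should trigger the traceback above once the cache is registered (my own sketch, not from the original report; the deployment name is a placeholder):

from langchain.chat_models import AzureChatOpenAI

llm = AzureChatOpenAI(deployment_name="gpt-35-turbo-16k")  # placeholder config
llm.predict("What is GPTCache?")  # first call hits the API and populates the cache
llm.predict("What is GPTCache?")  # a similar-cache hit then raises KeyError: 'message'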

Expected Behavior

  • No response

Steps To Reproduce

  • No response

Environment

  • No response

Anything else?

  • No response
jhdbpxl9 (#1)

It seems that langchain is incompatible with gptcache.

wj8zmpe1 (#2)

According to this question in Microsoft, the current GPT 3.5 turbo and GPT 3.5 turbo 16k only support the chat completion API, so we can only use chat-based models as the LLM when developing the related chains and agents. There is also some discussion in the gptcache repo (this issue) suggesting that it does not support chat generations. I traced the error message as well: the model is in fact using the chat completion API, which packages the output as messages, and the current gptcache cannot parse that format. I think this may be one possible cause of the incompatibility.
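
My reading of the stack (a minimal sketch of my own, assuming langchain 0.0.281; not from the original thread): the GPTCache integration returns cached results as plain text-only Generation objects, but the chat path validates each entry as a ChatGeneration, whose set_text root validator reads values["message"].content, so a cache hit fails exactly as in the traceback above:

from langchain.schema import ChatResult, Generation

# What the GPTCache-backed cache hands back on a hit: text-only generations
# with no serialized "message" field.
cached = [Generation(text="hello")]

# BaseChatModel rebuilds the result like this; pydantic coerces each entry
# into a ChatGeneration and its set_text validator raises KeyError: 'message'.
ChatResult(generations=cached)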

yduiuuwa (#3)

@hueiyuan Could you show me complete demo code for this case?

a0zr77ik (#4)

Here is my sample demo code for your reference; you will need to replace the configuration-related dict variables:

### GPTCache refactor
import os

import openai  # pylint: disable=C0413
import numpy as np

from gptcache.embedding.base import BaseEmbedding
from gptcache.utils import import_openai

import_openai()

AZURE_OPENAI_CONF = {
    "embedding_model_name": "....",
    "embedding_deployment_name": "....",
    "api_key": "....",
    "api_endpoint": "....",
    "api_version": "....",
    "api_type": "....",
}

class GPTCacheAzureOpenAIEmbedding(BaseEmbedding):
    """Generate text embedding for given text using OpenAI.

:param model: model name, defaults to 'text-embedding-ada-002'.
:type model: str
:param api_key: OpenAI API Key. When the parameter is not specified, it will load the key by default if it is available.
:type api_key: str

Example:
.. code-block:: python

from gptcache.embedding import OpenAI

test_sentence = 'Hello, world.'
encoder = OpenAI(api_key='your_openai_key')
embed = encoder.to_embeddings(test_sentence)
"""

    def __init__(
        self, 
        model: str = AZURE_OPENAI_CONF["embedding_model_name"], 
        deployment_id: str = AZURE_OPENAI_CONF["embedding_deployment_name"],
        api_key: str = AZURE_OPENAI_CONF["api_key"], 
        api_base: str = AZURE_OPENAI_CONF["api_endpoint"],
        api_version: str = AZURE_OPENAI_CONF["api_version"],
        api_type: str = AZURE_OPENAI_CONF["api_type"]
    ):
        if not api_key:
            if openai.api_key:
                api_key = openai.api_key
            else:
                api_key = os.getenv("OPENAI_API_KEY")
        if not api_base:
            if openai.api_base:
                api_base = openai.api_base
            else:
                api_base = os.getenv("OPENAI_API_BASE")
        
        openai.api_key = api_key
        openai.api_base = api_base
        openai.api_type = api_type
        openai.api_version = api_version
        
        self.api_base = api_base  # don't override all of openai as we may just want to override for say embeddings
        self.model = model
        self.deployment_id = deployment_id
        
        if model in self.dim_dict():
            self.__dimension = self.dim_dict()[model]
        else:
            self.__dimension = None

    def to_embeddings(self, data, **_):
        """Generate embedding given text input

:param data: text in string.
:type data: str

:return: a text embedding in shape of (dim,).
"""
        sentence_embeddings = openai.Embedding.create(
            model=self.model, 
            input=data, 
            api_base=self.api_base,
            deployment_id=self.deployment_id
        )
        return np.array(sentence_embeddings["data"][0]["embedding"]).astype("float32")

    @property
    def dimension(self):
        """Embedding dimension.

:return: embedding dimension
"""
        if not self.__dimension:
            foo_emb = self.to_embeddings("foo")
            self.__dimension = len(foo_emb)
        return self.__dimension

    @staticmethod
    def dim_dict():
        return {"text-embedding-ada-002": 1536}

import hashlib

import langchain
from langchain.cache import GPTCache

from gptcache import Cache, Config
from gptcache.adapter.api import init_similar_cache
from gptcache.manager import CacheBase, VectorBase, get_data_manager
from gptcache.similarity_evaluation.distance import SearchDistanceEvaluation

LLM_CACHING_CONF = {
    "gptcache_store_conf": {
        "connect_string": "<postgresql-connect_string>",
        "maximum_text_length": 65535
    },
    "similarity_threshold": 0.8,
    "cache_eviction": "LRU"
}

class LLMGPTCaching:
    def __init__(self):
        self.cache_conf = Config(
            similarity_threshold=LLM_CACHING_CONF["similarity_threshold"]
        )
        self.cache_openai_encoder = GPTCacheAzureOpenAIEmbedding()
        
        cache_base = CacheBase(
            "postgresql",
            sql_url=LLM_CACHING_CONF["gptcache_store_conf"]["connect_string"],
            table_len_config={
                "question_question": LLM_CACHING_CONF["gptcache_store_conf"]["maximum_text_length"],
                "answer_answer": LLM_CACHING_CONF["gptcache_store_conf"]["maximum_text_length"],
                "session_id": LLM_CACHING_CONF["gptcache_store_conf"]["maximum_text_length"],
                "dep_name": LLM_CACHING_CONF["gptcache_store_conf"]["maximum_text_length"],
                "dep_data": LLM_CACHING_CONF["gptcache_store_conf"]["maximum_text_length"]
            }
        )
        
        vector_base = VectorBase(
            "pgvector",
            url=LLM_CACHING_CONF["gptcache_store_conf"]["connect_string"],
            collection_name="llm_cache",
            dimension=self.cache_openai_encoder.dimension
        )

        self.data_manager = get_data_manager(
            cache_base=cache_base,
            vector_base=vector_base,
            eviction=LLM_CACHING_CONF["cache_eviction"]
        )
    
    def init_llm_cache(self, cache_obj: Cache, llm: str):
        hashed_llm = hashlib.sha256(llm.encode()).hexdigest()
        
        init_similar_cache(
            cache_obj=cache_obj,
            data_dir=f"similar_cache_{hashed_llm}",
            embedding=self.cache_openai_encoder,
            data_manager=self.data_manager,
            evaluation=SearchDistanceEvaluation(),
            config=self.cache_conf
        )
        
llm_caching = LLMGPTCaching()
langchain.llm_cache = GPTCache(llm_caching.init_llm_cache)
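
# --- Added sketch (my assumption, not in the original demo): sanity-check the
# --- cache wiring independently of langchain by initializing a bare Cache.
test_cache = Cache()
llm_caching.init_llm_cache(test_cache, "smoke-test")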

from langchain.chat_models import AzureChatOpenAI
openai_conf = {
    "gpt_model_name": "....",
    "gpt_deployment_name": "....",
    "api_version": "....",
    "api_endpoint": "....",
    "api_type": "....",
    "api_key": "....",
    "temperature": "....",
}

azure_openai_model = AzureChatOpenAI(
    model_name=openai_conf["gpt_model_name"],
    deployment_name=openai_conf["gpt_deployment_name"],
    openai_api_version=openai_conf["api_version"],
    openai_api_base=openai_conf["api_endpoint"],
    openai_api_type=openai_conf["api_type"],
    openai_api_key=openai_conf["api_key"],
    temperature=openai_conf["temperature"]
)

from langchain.agents.structured_chat.base import StructuredChatAgent
from langchain.chains import LLMChain
from langchain.agents import AgentExecutor

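# current_tools and the AGENT_* / HUMAN_MESSAGE_TEMPLATE constants below are
# assumed to be defined elsewhere in the poster's project; substitute your own.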
agent_prompt = StructuredChatAgent.create_prompt(
    tools=current_tools,
    prefix=AGENT_PREFIX,
    human_message_template=HUMAN_MESSAGE_TEMPLATE,
    suffix=AGENT_SUFFIX,
    format_instructions=AGENT_FORMAT_INSTRUCTIONS,
    input_variables=["input", "chat_history", "agent_scratchpad"],
)

agent_llm_chain = LLMChain(
    llm=azure_openai_model,
    prompt=agent_prompt,
    # verbose=True
)

agent = StructuredChatAgent(
    llm_chain=agent_llm_chain,
    tools=current_tools,
    early_stopping_method="force",
    # verbose=True
)

agent_chain = AgentExecutor.from_agent_and_tools(
    agent=agent,
    tools=current_tools,
    # verbose=True, 
)

agent_chain.run(".......")
