Which part of a ChatGPT-3 OpenAI API request payload is limited by the maximum number of tokens?

ujv3wf0j  posted 2023-10-24  in Other

I roughly understand how tokens are computed from characters, but what exactly do I need to count? Say I have a payload like this:

{
  "model": "gpt-3.5-turbo",
  "temperature": 1,
  "max_tokens": 400,
  "presence_penalty": 0.85,
  "frequency_penalty": 0.85,
  "messages": [
    {
      "role": "system",
      "content": "prompt"
    },
    {
      "role": "assistant",
      "content": "message"
    },
    // tens of messages
  ]
}

Do I have to count all of the tokens in it, or only those in "messages"? If the latter, do I also have to count JSON syntax characters such as spaces, brackets, and commas? What about the "role" and "content" keys? And the "role" values?
Or can I simply concatenate all the "content" values into a single string and count tokens on that alone? (That's the answer I'm *hoping* for, heh)


tkclm6bt  1#

From my understanding and my own measurements, all of the tokens in the list you pass as "messages" are counted. That includes the values of both the "role" and "content" keys (plus a small fixed per-message overhead for the chat format), but not JSON syntax characters such as spaces, brackets, commas, or quotes.
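To see this concretely, here is a minimal sketch (assuming tiktoken is installed; the example message is my own) showing that only the values are tokenized, never the JSON punctuation:

import tiktoken

enc = tiktoken.get_encoding("cl100k_base")  # encoding used by gpt-3.5-turbo and gpt-4
message = {"role": "user", "content": "Hello, how are you?"}

# Encode the values only; braces, quotes, and commas never reach the model.
n = len(enc.encode(message["role"])) + len(enc.encode(message["content"]))
n += 3  # fixed per-message overhead for the chat wrapper (-0613 models)
print(n)
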
I use the following script, provided by OpenAI, to count the number of tokens in the input. I modified it to estimate the cost of a multi-message input (rather than of the output response), and it has been quite accurate for me.

import tiktoken

def num_tokens_from_messages(messages, model="gpt-3.5-turbo-0613"):
    """Return the number of tokens used by a list of messages."""
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        print("Warning: model not found. Using cl100k_base encoding.")
        encoding = tiktoken.get_encoding("cl100k_base")
    if model in {
        "gpt-3.5-turbo-0613",
        "gpt-3.5-turbo-16k-0613",
        "gpt-4-0613",
        "gpt-4-32k-0613",
        }:
        tokens_per_message = 3
        tokens_per_name = 1
    elif model == "gpt-3.5-turbo-0301":
        tokens_per_message = 4  # every message follows <|start|>{role/name}\n{content}<|end|>\n
        tokens_per_name = -1  # if there's a name, the role is omitted
    elif "gpt-3.5-turbo" in model:
        print("Warning: gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0613.")
        return num_tokens_from_messages(messages, model="gpt-3.5-turbo-0613")
    elif "gpt-4" in model:
        print("Warning: gpt-4 may update over time. Returning num tokens assuming gpt-4-0613.")
        return num_tokens_from_messages(messages, model="gpt-4-0613")
    else:
        raise NotImplementedError(
            f"""num_tokens_from_messages() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens."""
        )
    num_tokens = 0
    for message in messages:
        num_tokens += tokens_per_message
        for key, value in message.items():
            num_tokens += len(encoding.encode(value))
            if key == "name":
                num_tokens += tokens_per_name
    num_tokens += 3  # every reply is primed with <|start|>assistant<|message|>
    return num_tokens
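
# A quick check on a payload shaped like the one in the question
# (hypothetical two-message conversation; real counts depend on the content):
example_messages = [
    {"role": "system", "content": "prompt"},
    {"role": "assistant", "content": "message"},
]
print(num_tokens_from_messages(example_messages))
# counts the "role" and "content" values, plus 3 wrapper tokens per
# message and 3 tokens priming the model's reply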

convo_lens = []

for ex in dataset:  # `dataset` is your list of inputs; each item has a "messages" list
    messages = ex["messages"]
    convo_lens.append(num_tokens_from_messages(messages))

# Cap each conversation at the 4,096-token context window of gpt-3.5-turbo.
n_input_tokens_in_dataset = sum(min(4096, length) for length in convo_lens)
print(f"Input portion of the data has ~{n_input_tokens_in_dataset} tokens")

# Prices in USD per 1,000 tokens, as of Aug 29 2023.
costs = {
    "gpt-4-0613": {
        "input" : 0.03,
        "output": 0.06
    },
    "gpt-4-32k-0613": {
        "input" : 0.06,
        "output": 0.12
    },
    "gpt-3.5-turbo-0613": {
        "input": 0.0015,
        "output": 0.002
    },
    "gpt-3.5-turbo-16k-0613": {
        "input": 0.003,
        "output": 0.004
    }
}

# Price the input with gpt-3.5-turbo-0613 here.
print(f"Cost of inference: ${(n_input_tokens_in_dataset / 1000) * costs['gpt-3.5-turbo-0613']['input']:.4f}")
