diff --git a/public/locales/en/model.json b/public/locales/en/model.json
index 2dd3d04..07d9828 100644
--- a/public/locales/en/model.json
+++ b/public/locales/en/model.json
@@ -4,10 +4,18 @@
"default": "Default",
"temperature": {
"label": "Temperature",
- "description": "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic (Default: 1)"
+ "description": "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or top p but not both. (Default: 1)"
},
"presencePenalty": {
"label": "Presence Penalty",
"description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. (Default: 0)"
+ },
+ "topP": {
+ "label": "Top P",
+ "description": "Number between 0 and 1. An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both. (Default: 1)"
+ },
+ "frequencyPenalty": {
+ "label": "Frequency Penalty",
+ "description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. (Default: 0)"
}
}
diff --git a/public/locales/zh-CN/model.json b/public/locales/zh-CN/model.json
index 51ce4af..9677cb5 100644
--- a/public/locales/zh-CN/model.json
+++ b/public/locales/zh-CN/model.json
@@ -4,10 +4,18 @@
"default": "默认",
"temperature": {
"label": "采样温度",
- "description": "使用何种采样温度,值在 0 到 2 之间。较高的数值如 0.8 会使输出更加随机,而较低的数值如 0.2 会使输出更加集中和确定。(默认: 1)"
+ "description": "使用何种采样温度,值在 0 到 2 之间。较高的数值如 0.8 会使输出更加随机,而较低的数值如 0.2 会使输出更加集中和确定。我们通常建议修改此参数或概率质量,但不要同时修改两者。(默认: 1)"
},
"presencePenalty": {
"label": "存在惩罚",
"description": "数值在 -2.0 到 2.0 之间。正值会根据新 token 是否已经出现在文本中来惩罚它们,增加模型谈论新话题的可能性。 (默认: 0)"
+ },
+ "topP": {
+ "label": "概率质量",
+ "description": "数值在 0 到 1 之间。核采样(nucleus sampling)是采样温度的一种替代方法,模型会考虑具有最高概率质量的 token 的结果。因此,0.1 表示仅考虑占前 10% 概率质量的 token。我们通常建议修改此参数或采样温度,但不要同时修改两者。(默认: 1)"
+ },
+ "frequencyPenalty": {
+ "label": "频率惩罚",
+ "description": "数值在 -2.0 到 2.0 之间。正值会根据新 token 在文本中的现有频率来惩罚它们,降低模型直接重复相同语句的可能性。(默认: 0)"
}
}
diff --git a/src/components/Chat/ChatContent/ChatTitle.tsx b/src/components/Chat/ChatContent/ChatTitle.tsx
index 3c760d2..0a8409a 100644
--- a/src/components/Chat/ChatContent/ChatTitle.tsx
+++ b/src/components/Chat/ChatContent/ChatTitle.tsx
@@ -43,7 +43,7 @@ const ChatTitle = React.memo(() => {
return config ? (
<>