Diffstat (limited to 'g4f/gui/client')
-rw-r--r--  g4f/gui/client/index.html            | 1 +
-rw-r--r--  g4f/gui/client/static/js/chat.v1.js  | 2 +-
2 files changed, 2 insertions(+), 1 deletion(-)
diff --git a/g4f/gui/client/index.html b/g4f/gui/client/index.html
index a6c4909b..d84bbbe9 100644
--- a/g4f/gui/client/index.html
+++ b/g4f/gui/client/index.html
@@ -220,6 +220,7 @@
<option value="gpt-4">gpt-4</option>
<option value="gpt-3.5-turbo">gpt-3.5-turbo</option>
<option value="llama2-70b">llama2-70b</option>
+ <option value="llama3-70b-instruct">llama3-70b-instruct</option>
<option value="gemini-pro">gemini-pro</option>
<option value="">----</option>
</select>
diff --git a/g4f/gui/client/static/js/chat.v1.js b/g4f/gui/client/static/js/chat.v1.js
index a17be16e..39027260 100644
--- a/g4f/gui/client/static/js/chat.v1.js
+++ b/g4f/gui/client/static/js/chat.v1.js
@@ -926,7 +926,7 @@ colorThemes.forEach((themeOption) => {
 function count_tokens(model, text) {
     if (model) {
         if (window.llamaTokenizer)
-            if (model.startsWith("llama2") || model.startsWith("codellama")) {
+            if (model.startsWith("llama") || model.startsWith("codellama")) {
                 return llamaTokenizer.encode(text).length;
             }
         if (window.mistralTokenizer)
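
Note on the chat.v1.js hunk: the widened prefix check is what lets the newly added dropdown entry be counted on the client. "llama3-70b-instruct".startsWith("llama2") is false, so under the old check a llama3 model would have skipped the llama tokenizer branch entirely. A minimal sketch of the routing after this change (the helper name and the character-based fallback are illustrative and not part of the diff; llamaTokenizer is the browser global referenced in the hunk):

// Sketch only, assuming llamaTokenizer has been loaded as in chat.v1.js.
function countTokensSketch(model, text) {
    if (model && window.llamaTokenizer &&
        (model.startsWith("llama") || model.startsWith("codellama"))) {
        // "llama2-70b", "llama3-70b-instruct" and codellama ids all match here now;
        // the old "llama2" prefix missed the new llama3 option.
        return llamaTokenizer.encode(text).length;
    }
    // rough fallback when no tokenizer is available: roughly 4 characters per token
    return Math.ceil(text.length / 4);
}

// e.g. countTokensSketch("llama3-70b-instruct", "Hello there") now takes the llama branch.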