From 039730ab46dd23ddacad86291d19ad496e5e4898 Mon Sep 17 00:00:00 2001
From: Van QA
Date: Tue, 27 Aug 2024 13:54:40 +0700
Subject: [PATCH 1/3] feat: add gemma 2

---
 .../{gemma-2b => gemma-1.1-2b}/model.json     | 12 +++---
 .../{gemma-7b => gemma-1.1-7b}/model.json     | 10 ++---
 .../resources/models/gemma-2-27b/model.json   | 41 ++++++++++++++++++
 .../resources/models/gemma-2-2b/model.json    | 42 +++++++++++++++++++
 .../resources/models/gemma-2-9b/model.json    | 41 ++++++++++++++++++
 .../rollup.config.ts                          | 14 +++++--
 6 files changed, 145 insertions(+), 15 deletions(-)
 rename extensions/inference-nitro-extension/resources/models/{gemma-2b => gemma-1.1-2b}/model.json (68%)
 rename extensions/inference-nitro-extension/resources/models/{gemma-7b => gemma-1.1-7b}/model.json (69%)
 create mode 100644 extensions/inference-nitro-extension/resources/models/gemma-2-27b/model.json
 create mode 100644 extensions/inference-nitro-extension/resources/models/gemma-2-2b/model.json
 create mode 100644 extensions/inference-nitro-extension/resources/models/gemma-2-9b/model.json

diff --git a/extensions/inference-nitro-extension/resources/models/gemma-2b/model.json b/extensions/inference-nitro-extension/resources/models/gemma-1.1-2b/model.json
similarity index 68%
rename from extensions/inference-nitro-extension/resources/models/gemma-2b/model.json
rename to extensions/inference-nitro-extension/resources/models/gemma-1.1-2b/model.json
index 68cff325a7..56cd9c81cb 100644
--- a/extensions/inference-nitro-extension/resources/models/gemma-2b/model.json
+++ b/extensions/inference-nitro-extension/resources/models/gemma-1.1-2b/model.json
@@ -1,20 +1,20 @@
 {
   "sources": [
     {
-      "filename": "gemma-2b-it-q4_k_m.gguf",
-      "url": "https://huggingface.co/lmstudio-ai/gemma-2b-it-GGUF/resolve/main/gemma-2b-it-q4_k_m.gguf"
+      "filename": "gemma-1.1-2b-it-q4_k_m.gguf",
+      "url": "https://huggingface.co/bartowski/gemma-1.1-2b-it-GGUF/resolve/main/gemma-1.1-2b-it-Q4_K_M.gguf"
     }
   ],
-  "id": "gemma-2b",
+  "id": "gemma-1.1-2b-it",
   "object": "model",
-  "name": "Gemma 2B Q4",
+  "name": "Gemma 1.1 2B Q4",
   "version": "1.3",
   "description": "Gemma is built from the same technology with Google's Gemini.",
   "format": "gguf",
   "settings": {
     "ctx_len": 8192,
     "prompt_template": "user\n{prompt}\nmodel",
-    "llama_model_path": "gemma-2b-it-q4_k_m.gguf",
+    "llama_model_path": "gemma-1.1-2b-it-Q4_K_M.gguf",
     "ngl": 19
   },
   "parameters": {
@@ -29,7 +29,7 @@
   "metadata": {
     "author": "Google",
     "tags": ["2B", "Finetuned", "Tiny"],
-    "size": 1500000000
+    "size": 1630000000
   },
   "engine": "nitro"
 }
diff --git a/extensions/inference-nitro-extension/resources/models/gemma-7b/model.json b/extensions/inference-nitro-extension/resources/models/gemma-1.1-7b/model.json
similarity index 69%
rename from extensions/inference-nitro-extension/resources/models/gemma-7b/model.json
rename to extensions/inference-nitro-extension/resources/models/gemma-1.1-7b/model.json
index 615f1149b3..5bd89b478a 100644
--- a/extensions/inference-nitro-extension/resources/models/gemma-7b/model.json
+++ b/extensions/inference-nitro-extension/resources/models/gemma-1.1-7b/model.json
@@ -1,20 +1,20 @@
 {
   "sources": [
     {
-      "filename": "gemma-7b-it-q4_K_M.gguf",
-      "url": "https://huggingface.co/mmnga/gemma-7b-it-gguf/resolve/main/gemma-7b-it-q4_K_M.gguf"
+      "filename": "gemma-1.1-7b-it-q4_K_M.gguf",
+      "url": "https://huggingface.co/bartowski/gemma-1.1-7b-it-GGUF/resolve/main/gemma-1.1-7b-it-Q4_K_M.gguf"
     }
   ],
-  "id": "gemma-7b",
+  "id": "gemma-1.1-7b-it",
   "object": "model",
-  "name": "Gemma 7B Q4",
+  "name": "Gemma 1.1 7B Q4",
   "version": "1.2",
   "description": "Google's Gemma is built for multilingual purpose",
   "format": "gguf",
   "settings": {
     "ctx_len": 8192,
     "prompt_template": "user\n{prompt}\nmodel",
-    "llama_model_path": "gemma-7b-it-q4_K_M.gguf",
+    "llama_model_path": "gemma-1.1-7b-it-q4_K_M.gguf",
     "ngl": 29
   },
   "parameters": {
diff --git a/extensions/inference-nitro-extension/resources/models/gemma-2-27b/model.json b/extensions/inference-nitro-extension/resources/models/gemma-2-27b/model.json
new file mode 100644
index 0000000000..a7bbc51f20
--- /dev/null
+++ b/extensions/inference-nitro-extension/resources/models/gemma-2-27b/model.json
@@ -0,0 +1,41 @@
+{
+  "sources": [
+    {
+      "filename": "gemma-2-27b-it-Q4_K_M.gguf",
+      "url": "https://huggingface.co/bartowski/gemma-2-27b-it-GGUF/resolve/main/gemma-2-27b-it-Q4_K_M.gguf"
+    }
+  ],
+  "id": "gemma-2-27b-it",
+  "object": "model",
+  "name": "Gemma 2 27B Q4",
+  "version": "1.0",
+  "description": "Gemma is built from the same technology with Google's Gemini.",
+  "format": "gguf",
+  "settings": {
+    "ctx_len": 8192,
+    "prompt_template": "user\n{prompt}\nmodel\n\nmodel\n",
+    "llama_model_path": "gemma-2-27b-it-Q4_K_M.gguf",
+    "ngl": 19
+  },
+  "parameters": {
+    "temperature": 0.7,
+    "top_p": 0.95,
+    "stream": true,
+    "max_tokens": 8192,
+    "stop": [
+      ""
+    ],
+    "frequency_penalty": 0,
+    "presence_penalty": 0
+  },
+  "metadata": {
+    "author": "Google",
+    "tags": [
+      "27B",
+      "Conversational",
+      "Text-generation"
+    ],
+    "size": 1710000000
+  },
+  "engine": "nitro"
+}
diff --git a/extensions/inference-nitro-extension/resources/models/gemma-2-2b/model.json b/extensions/inference-nitro-extension/resources/models/gemma-2-2b/model.json
new file mode 100644
index 0000000000..2d38d5ec47
--- /dev/null
+++ b/extensions/inference-nitro-extension/resources/models/gemma-2-2b/model.json
@@ -0,0 +1,42 @@
+{
+  "sources": [
+    {
+      "filename": "gemma-2-2b-it-Q4_K_M.gguf",
+      "url": "https://huggingface.co/bartowski/gemma-2-2b-it-GGUF/resolve/main/gemma-2-2b-it-Q4_K_M.gguf"
+    }
+  ],
+  "id": "gemma-2-2b-it",
+  "object": "model",
+  "name": "Gemma 2 2B Q4",
+  "version": "1.0",
+  "description": "Gemma is built from the same technology with Google's Gemini.",
+  "format": "gguf",
+  "settings": {
+    "ctx_len": 8192,
+    "prompt_template": "user\n{prompt}\nmodel\n\nmodel\n",
+    "llama_model_path": "gemma-2-2b-it-Q4_K_M.gguf",
+    "ngl": 19
+  },
+  "parameters": {
+    "temperature": 0.7,
+    "top_p": 0.95,
+    "stream": true,
+    "max_tokens": 8192,
+    "stop": [
+      ""
+    ],
+    "frequency_penalty": 0,
+    "presence_penalty": 0
+  },
+  "metadata": {
+    "author": "Google",
+    "tags": [
+      "2B",
+      "Tiny",
+      "Conversational",
+      "Text-generation"
+    ],
+    "size": 1710000000
+  },
+  "engine": "nitro"
+}
diff --git a/extensions/inference-nitro-extension/resources/models/gemma-2-9b/model.json b/extensions/inference-nitro-extension/resources/models/gemma-2-9b/model.json
new file mode 100644
index 0000000000..6943e58a13
--- /dev/null
+++ b/extensions/inference-nitro-extension/resources/models/gemma-2-9b/model.json
@@ -0,0 +1,41 @@
+{
+  "sources": [
+    {
+      "filename": "gemma-2-9b-it-Q4_K_M.gguf",
+      "url": "https://huggingface.co/bartowski/gemma-2-9b-it-GGUF/resolve/main/gemma-2-9b-it-Q4_K_M.gguf"
+    }
+  ],
+  "id": "gemma-2-9b-it",
+  "object": "model",
+  "name": "Gemma 2 9B Q4",
+  "version": "1.0",
+  "description": "Gemma is built from the same technology with Google's Gemini.",
+  "format": "gguf",
+  "settings": {
+    "ctx_len": 8192,
+    "prompt_template": "user\n{prompt}\nmodel\n\nmodel\n",
+    "llama_model_path": "gemma-2-9b-it-Q4_K_M.gguf",
+    "ngl": 19
+  },
+  "parameters": {
+    "temperature": 0.7,
+    "top_p": 0.95,
+    "stream": true,
+    "max_tokens": 8192,
+    "stop": [
+      ""
+    ],
+    "frequency_penalty": 0,
+    "presence_penalty": 0
+  },
+  "metadata": {
+    "author": "Google",
+    "tags": [
+      "9B",
+      "Conversational",
+      "Text-generation"
+    ],
+    "size": 1710000000
+  },
+  "engine": "nitro"
+}
diff --git a/extensions/inference-nitro-extension/rollup.config.ts b/extensions/inference-nitro-extension/rollup.config.ts
index a4d0e8094c..fdd11f961a 100644
--- a/extensions/inference-nitro-extension/rollup.config.ts
+++ b/extensions/inference-nitro-extension/rollup.config.ts
@@ -12,8 +12,8 @@ const codeninja7bJson = require('./resources/models/codeninja-1.0-7b/model.json'
 const commandr34bJson = require('./resources/models/command-r-34b/model.json')
 const deepseekCoder13bJson = require('./resources/models/deepseek-coder-1.3b/model.json')
 const deepseekCoder34bJson = require('./resources/models/deepseek-coder-34b/model.json')
-const gemma2bJson = require('./resources/models/gemma-2b/model.json')
-const gemma7bJson = require('./resources/models/gemma-7b/model.json')
+const gemma112bJson = require('./resources/models/gemma-1.1-2b/model.json')
+const gemma117bJson = require('./resources/models/gemma-1.1-7b/model.json')
 const llama2Chat70bJson = require('./resources/models/llama2-chat-70b/model.json')
 const llama2Chat7bJson = require('./resources/models/llama2-chat-7b/model.json')
 const llamacorn1bJson = require('./resources/models/llamacorn-1.1b/model.json')
@@ -42,6 +42,9 @@ const codestralJson = require('./resources/models/codestral-22b/model.json')
 const qwen2Json = require('./resources/models/qwen2-7b/model.json')
 const llama318bJson = require('./resources/models/llama3.1-8b-instruct/model.json')
 const llama3170bJson = require('./resources/models/llama3.1-70b-instruct/model.json')
+const gemma22bJson = require('./resources/models/gemma-2-2b/model.json')
+const gemma29bJson = require('./resources/models/gemma-2-9b/model.json')
+const gemma227bJson = require('./resources/models/gemma-2-27b/model.json')
 
 export default [
   {
@@ -61,8 +64,8 @@
           commandr34bJson,
           deepseekCoder13bJson,
           deepseekCoder34bJson,
-          gemma2bJson,
-          gemma7bJson,
+          gemma112bJson,
+          gemma117bJson,
           llama2Chat70bJson,
           llama2Chat7bJson,
           llamacorn1bJson,
@@ -91,6 +94,9 @@
           qwen2Json,
           llama318bJson,
           llama3170bJson,
+          gemma22bJson,
+          gemma29bJson,
+          gemma227bJson
         ]),
         NODE: JSON.stringify(`${packageJson.name}/${packageJson.node}`),
         DEFAULT_SETTINGS: JSON.stringify(defaultSettingJson),

From 94cb1f03fabfa637f4ff9f2bfa76c5478b5eefbe Mon Sep 17 00:00:00 2001
From: Van QA
Date: Tue, 27 Aug 2024 13:58:45 +0700
Subject: [PATCH 2/3] feat: add gemma 2

---
 .../resources/models/gemma-2-27b/model.json | 2 +-
 .../resources/models/gemma-2-9b/model.json  | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/extensions/inference-nitro-extension/resources/models/gemma-2-27b/model.json b/extensions/inference-nitro-extension/resources/models/gemma-2-27b/model.json
index a7bbc51f20..25a5d59fa2 100644
--- a/extensions/inference-nitro-extension/resources/models/gemma-2-27b/model.json
+++ b/extensions/inference-nitro-extension/resources/models/gemma-2-27b/model.json
@@ -35,7 +35,7 @@
       "Conversational",
       "Text-generation"
     ],
-    "size": 1710000000
+    "size": 16600000000
   },
   "engine": "nitro"
 }
diff --git a/extensions/inference-nitro-extension/resources/models/gemma-2-9b/model.json b/extensions/inference-nitro-extension/resources/models/gemma-2-9b/model.json
index 6943e58a13..994caec4fc 100644
--- a/extensions/inference-nitro-extension/resources/models/gemma-2-9b/model.json
+++ b/extensions/inference-nitro-extension/resources/models/gemma-2-9b/model.json
@@ -35,7 +35,7 @@
       "Conversational",
       "Text-generation"
     ],
-    "size": 1710000000
+    "size": 5760000000
   },
   "engine": "nitro"
 }

From 524ccb4da247cf73248bdbb364a66eea05c6d22d Mon Sep 17 00:00:00 2001
From: Van QA
Date: Tue, 27 Aug 2024 14:27:43 +0700
Subject: [PATCH 3/3] feat: correct ngl

---
 .../resources/models/gemma-2-27b/model.json | 2 +-
 .../resources/models/gemma-2-2b/model.json  | 2 +-
 .../resources/models/gemma-2-9b/model.json  | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/extensions/inference-nitro-extension/resources/models/gemma-2-27b/model.json b/extensions/inference-nitro-extension/resources/models/gemma-2-27b/model.json
index 25a5d59fa2..80c456dc05 100644
--- a/extensions/inference-nitro-extension/resources/models/gemma-2-27b/model.json
+++ b/extensions/inference-nitro-extension/resources/models/gemma-2-27b/model.json
@@ -15,7 +15,7 @@
     "ctx_len": 8192,
     "prompt_template": "user\n{prompt}\nmodel\n\nmodel\n",
     "llama_model_path": "gemma-2-27b-it-Q4_K_M.gguf",
-    "ngl": 19
+    "ngl": 47
   },
   "parameters": {
     "temperature": 0.7,
diff --git a/extensions/inference-nitro-extension/resources/models/gemma-2-2b/model.json b/extensions/inference-nitro-extension/resources/models/gemma-2-2b/model.json
index 2d38d5ec47..07eef2d7e1 100644
--- a/extensions/inference-nitro-extension/resources/models/gemma-2-2b/model.json
+++ b/extensions/inference-nitro-extension/resources/models/gemma-2-2b/model.json
@@ -15,7 +15,7 @@
     "ctx_len": 8192,
     "prompt_template": "user\n{prompt}\nmodel\n\nmodel\n",
     "llama_model_path": "gemma-2-2b-it-Q4_K_M.gguf",
-    "ngl": 19
+    "ngl": 27
   },
   "parameters": {
     "temperature": 0.7,
diff --git a/extensions/inference-nitro-extension/resources/models/gemma-2-9b/model.json b/extensions/inference-nitro-extension/resources/models/gemma-2-9b/model.json
index 994caec4fc..1b5f4d3c8d 100644
--- a/extensions/inference-nitro-extension/resources/models/gemma-2-9b/model.json
+++ b/extensions/inference-nitro-extension/resources/models/gemma-2-9b/model.json
@@ -15,7 +15,7 @@
     "ctx_len": 8192,
     "prompt_template": "user\n{prompt}\nmodel\n\nmodel\n",
     "llama_model_path": "gemma-2-9b-it-Q4_K_M.gguf",
-    "ngl": 19
+    "ngl": 43
   },
   "parameters": {
     "temperature": 0.7,