diff --git a/CHANGELOG.md b/CHANGELOG.md index a48b01c..f9afcd4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,7 @@ +## [0.4.4] - 2024-10-31 +- Define the Ruby version for the gem +- Add Completion feature for Gemini + ## [0.2.0] - 2023-10-22 - Add image generation abilities diff --git a/Gemfile.lock b/Gemfile.lock index ae8382f..03283c2 100644 --- a/Gemfile.lock +++ b/Gemfile.lock @@ -1,7 +1,7 @@ PATH remote: . specs: - gen-ai (0.4.3) + gen-ai (0.4.4) activesupport (~> 7.1) faraday (~> 2.7) faraday-multipart (~> 1.0) @@ -143,6 +143,7 @@ GEM PLATFORMS arm64-darwin-22 arm64-darwin-23 + x86_64-darwin-22 x86_64-linux DEPENDENCIES diff --git a/lib/gen_ai/language/gemini.rb b/lib/gen_ai/language/gemini.rb index b9e2b22..4614988 100644 --- a/lib/gen_ai/language/gemini.rb +++ b/lib/gen_ai/language/gemini.rb @@ -21,29 +21,40 @@ def initialize(token:, options: {}) ) end - def complete(prompt, options = {}); end + def complete(prompt, options = {}) + response = @client.generate_content(generate_completion_options(prompt, options)) + + build_result(model: model(options), raw: response, parsed: extract_completions(response)) + end def chat(messages, options = {}, &block) if block_given? 
response = @client.stream_generate_content( - generate_options(messages, options), server_sent_events: true, &chunk_process_block(block) + generate_chat_options(messages, options), server_sent_events: true, &chunk_process_block(block) ) build_result(model: model(options), raw: response.first, parsed: extract_completions(response).flatten) else - response = @client.generate_content(generate_options(messages, options)) + response = @client.generate_content(generate_chat_options(messages, options)) build_result(model: model(options), raw: response, parsed: extract_completions(response)) end end private - def generate_options(messages, options) + def generate_chat_options(messages, options) { contents: format_messages(messages), generationConfig: options.except(:model) } end + def generate_completion_options(prompt, options) + { + contents: [{ role: DEFAULT_ROLE, parts: [text: prompt] }], + generationConfig: options.except(:model) + } + end + def model(options) options[:model] || COMPLETION_MODEL end diff --git a/lib/gen_ai/version.rb b/lib/gen_ai/version.rb index 0c80090..1095d5a 100644 --- a/lib/gen_ai/version.rb +++ b/lib/gen_ai/version.rb @@ -1,5 +1,5 @@ # frozen_string_literal: true module GenAI - VERSION = '0.4.3' + VERSION = '0.4.4' end diff --git a/spec/fixtures/cassettes/gemini/language/complete_default_prompt.yml b/spec/fixtures/cassettes/gemini/language/complete_default_prompt.yml new file mode 100644 index 0000000..bf471a5 --- /dev/null +++ b/spec/fixtures/cassettes/gemini/language/complete_default_prompt.yml @@ -0,0 +1,105 @@ +--- +http_interactions: +- request: + method: post + uri: https://generativelanguage.googleapis.com/v1/models/gemini-pro:generateContent?key=FAKE_TOKEN + body: + encoding: UTF-8 + string: '{"contents":[{"role":"user","parts":[{"text":"Hello"}]}],"generationConfig":{}}' + headers: + User-Agent: + - Faraday v2.9.0 + Content-Type: + - application/json + Expect: + - '' + response: + status: + code: 200 + message: OK + headers: + 
Content-Type: + - application/json; charset=UTF-8 + Vary: + - Origin + - Referer + - X-Origin + Content-Encoding: + - gzip + Date: + - Wed, 31 Jan 2024 11:02:30 GMT + Server: + - scaffolding on HTTPServer2 + Cache-Control: + - private + Content-Length: + - '317' + X-Xss-Protection: + - '0' + X-Frame-Options: + - SAMEORIGIN + X-Content-Type-Options: + - nosniff + Server-Timing: + - gfet4t7; dur=1860 + Alt-Svc: + - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 + body: + encoding: ASCII-8BIT + string: | + { + "candidates": [ + { + "content": { + "parts": [ + { + "text": "Hi there! How can I assist you today?" + } + ], + "role": "model" + }, + "finishReason": "STOP", + "index": 0, + "safetyRatings": [ + { + "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_HATE_SPEECH", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_HARASSMENT", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_DANGEROUS_CONTENT", + "probability": "NEGLIGIBLE" + } + ] + } + ], + "promptFeedback": { + "safetyRatings": [ + { + "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_HATE_SPEECH", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_HARASSMENT", + "probability": "NEGLIGIBLE" + }, + { + "category": "HARM_CATEGORY_DANGEROUS_CONTENT", + "probability": "NEGLIGIBLE" + } + ] + } + } + recorded_at: Wed, 31 Jan 2024 11:02:30 GMT +recorded_with: VCR 6.2.0 diff --git a/spec/language/gemini/completion_spec.rb b/spec/language/gemini/completion_spec.rb new file mode 100644 index 0000000..b697e24 --- /dev/null +++ b/spec/language/gemini/completion_spec.rb @@ -0,0 +1,30 @@ +# frozen_string_literal: true + +RSpec.describe GenAI::Language do + describe 'Gemini' do + describe '#completion' do + let(:provider) { :gemini } + let(:token) { ENV['API_ACCESS_TOKEN'] || 'FAKE_TOKEN' } + let(:instance) { described_class.new(provider, 
token) } + let(:cassette) { 'gemini/language/complete_default_prompt' } + + subject { instance.complete('Hello') } + + it 'returns completions' do + VCR.use_cassette(cassette) do + expect(subject).to be_a(GenAI::Result) + + expect(subject.provider).to eq(:gemini) + expect(subject.model).to eq('gemini-pro') + + expect(subject.value).to eq('Hi there! How can I assist you today?') + expect(subject.values).to eq(['Hi there! How can I assist you today?']) + + expect(subject.prompt_tokens).to eq(nil) + expect(subject.completion_tokens).to eq(nil) + expect(subject.total_tokens).to eq(nil) + end + end + end + end +end