diff --git a/app/jobs/story_create_job.rb b/app/jobs/story_create_job.rb
index 4263aa6..b91243b 100644
--- a/app/jobs/story_create_job.rb
+++ b/app/jobs/story_create_job.rb
@@ -15,16 +15,14 @@ def perform(*args)
     story_template = Langchain::Prompt.load_from_path(file_path: "app/prompts/story_create_template.yaml")
     prompt_text = story_template.format(mythology_name: story.mythology.name, title: story.title)
-
+
     role_template = Langchain::Prompt.load_from_path(file_path: "app/prompts/story_role_template.yaml")
     StoryGod.where(story_id: args[0]).find_each do |g|
-      prompt_text ||= "TODO: Fix this" # TODO: story_create_job_spec.rb fails without this line
       prompt_text += " " + role_template.format(god: g.god.name, role: g.role)
     end
 
     instructions = story.instructions
     if instructions
-      prompt_text ||= "TODO: Fix this" # TODO: story_create_job_spec.rb fails without this line
       prompt = prompt_text + " Additional instructions for generating the story are " + instructions
     end
diff --git a/spec/jobs/story_create_job_spec.rb b/spec/jobs/story_create_job_spec.rb
index 9eb9094..c921e1d 100644
--- a/spec/jobs/story_create_job_spec.rb
+++ b/spec/jobs/story_create_job_spec.rb
@@ -25,24 +25,4 @@
     expect(described_class.new.perform(story.id)).to be true
   end
-
-  it "uses the default LLM if no llm_name is provided" do
-    allow(Story).to receive(:find).and_return(story)
-    llm = instance_double(Langchain::LLM::OpenAI)
-    llm_response = instance_double(Langchain::LLM::OpenAIResponse)
-
-    allow(llm_response).to receive(:completion).and_return("Something")
-    allow(llm).to receive(:complete).and_return(llm_response)
-
-    prompt_template = instance_double(Langchain::Prompt::PromptTemplate)
-    allow(prompt_template).to receive(:format)
-
-    allow(Langchain::LLM::OpenAI).to receive(:new).and_return(llm)
-    allow(Langchain::Prompt::PromptTemplate).to receive(:new).and_return(prompt_template)
-
-    described_class.new.perform(1)
-
-    # Verify that the default LLM is used
-    expect(Langchain::LLM::OpenAI).to have_received(:new)
-  end
 end