From b0d866aedce4595c0ed18cd66bf3794e4c136902 Mon Sep 17 00:00:00 2001
From: Amna Mubashar
Date: Tue, 29 Oct 2024 13:06:27 +0100
Subject: [PATCH] Update for project_id as optional (#277)

---
 integrations/google-vertex-ai.md | 31 ++++++++++++++++---------------
 1 file changed, 16 insertions(+), 15 deletions(-)

diff --git a/integrations/google-vertex-ai.md b/integrations/google-vertex-ai.md
index 264af857..563c9caa 100644
--- a/integrations/google-vertex-ai.md
+++ b/integrations/google-vertex-ai.md
@@ -57,7 +57,8 @@ Once installed, you will have access to various Haystack Generators:
 
 To use Vertex AI models, you need to have a Google Cloud Platform account and be logged in using Application Default Credentials (ADCs). For more info see the [official documentation](https://cloud.google.com/docs/authentication/provide-credentials-adc).
 
-To start using Vertex AI generators in Haystack, you need to set the `project_id` variable to a valid project ID that have enough authorization to use Vertex AI. Find your `project_id` in the [GCP resource manager](https://console.cloud.google.com/cloud-resource-manager) or locally by running `gcloud projects list` in your terminal. For more info on the gcloud CLI see the [official documentation](https://cloud.google.com/cli).
+To start using Vertex AI generators in Haystack, your account must have access to a project that is authorized to use Vertex AI endpoints. The `project_id` needed to initialize a Vertex AI generator is set during the GCP authentication described above. Additionally, you can use a different project by passing `project_id` as an argument when initializing the generator.
+You can find your `project_id` in the [GCP resource manager](https://console.cloud.google.com/cloud-resource-manager) or locally by running `gcloud projects list` in your terminal. For more info on the gcloud CLI see the [official documentation](https://cloud.google.com/cli).
 
 ### Gemini API models
 
@@ -71,7 +72,7 @@ To use Gemini model for text generation, initialize a `VertexAIGeminiGenerator`
 
 from haystack_integrations.components.generators.google_vertex import VertexAIGeminiGenerator
 
-gemini_generator = VertexAIGeminiGenerator(model="gemini-pro", project_id=project_id)
+gemini_generator = VertexAIGeminiGenerator(model="gemini-pro")
 result = gemini_generator.run(parts = ["What is assemblage in art?"])
 print(result["replies"][0])
 ```
@@ -82,7 +83,7 @@ Assemblage in art refers to the creation of a three-dimensional artwork by combi
 
 **Multimodality with `gemini-1.5-flash`**
 
-To use `gemini-1.5-flash` model for visual question answering, initialize a `VertexAIGeminiGenerator` with `"gemini-1.5-flash"` and `project_id`. Then, run it with the images as well as the prompt:
+To use the `gemini-1.5-flash` model for visual question answering, initialize a `VertexAIGeminiGenerator` with `"gemini-1.5-flash"`. Then, run it with the images as well as the prompt:
 
 ```python
 import requests
 
 from haystack.dataclasses.byte_stream import ByteStream
 from haystack_integrations.components.generators.google_vertex import VertexAIGeminiGenerator
 
@@ -99,7 +100,7 @@
 images = [
     ByteStream(data=requests.get(url).content, mime_type="image/jpeg") for url in URLS
 ]
 
-gemini_generator = VertexAIGeminiGenerator(model="gemini-1.5-flash", project_id=project_id)
+gemini_generator = VertexAIGeminiGenerator(model="gemini-1.5-flash")
 result = gemini_generator.run(parts = ["What can you tell me about these robots?", *images])
 for answer in result["replies"]:
     print(answer)
@@ -116,7 +117,7 @@ The fourth image is of Marvin from the 1977 film The Hitchhiker's Guide to the G
 
 ### PaLM API Models
 
-You can leverage PaLM API models `text-bison`, `text-unicorn` and `text-bison-32k` through `VertexAITextGenerator` for task generation. To use PaLM models, initialize a `VertexAITextGenerator` with model name and `project_id`.
+You can leverage PaLM API models `text-bison`, `text-unicorn` and `text-bison-32k` through `VertexAITextGenerator` for task generation. To use PaLM models, initialize a `VertexAITextGenerator` with the model name.
 
 Here's an example of using `text-unicorn` model with VertexAITextGenerator to extract information as a JSON file:
 
@@ -124,7 +125,7 @@ Here's an example of using `text-unicorn` model with VertexAITextGenerator to ex
 
 from haystack_integrations.components.generators.google_vertex import VertexAITextGenerator
 
-palm_llm = VertexAITextGenerator(model="text-unicorn", project_id=project_id)
+palm_llm = VertexAITextGenerator(model="text-unicorn")
 palm_llm_result = palm_llm.run(
 """Extract the technical specifications from the text below in a JSON format. Valid fields are name, network, ram, processor, storage, and color.
 Text: Google Pixel 7, 5G network, 8GB RAM, Tensor G2 processor, 128GB of storage, Lemongrass
 """)
 print(palm_llm_result["replies"][0])
 ```
@@ -135,14 +136,14 @@
 
 ### Codey API Models
 
-You can leverage Codey API models, `code-bison`, `code-bison-32k` and `code-gecko`, through `VertexAICodeGenerator` for code generation. To use Codey models, initialize a `VertexAICodeGenerator` with model name and `project_id`.
+You can leverage Codey API models, `code-bison`, `code-bison-32k` and `code-gecko`, through `VertexAICodeGenerator` for code generation. To use Codey models, initialize a `VertexAICodeGenerator` with the model name.
 
 Here's an example of using `code-bison` model for **code generation**:
 
 ```python
 from haystack_integrations.components.generators.google_vertex import VertexAICodeGenerator
 
-codey_llm = VertexAICodeGenerator(model="code-bison", project_id=project_id)
+codey_llm = VertexAICodeGenerator(model="code-bison")
 codey_llm_result = codey_llm.run("Write a code for calculating fibonacci numbers in JavaScript")
 print(codey_llm_result["replies"][0])
 ```
@@ -152,7 +153,7 @@ Here's an example of using `code-gecko` model for **code completion**:
 
 from haystack_integrations.components.generators.google_vertex import VertexAICodeGenerator
 
-codey_llm = VertexAICodeGenerator(model="code-gecko", project_id=project_id)
+codey_llm = VertexAICodeGenerator(model="code-gecko")
 codey_llm_result = codey_llm.run("""function fibonacci(n) {
   // Base cases
   if (n <= 1) {
@@ -168,7 +169,7 @@ You can leverage Imagen models through three components: [VertexAIImageCaptioner
 
 **Image Generation with `imagegeneration`**
 
-To generate an image, initialize a VertexAIImageGenerator with the `imagegeneration` and the `project_id`, Then, you can run it with a prompt:
+To generate an image, initialize a VertexAIImageGenerator with the `imagegeneration` model. Then, you can run it with a prompt:
 
 ```python
 import io
@@ -176,7 +177,7 @@ import io
 import PIL.Image as Image
 
 from haystack_integrations.components.generators.google_vertex import VertexAIImageGenerator
 
-image_generator = VertexAIImageGenerator(model="imagegeneration", project_id=project_id)
+image_generator = VertexAIImageGenerator(model="imagegeneration")
 image_generator_result = image_generator.run("magazine style, 4k, photorealistic, modern red armchair, natural lighting")
 
 ## (Optional) Save the generated image
 image = Image.open(io.BytesIO(image_generator_result["images"][0].data))
 image.save("output.png")
 ```
@@ -186,13 +187,13 @@
 
 **Image Captioning with `imagetext`**
 
-To use generate image captions, initialize a VertexAIImageCaptioner with the `imagetext` model and `project_id`. Then, you can run the VertexAIImageCaptioner with the image that you want to caption:
+To generate image captions, initialize a VertexAIImageCaptioner with the `imagetext` model. Then, you can run the VertexAIImageCaptioner with the image that you want to caption:
 
 ```python
 from haystack_integrations.components.generators.google_vertex import VertexAIImageCaptioner
 
-image_captioner = VertexAIImageCaptioner(model='imagetext', project_id=project_id)
+image_captioner = VertexAIImageCaptioner(model='imagetext')
 image = ByteStream.from_file_path("output.png") # you can use the generated image
 
 image_captioner_result = image_captioner.run(image=image)
 print(image_captioner_result["captions"])
 ```
@@ -201,14 +202,14 @@
 
 **Visual Question Answering (VQA) with `imagetext`**
 
-To answer questions about an image, initialize a VertexAIImageQA with the `imagetext` model and `project_id`. Then, you can run it with the `image` and the `question`:
+To answer questions about an image, initialize a VertexAIImageQA with the `imagetext` model. Then, you can run it with the `image` and the `question`:
 
 ```python
 from haystack.dataclasses.byte_stream import ByteStream
 from haystack_integrations.components.generators.google_vertex import VertexAIImageQA
 
-visual_qa = VertexAIImageQA(model='imagetext', project_id=project_id)
+visual_qa = VertexAIImageQA(model='imagetext')
 image = ByteStream.from_file_path("output.png") # you can use the generated image
 question = "what's the color of the furniture?"
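
The net effect of the patch is that `project_id` becomes optional for every generator shown above. Here is a minimal sketch of the two initialization styles this enables, assuming ADC has already been configured (for example via `gcloud auth application-default login`); the `"my-gcp-project"` ID is a placeholder for illustration, not a real project:

```python
from haystack_integrations.components.generators.google_vertex import VertexAIGeminiGenerator

# Option 1: rely on ADC. project_id is optional and is inferred from the
# authenticated GCP environment.
gemini_generator = VertexAIGeminiGenerator(model="gemini-pro")

# Option 2: pass project_id explicitly to target a different project.
# "my-gcp-project" is a placeholder, not a real project ID.
gemini_generator = VertexAIGeminiGenerator(
    model="gemini-pro",
    project_id="my-gcp-project",
)

result = gemini_generator.run(parts=["What is assemblage in art?"])
print(result["replies"][0])
```

Relying on ADC keeps project configuration out of the code, while the explicit argument is useful when your credentials have access to several projects.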