Method: projects.locations.endpoints.generateContent

Generate content with multimodal inputs.

Endpoint

post https://{service-endpoint}/v1beta1/{model}:generateContent

Where {service-endpoint} is one of the supported service endpoints, for example us-central1-aiplatform.googleapis.com.

Path parameters

model string

Required. The fully qualified name of the publisher model or tuned model endpoint to use.

Publisher model format: projects/{project}/locations/{location}/publishers/*/models/*

Tuned model endpoint format: projects/{project}/locations/{location}/endpoints/{endpoint}

Request body

The request body contains data with the following structure:

Fields
contents[] object (Content)

Required. The content of the current conversation with the model.

For single-turn queries, this is a single instance. For multi-turn queries, this is a repeated field that contains the conversation history plus the latest request.
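
For illustration, a minimal Python sketch (using the Vertex AI SDK shown in the samples below) that passes a multi-turn history as contents; the project ID and prompts are placeholders:

import vertexai
from vertexai.generative_models import Content, GenerativeModel, Part

vertexai.init(project="your-project-id", location="us-central1")
model = GenerativeModel("gemini-1.5-flash-002")

# Conversation history plus the latest request, in order.
contents = [
    Content(role="user", parts=[Part.from_text("What is the capital of France?")]),
    Content(role="model", parts=[Part.from_text("The capital of France is Paris.")]),
    Content(role="user", parts=[Part.from_text("Roughly how many people live there?")]),
]
response = model.generate_content(contents)
print(response.text)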

cachedContent string

Optional. The resource name of the cached content used as context to serve the prediction. Note: this field is only used with explicit caching, where users control what content is cached and enjoy guaranteed cost savings. Format: projects/{project}/locations/{location}/cachedContents/{cachedContent}
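
A hedged Python sketch of explicit caching via the SDK's preview caching module; the document text and TTL are illustrative, and cached content has a minimum input size in practice:

import datetime

import vertexai
from vertexai.generative_models import Content, Part
from vertexai.preview import caching
from vertexai.preview.generative_models import GenerativeModel

vertexai.init(project="your-project-id", location="us-central1")

# Explicitly create cached content; its resource name fills cachedContent.
cached = caching.CachedContent.create(
    model_name="gemini-1.5-flash-002",
    contents=[Content(role="user", parts=[Part.from_text("<large document text>")])],
    ttl=datetime.timedelta(minutes=60),
)
print(cached.resource_name)  # projects/.../locations/.../cachedContents/...

# Serve predictions using the cached content as context.
model = GenerativeModel.from_cached_content(cached_content=cached)
response = model.generate_content("Summarize the cached document.")
print(response.text)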

tools[] object (Tool)

Optional. A list of Tools the model may use to generate the next response.

A Tool is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of the knowledge and scope of the model.

toolConfig object (ToolConfig)

Optional. Tool config. This config is shared across all tools provided in the request.
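
A hedged Python sketch covering both tools and toolConfig: it declares a function tool and forces the model to call it. The function name and schema are invented for illustration:

import vertexai
from vertexai.generative_models import (
    FunctionDeclaration,
    GenerativeModel,
    Tool,
    ToolConfig,
)

vertexai.init(project="your-project-id", location="us-central1")

# A hypothetical function the model may ask the caller to run.
get_current_weather = FunctionDeclaration(
    name="get_current_weather",
    description="Get the current weather for a location",
    parameters={
        "type": "object",
        "properties": {"location": {"type": "string"}},
    },
)
weather_tool = Tool.from_function_declarations([get_current_weather])

# The config applies to all tools in the request; here it forces a call
# to one of the allowed functions.
tool_config = ToolConfig(
    function_calling_config=ToolConfig.FunctionCallingConfig(
        mode=ToolConfig.FunctionCallingConfig.Mode.ANY,
        allowed_function_names=["get_current_weather"],
    )
)

model = GenerativeModel("gemini-1.5-flash-002")
response = model.generate_content(
    "What is the weather like in Boston?",
    tools=[weather_tool],
    tool_config=tool_config,
)
print(response.candidates[0].function_calls)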

labels map (key: string, value: string)

Optional. Labels with user-defined metadata for the request. They are used for billing and reporting only.

Label keys and values can be no longer than 63 characters (Unicode code points) and can contain only lowercase letters, numeric characters, underscores, and dashes. International characters are allowed. Label values are optional. Label keys must start with a letter.

safetySettings[] object (SafetySetting)

Optional. Per-request settings for blocking unsafe content. Enforced on GenerateContentResponse.candidates.
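
For example, a Python sketch that tightens blocking for two harm categories on a single request (the thresholds are chosen arbitrarily):

import vertexai
from vertexai.generative_models import (
    GenerativeModel,
    HarmBlockThreshold,
    HarmCategory,
    SafetySetting,
)

vertexai.init(project="your-project-id", location="us-central1")
model = GenerativeModel("gemini-1.5-flash-002")

safety_settings = [
    SafetySetting(
        category=HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
        threshold=HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
    ),
    SafetySetting(
        category=HarmCategory.HARM_CATEGORY_HARASSMENT,
        threshold=HarmBlockThreshold.BLOCK_ONLY_HIGH,
    ),
]
response = model.generate_content(
    "Why is the sky blue?", safety_settings=safety_settings
)
print(response.text)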

generationConfig object (GenerationConfig)

Optional. Generation config.
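
For example, a brief Python sketch that sets sampling parameters for one request (the values are arbitrary):

import vertexai
from vertexai.generative_models import GenerationConfig, GenerativeModel

vertexai.init(project="your-project-id", location="us-central1")
model = GenerativeModel("gemini-1.5-flash-002")

# Controls randomness, nucleus sampling, and response length.
config = GenerationConfig(
    temperature=0.2,
    top_p=0.95,
    max_output_tokens=1024,
)
response = model.generate_content("Why is the sky blue?", generation_config=config)
print(response.text)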

systemInstruction object (Content)

Optional. User-provided system instructions for the model. Note: only text should be used in parts, and the content in each part is placed in a separate paragraph.
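
A short Python sketch passing text-only system instructions; each string ends up in its own paragraph (the instructions are placeholders):

import vertexai
from vertexai.generative_models import GenerativeModel

vertexai.init(project="your-project-id", location="us-central1")

# System instructions are set on the model, not per request.
model = GenerativeModel(
    "gemini-1.5-flash-002",
    system_instruction=[
        "You are a helpful travel assistant.",
        "Answer in one short paragraph.",
    ],
)
response = model.generate_content("Suggest a weekend trip near Paris.")
print(response.text)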

Example request

Text

Java

import com.google.cloud.vertexai.VertexAI;
import com.google.cloud.vertexai.api.GenerateContentResponse;
import com.google.cloud.vertexai.generativeai.GenerativeModel;
import com.google.cloud.vertexai.generativeai.ResponseHandler;

public class QuestionAnswer {

  public static void main(String[] args) throws Exception {
    // TODO(developer): Replace these variables before running the sample.
    String projectId = "your-google-cloud-project-id";
    String location = "us-central1";
    String modelName = "gemini-1.5-flash-001";

    String output = simpleQuestion(projectId, location, modelName);
    System.out.println(output);
  }

  // Asks a question to the specified Vertex AI Gemini model and returns the generated answer.
  public static String simpleQuestion(String projectId, String location, String modelName)
      throws Exception {
    // Initialize client that will be used to send requests.
    // This client only needs to be created once, and can be reused for multiple requests.
    try (VertexAI vertexAI = new VertexAI(projectId, location)) {
      String output;
      GenerativeModel model = new GenerativeModel(modelName, vertexAI);
      // Send the question to the model for processing.
      GenerateContentResponse response = model.generateContent("Why is the sky blue?");
      // Extract the generated text from the model's response.
      output = ResponseHandler.getText(response);
      return output;
    }
  }
}

Node.js

const {VertexAI} = require('@google-cloud/vertexai');

/**
 * TODO(developer): Update these variables before running the sample.
 */
const PROJECT_ID = process.env.CAIP_PROJECT_ID;
const LOCATION = process.env.LOCATION;
const MODEL = 'gemini-1.5-flash-001';

async function generateContent() {
  // Initialize Vertex with your Cloud project and location
  const vertexAI = new VertexAI({project: PROJECT_ID, location: LOCATION});

  // Instantiate the model
  const generativeModel = vertexAI.getGenerativeModel({
    model: MODEL,
  });

  const request = {
    contents: [
      {
        role: 'user',
        parts: [
          {
            text: 'Write a story about a magic backpack.',
          },
        ],
      },
    ],
  };

  console.log(JSON.stringify(request));

  const result = await generativeModel.generateContent(request);

  console.log(result.response.candidates[0].content.parts[0].text);
}

Python

import vertexai

from vertexai.generative_models import GenerativeModel

# TODO(developer): Update and un-comment below line
# PROJECT_ID = "your-project-id"
vertexai.init(project=PROJECT_ID, location="us-central1")

model = GenerativeModel("gemini-1.5-flash-002")
response = model.generate_content("Write a story about a magic backpack.")

print(response.text)

Multi-modal

Java

import com.google.cloud.vertexai.VertexAI;
import com.google.cloud.vertexai.api.GenerateContentResponse;
import com.google.cloud.vertexai.generativeai.ContentMaker;
import com.google.cloud.vertexai.generativeai.GenerativeModel;
import com.google.cloud.vertexai.generativeai.PartMaker;
import com.google.cloud.vertexai.generativeai.ResponseHandler;

public class Multimodal {
  public static void main(String[] args) throws Exception {
    // TODO(developer): Replace these variables before running the sample.
    String projectId = "your-google-cloud-project-id";
    String location = "us-central1";
    String modelName = "gemini-1.5-flash-001";

    String output = nonStreamingMultimodal(projectId, location, modelName);
    System.out.println(output);
  }

  // Ask a simple question and get the response.
  public static String nonStreamingMultimodal(String projectId, String location, String modelName)
      throws Exception {
    // Initialize client that will be used to send requests.
    // This client only needs to be created once, and can be reused for multiple requests.
    try (VertexAI vertexAI = new VertexAI(projectId, location)) {
      GenerativeModel model = new GenerativeModel(modelName, vertexAI);

      String videoUri = "gs://cloud-samples-data/video/animals.mp4";
      String imgUri = "gs://cloud-samples-data/generative-ai/image/character.jpg";

      // Get the response from the model.
      GenerateContentResponse response = model.generateContent(
          ContentMaker.fromMultiModalData(
              PartMaker.fromMimeTypeAndData("video/mp4", videoUri),
              PartMaker.fromMimeTypeAndData("image/jpeg", imgUri),
              "Are this video and image correlated?"
          ));

      // Extract the generated text from the model's response.
      String output = ResponseHandler.getText(response);
      return output;
    }
  }
}

Node.js

const {VertexAI} = require('@google-cloud/vertexai');

/**
 * TODO(developer): Update these variables before running the sample.
 */
const PROJECT_ID = process.env.CAIP_PROJECT_ID;
const LOCATION = 'us-central1';
const MODEL = 'gemini-1.5-flash-001';

async function generateContent() {
  // Initialize Vertex AI
  const vertexAI = new VertexAI({project: PROJECT_ID, location: LOCATION});
  const generativeModel = vertexAI.getGenerativeModel({model: MODEL});

  const request = {
    contents: [
      {
        role: 'user',
        parts: [
          {
            file_data: {
              file_uri: 'gs://cloud-samples-data/video/animals.mp4',
              mime_type: 'video/mp4',
            },
          },
          {
            file_data: {
              file_uri:
                'gs://cloud-samples-data/generative-ai/image/character.jpg',
              mime_type: 'image/jpeg',
            },
          },
          {text: 'Are this video and this image correlated?'},
        ],
      },
    ],
  };

  const result = await generativeModel.generateContent(request);

  console.log(result.response.candidates[0].content.parts[0].text);
}

Python

import vertexai

from vertexai.generative_models import GenerativeModel, Part

# TODO(developer): Update and un-comment below line
# PROJECT_ID = "your-project-id"
vertexai.init(project=PROJECT_ID, location="us-central1")

model = GenerativeModel("gemini-1.5-flash-002")
response = model.generate_content(
    [
        Part.from_uri(
            "gs://cloud-samples-data/generative-ai/video/animals.mp4", "video/mp4"
        ),
        Part.from_uri(
            "gs://cloud-samples-data/generative-ai/image/character.jpg",
            "image/jpeg",
        ),
        "Are these video and image correlated?",
    ]
)

print(response.text)

Advanced use case

Go

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"

	"cloud.google.com/go/vertexai/genai"
)

// functionCallsChat opens a chat session and sends 4 messages to the model:
// - convert a first text question into a structured function call request
// - convert the first structured function call response into natural language
// - convert a second text question into a structured function call request
// - convert the second structured function call response into natural language
func functionCallsChat(w io.Writer, projectID, location, modelName string) error {
	// location := "us-central1"
	// modelName := "gemini-1.5-flash-001"
	ctx := context.Background()
	client, err := genai.NewClient(ctx, projectID, location)
	if err != nil {
		return fmt.Errorf("unable to create client: %w", err)
	}
	defer client.Close()

	model := client.GenerativeModel(modelName)

	// Build an OpenAPI schema, in memory
	paramsProduct := &genai.Schema{
		Type: genai.TypeObject,
		Properties: map[string]*genai.Schema{
			"productName": {
				Type:        genai.TypeString,
				Description: "Product name",
			},
		},
	}
	fundeclProductInfo := &genai.FunctionDeclaration{
		Name:        "getProductSku",
		Description: "Get the SKU for a product",
		Parameters:  paramsProduct,
	}
	paramsStore := &genai.Schema{
		Type: genai.TypeObject,
		Properties: map[string]*genai.Schema{
			"location": {
				Type:        genai.TypeString,
				Description: "Location",
			},
		},
	}
	fundeclStoreLocation := &genai.FunctionDeclaration{
		Name:        "getStoreLocation",
		Description: "Get the location of the closest store",
		Parameters:  paramsStore,
	}
	model.Tools = []*genai.Tool{
		{FunctionDeclarations: []*genai.FunctionDeclaration{
			fundeclProductInfo,
			fundeclStoreLocation,
		}},
	}
	model.SetTemperature(0.0)

	chat := model.StartChat()

	// Send a prompt for the first conversation turn that should invoke the getProductSku function
	prompt := "Do you have the Pixel 8 Pro in stock?"
	fmt.Fprintf(w, "Question: %s\n", prompt)
	resp, err := chat.SendMessage(ctx, genai.Text(prompt))
	if err != nil {
		return err
	}
	if len(resp.Candidates) == 0 ||
		len(resp.Candidates[0].Content.Parts) == 0 {
		return errors.New("empty response from model")
	}

	// The model has returned a function call to the declared function `getProductSku`
	// with a value for the argument `productName`.
	jsondata, err := json.MarshalIndent(resp.Candidates[0].Content.Parts[0], "\t", "  ")
	if err != nil {
		return fmt.Errorf("json.MarshalIndent: %w", err)
	}
	fmt.Fprintf(w, "function call generated by the model:\n\t%s\n", string(jsondata))

	// Create a function call response, to simulate the result of a call to a
	// real service
	funresp := &genai.FunctionResponse{
		Name: "getProductSku",
		Response: map[string]any{
			"sku":      "GA04834-US",
			"in_stock": "yes",
		},
	}
	jsondata, err = json.MarshalIndent(funresp, "\t", "  ")
	if err != nil {
		return fmt.Errorf("json.MarshalIndent: %w", err)
	}
	fmt.Fprintf(w, "function call response sent to the model:\n\t%s\n\n", string(jsondata))

	// And provide the function call response to the model
	resp, err = chat.SendMessage(ctx, funresp)
	if err != nil {
		return err
	}
	if len(resp.Candidates) == 0 ||
		len(resp.Candidates[0].Content.Parts) == 0 {
		return errors.New("empty response from model")
	}

	// The model has taken the function call response as input, and has
	// reformulated the response to the user.
	jsondata, err = json.MarshalIndent(resp.Candidates[0].Content.Parts[0], "\t", "  ")
	if err != nil {
		return fmt.Errorf("json.MarshalIndent: %w", err)
	}
	fmt.Fprintf(w, "Answer generated by the model:\n\t%s\n\n", string(jsondata))

	// Send a prompt for the second conversation turn that should invoke the getStoreLocation function
	prompt2 := "Is there a store in Mountain View, CA that I can visit to try it out?"
	fmt.Fprintf(w, "Question: %s\n", prompt)

	resp, err = chat.SendMessage(ctx, genai.Text(prompt2))
	if err != nil {
		return err
	}
	if len(resp.Candidates) == 0 ||
		len(resp.Candidates[0].Content.Parts) == 0 {
		return errors.New("empty response from model")
	}

	// The model has returned a function call to the declared function `getStoreLocation`
	// with a value for the argument `location`.
	jsondata, err = json.MarshalIndent(resp.Candidates[0].Content.Parts[0], "\t", "  ")
	if err != nil {
		return fmt.Errorf("json.MarshalIndent: %w", err)
	}
	fmt.Fprintf(w, "function call generated by the model:\n\t%s\n", string(jsondata))

	// Create a function call response, to simulate the result of a call to a
	// real service
	funresp = &genai.FunctionResponse{
		Name: "getStoreLocation",
		Response: map[string]any{
			"store": "2000 N Shoreline Blvd, Mountain View, CA 94043, US",
		},
	}
	jsondata, err = json.MarshalIndent(funresp, "\t", "  ")
	if err != nil {
		return fmt.Errorf("json.MarshalIndent: %w", err)
	}
	fmt.Fprintf(w, "function call response sent to the model:\n\t%s\n\n", string(jsondata))

	// And provide the function call response to the model
	resp, err = chat.SendMessage(ctx, funresp)
	if err != nil {
		return err
	}
	if len(resp.Candidates) == 0 ||
		len(resp.Candidates[0].Content.Parts) == 0 {
		return errors.New("empty response from model")
	}

	// The model has taken the function call response as input, and has
	// reformulated the response to the user.
	jsondata, err = json.MarshalIndent(resp.Candidates[0].Content.Parts[0], "\t", "  ")
	if err != nil {
		return fmt.Errorf("json.MarshalIndent: %w", err)
	}
	fmt.Fprintf(w, "Answer generated by the model:\n\t%s\n\n", string(jsondata))
	return nil
}

Node.js

const {
  VertexAI,
  FunctionDeclarationSchemaType,
} = require('@google-cloud/vertexai');

const functionDeclarations = [
  {
    function_declarations: [
      {
        name: 'get_product_sku',
        description:
          'Get the available inventory for a Google product, e.g. Pixel phones, Pixel Watches, Google Home, etc.',
        parameters: {
          type: FunctionDeclarationSchemaType.OBJECT,
          properties: {
            productName: {type: FunctionDeclarationSchemaType.STRING},
          },
        },
      },
      {
        name: 'get_store_location',
        description: 'Get the location of the closest store',
        parameters: {
          type: FunctionDeclarationSchemaType.OBJECT,
          properties: {
            location: {type: FunctionDeclarationSchemaType.STRING},
          },
        },
      },
    ],
  },
];

const toolConfig = {
  function_calling_config: {
    mode: 'ANY',
    allowed_function_names: ['get_product_sku'],
  },
};

const generationConfig = {
  temperature: 0.95,
  topP: 1.0,
  maxOutputTokens: 8192,
};

/**
 * TODO(developer): Update these variables before running the sample.
 */
async function functionCallingAdvanced(
  projectId = 'PROJECT_ID',
  location = 'us-central1',
  model = 'gemini-1.5-flash-001'
) {
  // Initialize Vertex with your Cloud project and location
  const vertexAI = new VertexAI({project: projectId, location: location});

  // Instantiate the model
  const generativeModel = vertexAI.preview.getGenerativeModel({
    model: model,
  });

  const request = {
    contents: [
      {
        role: 'user',
        parts: [
          {text: 'Do you have the White Pixel 8 Pro 128GB in stock in the US?'},
        ],
      },
    ],
    tools: functionDeclarations,
    tool_config: toolConfig,
    generation_config: generationConfig,
  };
  const result = await generativeModel.generateContent(request);
  console.log(JSON.stringify(result.response.candidates[0].content));
}

Ground with public data

Java

import com.google.cloud.vertexai.VertexAI;
import com.google.cloud.vertexai.api.GenerateContentResponse;
import com.google.cloud.vertexai.api.GoogleSearchRetrieval;
import com.google.cloud.vertexai.api.GroundingMetadata;
import com.google.cloud.vertexai.api.Tool;
import com.google.cloud.vertexai.generativeai.GenerativeModel;
import com.google.cloud.vertexai.generativeai.ResponseHandler;
import java.io.IOException;
import java.util.Collections;

public class GroundingWithPublicData {
  public static void main(String[] args) throws IOException {
    // TODO(developer): Replace these variables before running the sample.
    String projectId = "your-google-cloud-project-id";
    String location = "us-central1";
    String modelName = "gemini-1.5-flash-001";

    groundWithPublicData(projectId, location, modelName);
  }

  // A request whose response will be "grounded" with information found in Google Search.
  public static String groundWithPublicData(String projectId, String location, String modelName)
      throws IOException {
    // Initialize client that will be used to send requests.
    // This client only needs to be created once, and can be reused for multiple requests.
    try (VertexAI vertexAI = new VertexAI(projectId, location)) {
      Tool googleSearchTool =
          Tool.newBuilder()
              .setGoogleSearchRetrieval(
                  // Enable using the result from this tool in detecting grounding
                  GoogleSearchRetrieval.newBuilder())
              .build();

      GenerativeModel model =
          new GenerativeModel(modelName, vertexAI)
              .withTools(Collections.singletonList(googleSearchTool));

      GenerateContentResponse response = model.generateContent("Why is the sky blue?");

      GroundingMetadata groundingMetadata = response.getCandidates(0).getGroundingMetadata();
      String answer = ResponseHandler.getText(response);

      System.out.println("Answer: " + answer);
      System.out.println("Grounding metadata: " + groundingMetadata);

      return answer;
    }
  }
}

Node.js

const {VertexAI} = require('@google-cloud/vertexai');

/**
 * TODO(developer): Update these variables before running the sample.
 */
async function generateContentWithGoogleSearchGrounding(
  projectId = 'PROJECT_ID',
  location = 'us-central1',
  model = 'gemini-1.5-flash-001'
) {
  // Initialize Vertex with your Cloud project and location
  const vertexAI = new VertexAI({project: projectId, location: location});

  const generativeModelPreview = vertexAI.preview.getGenerativeModel({
    model: model,
    generationConfig: {maxOutputTokens: 256},
  });

  const googleSearchRetrievalTool = {
    googleSearchRetrieval: {},
  };

  const request = {
    contents: [{role: 'user', parts: [{text: 'Why is the sky blue?'}]}],
    tools: [googleSearchRetrievalTool],
  };

  const result = await generativeModelPreview.generateContent(request);
  const response = result.response;
  const groundingMetadata = response.candidates[0].groundingMetadata;
  console.log(
    'Response: ',
    JSON.stringify(response.candidates[0].content.parts[0].text)
  );
  console.log('GroundingMetadata is: ', JSON.stringify(groundingMetadata));
}

Ground with private data

Java

import com.google.cloud.vertexai.VertexAI;
import com.google.cloud.vertexai.api.GenerateContentResponse;
import com.google.cloud.vertexai.api.GroundingMetadata;
import com.google.cloud.vertexai.api.Retrieval;
import com.google.cloud.vertexai.api.Tool;
import com.google.cloud.vertexai.api.VertexAISearch;
import com.google.cloud.vertexai.generativeai.GenerativeModel;
import com.google.cloud.vertexai.generativeai.ResponseHandler;
import java.io.IOException;
import java.util.Collections;

public class GroundingWithPrivateData {
  public static void main(String[] args) throws IOException {
    // TODO(developer): Replace these variables before running the sample.
    String projectId = "your-google-cloud-project-id";
    String location = "us-central1";
    String modelName = "gemini-1.5-flash-001";
    String datastore = String.format(
        "projects/%s/locations/global/collections/default_collection/dataStores/%s",
        projectId, "datastore_id");

    groundWithPrivateData(projectId, location, modelName, datastore);
  }

  // A request whose response will be "grounded"
  // with information found in Vertex AI Search datastores.
  public static String groundWithPrivateData(String projectId, String location, String modelName,
                                             String datastoreId)
      throws IOException {
    // Initialize client that will be used to send requests.
    // This client only needs to be created once, and can be reused for multiple requests.
    try (VertexAI vertexAI = new VertexAI(projectId, location)) {
      Tool datastoreTool = Tool.newBuilder()
          .setRetrieval(
              Retrieval.newBuilder()
                  .setVertexAiSearch(VertexAISearch.newBuilder().setDatastore(datastoreId))
                  .setDisableAttribution(false))
          .build();

      GenerativeModel model = new GenerativeModel(modelName, vertexAI).withTools(
          Collections.singletonList(datastoreTool)
      );

      GenerateContentResponse response = model.generateContent(
          "How do I make an appointment to renew my driver's license?");

      GroundingMetadata groundingMetadata = response.getCandidates(0).getGroundingMetadata();
      String answer = ResponseHandler.getText(response);

      System.out.println("Answer: " + answer);
      System.out.println("Grounding metadata: " + groundingMetadata);

      return answer;
    }
  }
}

Node.js

const {
  VertexAI,
  HarmCategory,
  HarmBlockThreshold,
} = require('@google-cloud/vertexai');

/**
 * TODO(developer): Update these variables before running the sample.
 */
async function generateContentWithVertexAISearchGrounding(
  projectId = 'PROJECT_ID',
  location = 'us-central1',
  model = 'gemini-1.5-flash-001',
  dataStoreId = 'DATASTORE_ID'
) {
  // Initialize Vertex with your Cloud project and location
  const vertexAI = new VertexAI({project: projectId, location: location});

  const generativeModelPreview = vertexAI.preview.getGenerativeModel({
    model: model,
    // The following parameters are optional
    // They can also be passed to individual content generation requests
    safetySettings: [
      {
        category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
        threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
      },
    ],
    generationConfig: {maxOutputTokens: 256},
  });

  const vertexAIRetrievalTool = {
    retrieval: {
      vertexAiSearch: {
        datastore: `projects/${projectId}/locations/global/collections/default_collection/dataStores/${dataStoreId}`,
      },
      disableAttribution: false,
    },
  };

  const request = {
    contents: [{role: 'user', parts: [{text: 'Why is the sky blue?'}]}],
    tools: [vertexAIRetrievalTool],
  };

  const result = await generativeModelPreview.generateContent(request);
  const response = result.response;
  const groundingMetadata = response.candidates[0].groundingMetadata;
  console.log('Response: ', JSON.stringify(response.candidates[0]));
  console.log('GroundingMetadata is: ', JSON.stringify(groundingMetadata));
}

Response body

If successful, the response body contains an instance of GenerateContentResponse.
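
As a rough sketch of reading the returned GenerateContentResponse with the Python SDK (field access mirrors the REST structure; values are placeholders):

import vertexai
from vertexai.generative_models import GenerativeModel

vertexai.init(project="your-project-id", location="us-central1")
model = GenerativeModel("gemini-1.5-flash-002")
response = model.generate_content("Why is the sky blue?")

# A response carries one or more candidates plus usage metadata.
candidate = response.candidates[0]
print(candidate.content.parts[0].text)  # the generated text
print(candidate.finish_reason)          # why generation stopped
print(response.usage_metadata)          # prompt/response token counts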