Package Methods (1.75.0)

Summary of the method entries for the aiplatform package.

vertexai.init

init(
    *,
    project: typing.Optional[str] = None,
    location: typing.Optional[str] = None,
    experiment: typing.Optional[str] = None,
    experiment_description: typing.Optional[str] = None,
    experiment_tensorboard: typing.Optional[
        typing.Union[
            str,
            google.cloud.aiplatform.tensorboard.tensorboard_resource.Tensorboard,
            bool,
        ]
    ] = None,
    staging_bucket: typing.Optional[str] = None,
    credentials: typing.Optional[google.auth.credentials.Credentials] = None,
    encryption_spec_key_name: typing.Optional[str] = None,
    network: typing.Optional[str] = None,
    service_account: typing.Optional[str] = None,
    api_endpoint: typing.Optional[str] = None,
    api_key: typing.Optional[str] = None,
    api_transport: typing.Optional[str] = None,
    request_metadata: typing.Optional[typing.Sequence[typing.Tuple[str, str]]] = None
)

Updates common initialization parameters with provided options.

See more: vertexai.init
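
A minimal sketch (the project ID, region, and bucket below are placeholders):

import vertexai

vertexai.init(
    project="my-project",
    location="us-central1",
    staging_bucket="gs://my-staging-bucket",
)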

vertexai.preview.end_run

end_run(
    state: google.cloud.aiplatform_v1.types.execution.Execution.State = State.COMPLETE,
)

Ends the current experiment run.

See more: vertexai.preview.end_run

vertexai.preview.get_experiment_df

get_experiment_df(
    experiment: typing.Optional[str] = None, *, include_time_series: bool = True
) -> pd.DataFrame

Returns a Pandas DataFrame of the parameters and metrics associated with one experiment.

See more: vertexai.preview.get_experiment_df

vertexai.preview.log_classification_metrics

log_classification_metrics(
    *,
    labels: typing.Optional[typing.List[str]] = None,
    matrix: typing.Optional[typing.List[typing.List[int]]] = None,
    fpr: typing.Optional[typing.List[float]] = None,
    tpr: typing.Optional[typing.List[float]] = None,
    threshold: typing.Optional[typing.List[float]] = None,
    display_name: typing.Optional[str] = None
) -> (
    google.cloud.aiplatform.metadata.schema.google.artifact_schema.ClassificationMetrics
)

Creates an artifact for classification metrics and logs it to the ExperimentRun.

See more: vertexai.preview.log_classification_metrics
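
A hedged sketch with made-up labels and confusion-matrix values; it assumes an experiment run is already active (see start_run below):

from vertexai import preview

preview.log_classification_metrics(
    labels=["cat", "dog"],
    matrix=[[9, 1], [2, 8]],
    display_name="confusion-matrix",
)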

vertexai.preview.log_metrics

log_metrics(metrics: typing.Dict[str, typing.Union[float, int, str]])

Logs single or multiple metrics with specified key and value pairs.

See more: vertexai.preview.log_metrics

vertexai.preview.log_params

log_params(params: typing.Dict[str, typing.Union[float, int, str]])

Logs single or multiple parameters with specified key and value pairs.

See more: vertexai.preview.log_params

vertexai.preview.log_time_series_metrics

log_time_series_metrics(
    metrics: typing.Dict[str, float],
    step: typing.Optional[int] = None,
    wall_time: typing.Optional[google.protobuf.timestamp_pb2.Timestamp] = None,
)

Logs time series metrics to this Experiment Run.

See more: vertexai.preview.log_time_series_metrics

vertexai.preview.start_run

start_run(
    run: str,
    *,
    tensorboard: typing.Optional[
        typing.Union[
            google.cloud.aiplatform.tensorboard.tensorboard_resource.Tensorboard, str
        ]
    ] = None,
    resume=False
) -> google.cloud.aiplatform.metadata.experiment_run_resource.ExperimentRun

Starts a run in the current session.

See more: vertexai.preview.start_run
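
A sketch of the experiment-tracking flow using the functions above; the project, experiment, and run names are placeholders, and logging time series metrics assumes the experiment has a backing Tensorboard:

import vertexai
from vertexai import preview

vertexai.init(project="my-project", location="us-central1", experiment="my-experiment")
preview.start_run("run-1")
preview.log_params({"learning_rate": 0.01, "optimizer": "adam"})
preview.log_metrics({"accuracy": 0.95})
preview.log_time_series_metrics({"loss": 0.12}, step=10)  # needs a backing Tensorboard
preview.end_run()

# Parameters and metrics for the whole experiment as a DataFrame.
df = preview.get_experiment_df("my-experiment")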

vertexai.preview.prompts.create_version

create_version(
    prompt: vertexai.prompts._prompts.Prompt,
    prompt_id: typing.Optional[str] = None,
    version_name: typing.Optional[str] = None,
) -> vertexai.prompts._prompts.Prompt

Creates a Prompt or Prompt Version in the online prompt store.

See more: vertexai.preview.prompts.create_version

vertexai.preview.prompts.delete

delete(prompt_id: str) -> None

Deletes the online prompt resource associated with the prompt id.

See more: vertexai.preview.prompts.delete

vertexai.preview.prompts.get

get(
    prompt_id: str, version_id: typing.Optional[str] = None
) -> vertexai.prompts._prompts.Prompt

Creates a Prompt object from an online resource.

See more: vertexai.preview.prompts.get

vertexai.preview.prompts.list

list() -> list[vertexai.prompts._prompt_management.PromptMetadata]

Lists all prompt resources in the online prompt store associated with the project.

See more: vertexai.preview.prompts.list

vertexai.preview.prompts.list_versions

list_versions(
    prompt_id: str,
) -> list[vertexai.prompts._prompt_management.PromptVersionMetadata]

Returns a list of PromptVersionMetadata objects for the prompt resource.

See more: vertexai.preview.prompts.list_versions

vertexai.preview.prompts.restore_version

restore_version(
    prompt_id: str, version_id: str
) -> vertexai.prompts._prompt_management.PromptVersionMetadata

Restores a previous version of the prompt resource and loads that version into the current Prompt object.

See more: vertexai.preview.prompts.restore_version
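
A sketch of the prompt-management lifecycle; the Prompt constructor field prompt_data and the prompt_id and version_id attributes are assumptions about the Prompt and metadata classes documented elsewhere:

from vertexai.preview import prompts
from vertexai.preview.prompts import Prompt

prompt = Prompt(prompt_data="Summarize the following text: {text}")  # assumed field
saved = prompts.create_version(prompt=prompt, version_name="v1")

fetched = prompts.get(prompt_id=saved.prompt_id)
for version in prompts.list_versions(prompt_id=saved.prompt_id):
    print(version.version_id)
prompts.delete(prompt_id=saved.prompt_id)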

vertexai.preview.tuning.sft.rebase_tuned_model

rebase_tuned_model(
    tuned_model_ref: str,
    *,
    artifact_destination: typing.Optional[str] = None,
    deploy_to_same_endpoint: typing.Optional[bool] = False
)

Re-runs fine-tuning on top of a new foundational model.

See more: vertexai.preview.tuning.sft.rebase_tuned_model

vertexai.preview.tuning.sft.train

train(
    *,
    source_model: typing.Union[str, vertexai.generative_models.GenerativeModel],
    train_dataset: str,
    validation_dataset: typing.Optional[str] = None,
    tuned_model_display_name: typing.Optional[str] = None,
    epochs: typing.Optional[int] = None,
    learning_rate_multiplier: typing.Optional[float] = None,
    adapter_size: typing.Optional[typing.Literal[1, 4, 8, 16]] = None,
    labels: typing.Optional[typing.Dict[str, str]] = None
) -> vertexai.tuning._supervised_tuning.SupervisedTuningJob

Tunes a model using supervised training.

See more: vertexai.preview.tuning.sft.train
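
A sketch following the public supervised-tuning samples; the model name and Cloud Storage path are placeholders, and has_ended, refresh, and tuned_model_name are assumed attributes of SupervisedTuningJob:

import time

from vertexai.preview.tuning import sft

sft_tuning_job = sft.train(
    source_model="gemini-1.5-flash-002",
    train_dataset="gs://my-bucket/train.jsonl",
)
while not sft_tuning_job.has_ended:  # poll until the job finishes
    time.sleep(60)
    sft_tuning_job.refresh()
print(sft_tuning_job.tuned_model_name)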

vertexai.prompts._prompt_management.create_version

create_version(
    prompt: vertexai.prompts._prompts.Prompt,
    prompt_id: typing.Optional[str] = None,
    version_name: typing.Optional[str] = None,
) -> vertexai.prompts._prompts.Prompt

Creates a Prompt or Prompt Version in the online prompt store.

See more: vertexai.prompts._prompt_management.create_version

vertexai.prompts._prompt_management.delete

delete(prompt_id: str) -> None

Deletes the online prompt resource associated with the prompt id.

See more: vertexai.prompts._prompt_management.delete

vertexai.prompts._prompt_management.get

get(
    prompt_id: str, version_id: typing.Optional[str] = None
) -> vertexai.prompts._prompts.Prompt

Creates a Prompt object from an online resource.

See more: vertexai.prompts._prompt_management.get

vertexai.prompts._prompt_management.list_prompts

list_prompts() -> list[vertexai.prompts._prompt_management.PromptMetadata]

Lists all prompt resources in the online prompt store associated with the project.

See more: vertexai.prompts._prompt_management.list_prompts

vertexai.prompts._prompt_management.list_versions

list_versions(
    prompt_id: str,
) -> list[vertexai.prompts._prompt_management.PromptVersionMetadata]

Returns a list of PromptVersionMetadata objects for the prompt resource.

See more: vertexai.prompts._prompt_management.list_versions

vertexai.prompts._prompt_management.restore_version

restore_version(
    prompt_id: str, version_id: str
) -> vertexai.prompts._prompt_management.PromptVersionMetadata

Restores a previous version of the prompt resource and loads that version into the current Prompt object.

See more: vertexai.prompts._prompt_management.restore_version

vertexai.evaluation.CustomMetric

CustomMetric(
    name: str,
    metric_function: typing.Callable[
        [typing.Dict[str, typing.Any]], typing.Dict[str, typing.Any]
    ],
)

Initializes the evaluation metric.

See more: vertexai.evaluation.CustomMetric
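
A minimal sketch: the metric function receives one dataset row as a dict and returns a dict keyed by the metric name (the "response" column name is an assumption about the dataset):

from vertexai.evaluation import CustomMetric

def word_count(instance: dict) -> dict:
    return {"word_count": len(instance["response"].split())}

metric = CustomMetric(name="word_count", metric_function=word_count)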

vertexai.evaluation.EvalTask

EvalTask(
    *,
    dataset: typing.Union[pd.DataFrame, str, typing.Dict[str, typing.Any]],
    metrics: typing.List[
        typing.Union[
            typing.Literal[
                "exact_match",
                "bleu",
                "rouge_1",
                "rouge_2",
                "rouge_l",
                "rouge_l_sum",
                "tool_call_valid",
                "tool_name_match",
                "tool_parameter_key_match",
                "tool_parameter_kv_match",
            ],
            vertexai.evaluation.CustomMetric,
            vertexai.evaluation.metrics._base._AutomaticMetric,
            vertexai.evaluation.metrics._base._TranslationMetric,
            vertexai.evaluation.metrics.pointwise_metric.PointwiseMetric,
            vertexai.evaluation.metrics.pairwise_metric.PairwiseMetric,
        ]
    ],
    experiment: typing.Optional[str] = None,
    metric_column_mapping: typing.Optional[typing.Dict[str, str]] = None,
    output_uri_prefix: typing.Optional[str] = ""
)

Initializes an EvalTask.

See more: vertexai.evaluation.EvalTask

vertexai.evaluation.EvalTask.display_runs

display_runs()

Displays experiment runs associated with this EvalTask.

See more: vertexai.evaluation.EvalTask.display_runs

vertexai.evaluation.EvalTask.evaluate

evaluate(
    *,
    model: typing.Optional[
        typing.Union[
            vertexai.generative_models.GenerativeModel, typing.Callable[[str], str]
        ]
    ] = None,
    prompt_template: typing.Optional[str] = None,
    experiment_run_name: typing.Optional[str] = None,
    response_column_name: typing.Optional[str] = None,
    baseline_model_response_column_name: typing.Optional[str] = None,
    evaluation_service_qps: typing.Optional[float] = None,
    retry_timeout: float = 120.0,
    output_file_name: typing.Optional[str] = None
) -> vertexai.evaluation.EvalResult

Runs an evaluation for the EvalTask.

See more: vertexai.evaluation.EvalTask.evaluate
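
A sketch of the end-to-end evaluation flow; the dataset rows, experiment name, and model name are placeholders (the "reference" column is required by reference-based metrics such as bleu):

import pandas as pd

from vertexai.evaluation import EvalTask
from vertexai.generative_models import GenerativeModel

eval_dataset = pd.DataFrame(
    {
        "prompt": ["Say hello.", "Say goodbye."],
        "reference": ["Hello!", "Goodbye!"],
    }
)
task = EvalTask(dataset=eval_dataset, metrics=["exact_match", "bleu"], experiment="my-eval")
result = task.evaluate(
    model=GenerativeModel("gemini-1.5-flash-002"),
    experiment_run_name="eval-run-1",
)
print(result.summary_metrics)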

vertexai.evaluation.MetricPromptTemplateExamples.get_prompt_template

get_prompt_template(metric_name: str) -> str

Returns the prompt template for the given metric name.

See more: vertexai.evaluation.MetricPromptTemplateExamples.get_prompt_template

vertexai.evaluation.MetricPromptTemplateExamples.list_example_metric_names

list_example_metric_names() -> typing.List[str]

Returns a list of all example metric names.

See more: vertexai.evaluation.MetricPromptTemplateExamples.list_example_metric_names

vertexai.evaluation.PairwiseMetric

PairwiseMetric(
    *,
    metric: str,
    metric_prompt_template: typing.Union[
        vertexai.evaluation.metrics.metric_prompt_template.PairwiseMetricPromptTemplate,
        str,
    ],
    baseline_model: typing.Optional[
        typing.Union[
            vertexai.generative_models.GenerativeModel, typing.Callable[[str], str]
        ]
    ] = None
)

Initializes a pairwise evaluation metric.

See more: vertexai.evaluation.PairwiseMetric
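
A sketch pairing the metric with a bundled example template; "pairwise_fluency" is assumed to be among the example metric names, and the model names are placeholders:

from vertexai.evaluation import MetricPromptTemplateExamples, PairwiseMetric
from vertexai.generative_models import GenerativeModel

pairwise_fluency = PairwiseMetric(
    metric="pairwise_fluency",
    metric_prompt_template=MetricPromptTemplateExamples.get_prompt_template("pairwise_fluency"),
    baseline_model=GenerativeModel("gemini-1.5-flash-001"),
)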

vertexai.evaluation.PairwiseMetricPromptTemplate

PairwiseMetricPromptTemplate(
    *,
    criteria: typing.Dict[str, str],
    rating_rubric: typing.Dict[str, str],
    input_variables: typing.Optional[typing.List[str]] = None,
    instruction: typing.Optional[str] = None,
    metric_definition: typing.Optional[str] = None,
    evaluation_steps: typing.Optional[typing.Dict[str, str]] = None,
    few_shot_examples: typing.Optional[typing.List[str]] = None
)

Initializes a pairwise metric prompt template.

See more: vertexai.evaluation.PairwiseMetricPromptTemplate

vertexai.evaluation.PairwiseMetricPromptTemplate.__str__

__str__()

Serializes the pairwise metric prompt template to a string.

See more: vertexai.evaluation.PairwiseMetricPromptTemplate.__str__

vertexai.evaluation.PairwiseMetricPromptTemplate.assemble

assemble(**kwargs) -> vertexai.evaluation.prompt_template.PromptTemplate

Replaces only the provided variables in the template with specific values.

See more: vertexai.evaluation.PairwiseMetricPromptTemplate.assemble

vertexai.evaluation.PairwiseMetricPromptTemplate.get_default_pairwise_evaluation_steps

get_default_pairwise_evaluation_steps() -> typing.Dict[str, str]

Returns the default evaluation steps for the metric prompt template.

See more: vertexai.evaluation.PairwiseMetricPromptTemplate.get_default_pairwise_evaluation_steps

vertexai.evaluation.PairwiseMetricPromptTemplate.get_default_pairwise_instruction

get_default_pairwise_instruction() -> str

Returns the default instruction for the metric prompt template.

See more: vertexai.evaluation.PairwiseMetricPromptTemplate.get_default_pairwise_instruction

vertexai.evaluation.PointwiseMetric

PointwiseMetric(
    *,
    metric: str,
    metric_prompt_template: typing.Union[
        vertexai.evaluation.metrics.metric_prompt_template.PointwiseMetricPromptTemplate,
        str,
    ]
)

Initializes a pointwise evaluation metric.

See more: vertexai.evaluation.PointwiseMetric
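
A sketch using a bundled example template; "fluency" is assumed to be among the example metric names returned by list_example_metric_names:

from vertexai.evaluation import MetricPromptTemplateExamples, PointwiseMetric

fluency = PointwiseMetric(
    metric="fluency",
    metric_prompt_template=MetricPromptTemplateExamples.get_prompt_template("fluency"),
)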

vertexai.evaluation.PointwiseMetricPromptTemplate

PointwiseMetricPromptTemplate(
    *,
    criteria: typing.Dict[str, str],
    rating_rubric: typing.Dict[str, str],
    input_variables: typing.Optional[typing.List[str]] = None,
    instruction: typing.Optional[str] = None,
    metric_definition: typing.Optional[str] = None,
    evaluation_steps: typing.Optional[typing.Dict[str, str]] = None,
    few_shot_examples: typing.Optional[typing.List[str]] = None
)

Initializes a pointwise metric prompt template.

See more: vertexai.evaluation.PointwiseMetricPromptTemplate
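
A sketch with illustrative criteria and rubric values:

from vertexai.evaluation import PointwiseMetric, PointwiseMetricPromptTemplate

template = PointwiseMetricPromptTemplate(
    criteria={"clarity": "The response is easy to follow."},
    rating_rubric={"1": "The response is clear.", "0": "The response is unclear."},
)
clarity = PointwiseMetric(metric="clarity", metric_prompt_template=template)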

vertexai.evaluation.PointwiseMetricPromptTemplate.__str__

__str__()

Serializes the pointwise metric prompt template to a string.

See more: vertexai.evaluation.PointwiseMetricPromptTemplate.__str__

vertexai.evaluation.PointwiseMetricPromptTemplate.assemble

assemble(**kwargs) -> vertexai.evaluation.prompt_template.PromptTemplate

Replaces only the provided variables in the template with specific values.

See more: vertexai.evaluation.PointwiseMetricPromptTemplate.assemble

vertexai.evaluation.PointwiseMetricPromptTemplate.get_default_pointwise_evaluation_steps

get_default_pointwise_evaluation_steps() -> typing.Dict[str, str]

Returns the default evaluation steps for the metric prompt template.

See more: vertexai.evaluation.PointwiseMetricPromptTemplate.get_default_pointwise_evaluation_steps

vertexai.evaluation.PointwiseMetricPromptTemplate.get_default_pointwise_instruction

get_default_pointwise_instruction() -> str

Returns the default instruction for the metric prompt template.

See more: vertexai.evaluation.PointwiseMetricPromptTemplate.get_default_pointwise_instruction

vertexai.evaluation.PromptTemplate

PromptTemplate(template: str)

Initializes the PromptTemplate with a given template.

See more: vertexai.evaluation.PromptTemplate

vertexai.evaluation.PromptTemplate.__repr__

__repr__() -> str

Returns a string representation of the PromptTemplate.

See more: vertexai.evaluation.PromptTemplate.__repr__

vertexai.evaluation.PromptTemplate.__str__

__str__() -> str

Returns the template string.

See more: vertexai.evaluation.PromptTemplate.__str__

vertexai.evaluation.PromptTemplate.assemble

assemble(**kwargs) -> vertexai.evaluation.prompt_template.PromptTemplate

Replaces only the provided variables in the template with specific values.

See more: vertexai.evaluation.PromptTemplate.assemble
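
A minimal sketch of partial assembly; variables not yet provided stay in the template:

from vertexai.evaluation import PromptTemplate

template = PromptTemplate("Translate to {language}: {text}")
partial = template.assemble(language="French")  # only {language} is replaced
print(str(partial.assemble(text="Good morning")))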

vertexai.evaluation.Rouge

Rouge(
    *,
    rouge_type: typing.Literal[
        "rouge1",
        "rouge2",
        "rouge3",
        "rouge4",
        "rouge5",
        "rouge6",
        "rouge7",
        "rouge8",
        "rouge9",
        "rougeL",
        "rougeLsum",
    ],
    use_stemmer: bool = False,
    split_summaries: bool = False
)

Initializes the ROUGE metric.

See more: vertexai.evaluation.Rouge
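
A minimal sketch; the configured metric can then be passed in an EvalTask metrics list:

from vertexai.evaluation import Rouge

rouge = Rouge(rouge_type="rougeLsum", use_stemmer=True)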

vertexai.generative_models.ChatSession.send_message

Generates content.

See more: vertexai.generative_models.ChatSession.send_message

vertexai.generative_models.ChatSession.send_message_async

Generates content asynchronously.

See more: vertexai.generative_models.ChatSession.send_message_async

vertexai.generative_models.FunctionDeclaration

FunctionDeclaration(
    *,
    name: str,
    parameters: typing.Dict[str, typing.Any],
    description: typing.Optional[str] = None,
    response: typing.Optional[typing.Dict[str, typing.Any]] = None
)

Constructs a FunctionDeclaration.

See more: vertexai.generative_models.FunctionDeclaration
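
A sketch declaring a function and wrapping it in a Tool; the parameters dict follows the OpenAPI-style schema the API expects, and the function name is illustrative:

from vertexai.generative_models import FunctionDeclaration, Tool

get_weather = FunctionDeclaration(
    name="get_current_weather",
    description="Returns the current weather in a given location.",
    parameters={
        "type": "object",
        "properties": {"location": {"type": "string"}},
        "required": ["location"],
    },
)
weather_tool = Tool(function_declarations=[get_weather])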

vertexai.generative_models.GenerationConfig

GenerationConfig(
    *,
    temperature: typing.Optional[float] = None,
    top_p: typing.Optional[float] = None,
    top_k: typing.Optional[int] = None,
    candidate_count: typing.Optional[int] = None,
    max_output_tokens: typing.Optional[int] = None,
    stop_sequences: typing.Optional[typing.List[str]] = None,
    presence_penalty: typing.Optional[float] = None,
    frequency_penalty: typing.Optional[float] = None,
    response_mime_type: typing.Optional[str] = None,
    response_schema: typing.Optional[typing.Dict[str, typing.Any]] = None,
    seed: typing.Optional[int] = None,
    audio_timestamp: typing.Optional[bool] = None,
    routing_config: typing.Optional[RoutingConfig] = None,
    logprobs: typing.Optional[int] = None,
    response_logprobs: typing.Optional[bool] = None,
    response_modalities: typing.Optional[typing.List[GenerationConfig.Modality]] = None
)

Constructs a GenerationConfig object.

See more: vertexai.generative_models.GenerationConfig
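
A minimal sketch (the model name is a placeholder):

from vertexai.generative_models import GenerationConfig, GenerativeModel

config = GenerationConfig(temperature=0.2, top_p=0.95, max_output_tokens=1024, seed=42)
model = GenerativeModel("gemini-1.5-flash-002", generation_config=config)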

vertexai.generative_models.GenerationConfig.RoutingConfig.AutoRoutingMode

AutoRoutingMode(
    *,
    model_routing_preference: google.cloud.aiplatform_v1beta1.types.content.GenerationConfig.RoutingConfig.AutoRoutingMode.ModelRoutingPreference
)

vertexai.generative_models.GenerationConfig.RoutingConfig.ManualRoutingMode

ManualRoutingMode(*, model_name: str)

vertexai.generative_models.GenerativeModel.compute_tokens

compute_tokens(
    contents: typing.Union[
        typing.List[vertexai.generative_models._generative_models.Content],
        typing.List[typing.Dict[str, typing.Any]],
        str,
        vertexai.generative_models._generative_models.Image,
        vertexai.generative_models._generative_models.Part,
        typing.List[
            typing.Union[
                str,
                vertexai.generative_models._generative_models.Image,
                vertexai.generative_models._generative_models.Part,
            ]
        ],
    ]
) -> google.cloud.aiplatform_v1beta1.types.llm_utility_service.ComputeTokensResponse

Computes tokens.

See more: vertexai.generative_models.GenerativeModel.compute_tokens

vertexai.generative_models.GenerativeModel.compute_tokens_async

compute_tokens_async(
    contents: typing.Union[
        typing.List[vertexai.generative_models._generative_models.Content],
        typing.List[typing.Dict[str, typing.Any]],
        str,
        vertexai.generative_models._generative_models.Image,
        vertexai.generative_models._generative_models.Part,
        typing.List[
            typing.Union[
                str,
                vertexai.generative_models._generative_models.Image,
                vertexai.generative_models._generative_models.Part,
            ]
        ],
    ]
) -> google.cloud.aiplatform_v1beta1.types.llm_utility_service.ComputeTokensResponse

Computes tokens asynchronously.

See more: vertexai.generative_models.GenerativeModel.compute_tokens_async

vertexai.generative_models.GenerativeModel.count_tokens

count_tokens(
    contents: typing.Union[
        typing.List[vertexai.generative_models._generative_models.Content],
        typing.List[typing.Dict[str, typing.Any]],
        str,
        vertexai.generative_models._generative_models.Image,
        vertexai.generative_models._generative_models.Part,
        typing.List[
            typing.Union[
                str,
                vertexai.generative_models._generative_models.Image,
                vertexai.generative_models._generative_models.Part,
            ]
        ],
    ],
    *,
    tools: typing.Optional[
        typing.List[vertexai.generative_models._generative_models.Tool]
    ] = None
) -> google.cloud.aiplatform_v1beta1.types.prediction_service.CountTokensResponse

Counts tokens.

See more: vertexai.generative_models.GenerativeModel.count_tokens

vertexai.generative_models.GenerativeModel.count_tokens_async

count_tokens_async(
    contents: typing.Union[
        typing.List[vertexai.generative_models._generative_models.Content],
        typing.List[typing.Dict[str, typing.Any]],
        str,
        vertexai.generative_models._generative_models.Image,
        vertexai.generative_models._generative_models.Part,
        typing.List[
            typing.Union[
                str,
                vertexai.generative_models._generative_models.Image,
                vertexai.generative_models._generative_models.Part,
            ]
        ],
    ],
    *,
    tools: typing.Optional[
        typing.List[vertexai.generative_models._generative_models.Tool]
    ] = None
) -> google.cloud.aiplatform_v1beta1.types.prediction_service.CountTokensResponse

Counts tokens asynchronously.

See more: vertexai.generative_models.GenerativeModel.count_tokens_async
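
A minimal sketch of token counting (the model name is a placeholder):

from vertexai.generative_models import GenerativeModel

model = GenerativeModel("gemini-1.5-flash-002")
response = model.count_tokens("Why is the sky blue?")
print(response.total_tokens, response.total_billable_characters)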

vertexai.generative_models.GenerativeModel.generate_content

generate_content(
    contents: typing.Union[
        typing.List[vertexai.generative_models._generative_models.Content],
        typing.List[typing.Dict[str, typing.Any]],
        str,
        vertexai.generative_models._generative_models.Image,
        vertexai.generative_models._generative_models.Part,
        typing.List[
            typing.Union[
                str,
                vertexai.generative_models._generative_models.Image,
                vertexai.generative_models._generative_models.Part,
            ]
        ],
    ],
    *,
    generation_config: typing.Optional[
        typing.Union[
            vertexai.generative_models._generative_models.GenerationConfig,
            typing.Dict[str, typing.Any],
        ]
    ] = None,
    safety_settings: typing.Optional[
        typing.Union[
            typing.List[vertexai.generative_models._generative_models.SafetySetting],
            typing.Dict[
                google.cloud.aiplatform_v1beta1.types.content.HarmCategory,
                google.cloud.aiplatform_v1beta1.types.content.SafetySetting.HarmBlockThreshold,
            ],
        ]
    ] = None,
    tools: typing.Optional[
        typing.List[vertexai.generative_models._generative_models.Tool]
    ] = None,
    tool_config: typing.Optional[
        vertexai.generative_models._generative_models.ToolConfig
    ] = None,
    labels: typing.Optional[typing.Dict[str, str]] = None,
    stream: bool = False
) -> typing.Union[
    vertexai.generative_models._generative_models.GenerationResponse,
    typing.Iterable[vertexai.generative_models._generative_models.GenerationResponse],
]

Generates content.

See more: vertexai.generative_models.GenerativeModel.generate_content
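
A minimal sketch of non-streaming and streaming calls (the model name is a placeholder):

from vertexai.generative_models import GenerativeModel

model = GenerativeModel("gemini-1.5-flash-002")
print(model.generate_content("Why is the sky blue?").text)

# With stream=True an iterable of partial responses is returned instead.
for chunk in model.generate_content("Tell me a story.", stream=True):
    print(chunk.text, end="")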

vertexai.generative_models.GenerativeModel.generate_content_async

generate_content_async(
    contents: typing.Union[
        typing.List[vertexai.generative_models._generative_models.Content],
        typing.List[typing.Dict[str, typing.Any]],
        str,
        vertexai.generative_models._generative_models.Image,
        vertexai.generative_models._generative_models.Part,
        typing.List[
            typing.Union[
                str,
                vertexai.generative_models._generative_models.Image,
                vertexai.generative_models._generative_models.Part,
            ]
        ],
    ],
    *,
    generation_config: typing.Optional[
        typing.Union[
            vertexai.generative_models._generative_models.GenerationConfig,
            typing.Dict[str, typing.Any],
        ]
    ] = None,
    safety_settings: typing.Optional[
        typing.Union[
            typing.List[vertexai.generative_models._generative_models.SafetySetting],
            typing.Dict[
                google.cloud.aiplatform_v1beta1.types.content.HarmCategory,
                google.cloud.aiplatform_v1beta1.types.content.SafetySetting.HarmBlockThreshold,
            ],
        ]
    ] = None,
    tools: typing.Optional[
        typing.List[vertexai.generative_models._generative_models.Tool]
    ] = None,
    tool_config: typing.Optional[
        vertexai.generative_models._generative_models.ToolConfig
    ] = None,
    labels: typing.Optional[typing.Dict[str, str]] = None,
    stream: bool = False
) -> typing.Union[
    vertexai.generative_models._generative_models.GenerationResponse,
    typing.AsyncIterable[
        vertexai.generative_models._generative_models.GenerationResponse
    ],
]

Generates content asynchronously.

See more: vertexai.generative_models.GenerativeModel.generate_content_async

vertexai.generative_models.GenerativeModel.start_chat

start_chat(
    *,
    history: typing.Optional[
        typing.List[vertexai.generative_models._generative_models.Content]
    ] = None,
    response_validation: bool = True
) -> vertexai.generative_models._generative_models.ChatSession

Creates a stateful chat session.

See more: vertexai.generative_models.GenerativeModel.start_chat
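
A minimal sketch; the session keeps history between turns (the model name is a placeholder):

from vertexai.generative_models import GenerativeModel

chat = GenerativeModel("gemini-1.5-flash-002").start_chat()
print(chat.send_message("My name is Ada.").text)
print(chat.send_message("What is my name?").text)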

vertexai.generative_models.Image.from_bytes

from_bytes(data: bytes) -> vertexai.generative_models._generative_models.Image

Loads image from image bytes.

See more: vertexai.generative_models.Image.from_bytes

vertexai.generative_models.Image.load_from_file

load_from_file(
    location: str,
) -> vertexai.generative_models._generative_models.Image

Loads image from file.

See more: vertexai.generative_models.Image.load_from_file
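
A minimal sketch of multimodal input (the file path and model name are placeholders):

from vertexai.generative_models import GenerativeModel, Image

image = Image.load_from_file("path/to/photo.jpg")
response = GenerativeModel("gemini-1.5-flash-002").generate_content(
    [image, "Describe this image."]
)
print(response.text)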

vertexai.generative_models.ResponseValidationError.with_traceback

Exception.with_traceback(tb) -- set self.__traceback__ to tb and return self.

See more: vertexai.generative_models.ResponseValidationError.with_traceback

vertexai.generative_models.SafetySetting

SafetySetting(
    *,
    category: google.cloud.aiplatform_v1beta1.types.content.HarmCategory,
    threshold: google.cloud.aiplatform_v1beta1.types.content.SafetySetting.HarmBlockThreshold,
    method: typing.Optional[
        google.cloud.aiplatform_v1beta1.types.content.SafetySetting.HarmBlockMethod
    ] = None
)

Safety settings.

See more: vertexai.generative_models.SafetySetting
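
A minimal sketch applying one safety setting to a model (the model name is a placeholder):

from vertexai.generative_models import (
    GenerativeModel,
    HarmBlockThreshold,
    HarmCategory,
    SafetySetting,
)

setting = SafetySetting(
    category=HarmCategory.HARM_CATEGORY_HATE_SPEECH,
    threshold=HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
)
model = GenerativeModel("gemini-1.5-flash-002", safety_settings=[setting])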

vertexai.generative_models.grounding.DynamicRetrievalConfig

DynamicRetrievalConfig(
    mode: google.cloud.aiplatform_v1beta1.types.tool.DynamicRetrievalConfig.Mode = Mode.MODE_UNSPECIFIED,
    dynamic_threshold: typing.Optional[float] = None,
)

Initializes a DynamicRetrievalConfig.

See more: vertexai.generative_models.grounding.DynamicRetrievalConfig

vertexai.generative_models.grounding.GoogleSearchRetrieval

GoogleSearchRetrieval(
    dynamic_retrieval_config: typing.Optional[
        vertexai.generative_models._generative_models.grounding.DynamicRetrievalConfig
    ] = None,
)

Initializes a Google Search Retrieval tool.

See more: vertexai.generative_models.grounding.GoogleSearchRetrieval

vertexai.generative_models.grounding.Retrieval

Retrieval(
    source: vertexai.generative_models._generative_models.grounding.VertexAISearch,
    disable_attribution: typing.Optional[bool] = None,
)

Initializes a Retrieval tool.

See more: vertexai.generative_models.grounding.Retrieval

vertexai.generative_models.grounding.VertexAISearch

VertexAISearch(
    datastore: str,
    *,
    project: typing.Optional[str] = None,
    location: typing.Optional[str] = None
)

Initializes a Vertex AI Search tool.

See more: vertexai.generative_models.grounding.VertexAISearch
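
A sketch grounding a response in Google Search results via Tool.from_google_search_retrieval (the model name is a placeholder):

from vertexai.generative_models import GenerativeModel, Tool, grounding

search_tool = Tool.from_google_search_retrieval(grounding.GoogleSearchRetrieval())
response = GenerativeModel("gemini-1.5-flash-002").generate_content(
    "Who won the most recent football World Cup?", tools=[search_tool]
)
print(response.text)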

vertexai.language_models.ChatModel

ChatModel(model_id: str, endpoint_name: typing.Optional[str] = None)

Creates a LanguageModel.

See more: vertexai.language_models.ChatModel

vertexai.language_models.ChatModel.from_pretrained

from_pretrained(model_name: str) -> vertexai._model_garden._model_garden_models.T

Loads a _ModelGardenModel.

See more: vertexai.language_models.ChatModel.from_pretrained

vertexai.language_models.ChatModel.get_tuned_model

get_tuned_model(
    tuned_model_name: str,
) -> vertexai.language_models._language_models._LanguageModel

Loads the specified tuned language model.

See more: vertexai.language_models.ChatModel.get_tuned_model

vertexai.language_models.ChatModel.list_tuned_model_names

list_tuned_model_names() -> typing.Sequence[str]

Lists the names of tuned models.

See more: vertexai.language_models.ChatModel.list_tuned_model_names

vertexai.language_models.ChatModel.start_chat

start_chat(
    *,
    context: typing.Optional[str] = None,
    examples: typing.Optional[
        typing.List[vertexai.language_models.InputOutputTextPair]
    ] = None,
    max_output_tokens: typing.Optional[int] = None,
    temperature: typing.Optional[float] = None,
    top_k: typing.Optional[int] = None,
    top_p: typing.Optional[float] = None,
    message_history: typing.Optional[
        typing.List[vertexai.language_models.ChatMessage]
    ] = None,
    stop_sequences: typing.Optional[typing.List[str]] = None
) -> vertexai.language_models.ChatSession

Starts a chat session with the model.

See more: vertexai.language_models.ChatModel.start_chat

vertexai.language_models.ChatModel.tune_model

tune_model(
    training_data: typing.Union[str, pandas.core.frame.DataFrame],
    *,
    train_steps: typing.Optional[int] = None,
    learning_rate_multiplier: typing.Optional[float] = None,
    tuning_job_location: typing.Optional[str] = None,
    tuned_model_location: typing.Optional[str] = None,
    model_display_name: typing.Optional[str] = None,
    default_context: typing.Optional[str] = None,
    accelerator_type: typing.Optional[typing.Literal["TPU", "GPU"]] = None,
    tuning_evaluation_spec: typing.Optional[
        vertexai.language_models.TuningEvaluationSpec
    ] = None
) -> vertexai.language_models._language_models._LanguageModelTuningJob

Tunes a model based on training data.

See more: vertexai.language_models.ChatModel.tune_model

vertexai.language_models.ChatModel.tune_model_rlhf

tune_model_rlhf(
    *,
    prompt_data: typing.Union[str, pandas.core.frame.DataFrame],
    preference_data: typing.Union[str, pandas.core.frame.DataFrame],
    model_display_name: typing.Optional[str] = None,
    prompt_sequence_length: typing.Optional[int] = None,
    target_sequence_length: typing.Optional[int] = None,
    reward_model_learning_rate_multiplier: typing.Optional[float] = None,
    reinforcement_learning_rate_multiplier: typing.Optional[float] = None,
    reward_model_train_steps: typing.Optional[int] = None,
    reinforcement_learning_train_steps: typing.Optional[int] = None,
    kl_coeff: typing.Optional[float] = None,
    default_context: typing.Optional[str] = None,
    tuning_job_location: typing.Optional[str] = None,
    accelerator_type: typing.Optional[typing.Literal["TPU", "GPU"]] = None,
    tuning_evaluation_spec: typing.Optional[
        vertexai.language_models.TuningEvaluationSpec
    ] = None
) -> vertexai.language_models._language_models._LanguageModelTuningJob

Tunes a model using reinforcement learning from human feedback.

See more: vertexai.language_models.ChatModel.tune_model_rlhf

vertexai.language_models.ChatSession.send_message

send_message(
    message: str,
    *,
    max_output_tokens: typing.Optional[int] = None,
    temperature: typing.Optional[float] = None,
    top_k: typing.Optional[int] = None,
    top_p: typing.Optional[float] = None,
    stop_sequences: typing.Optional[typing.List[str]] = None,
    candidate_count: typing.Optional[int] = None,
    grounding_source: typing.Optional[
        typing.Union[
            vertexai.language_models._language_models.WebSearch,
            vertexai.language_models._language_models.VertexAISearch,
            vertexai.language_models._language_models.InlineContext,
        ]
    ] = None
) -> vertexai.language_models.MultiCandidateTextGenerationResponse

Sends message to the language model and gets a response.

See more: vertexai.language_models.ChatSession.send_message
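
A minimal sketch of the legacy PaLM chat flow (the model version is a placeholder):

from vertexai.language_models import ChatModel

chat_model = ChatModel.from_pretrained("chat-bison@002")
chat = chat_model.start_chat(context="You are a concise assistant.")
print(chat.send_message("What is Vertex AI?", temperature=0.2).text)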

vertexai.language_models.ChatSession.send_message_async

send_message_async(
    message: str,
    *,
    max_output_tokens: typing.Optional[int] = None,
    temperature: typing.Optional[float] = None,
    top_k: typing.Optional[int] = None,
    top_p: typing.Optional[float] = None,
    stop_sequences: typing.Optional[typing.List[str]] = None,
    candidate_count: typing.Optional[int] = None,
    grounding_source: typing.Optional[
        typing.Union[
            vertexai.language_models._language_models.WebSearch,
            vertexai.language_models._language_models.VertexAISearch,
            vertexai.language_models._language_models.InlineContext,
        ]
    ] = None
) -> vertexai.language_models.MultiCandidateTextGenerationResponse

Asynchronously sends message to the language model and gets a response.

See more: vertexai.language_models.ChatSession.send_message_async

vertexai.language_models.ChatSession.send_message_streaming

send_message_streaming(
    message: str,
    *,
    max_output_tokens: typing.Optional[int] = None,
    temperature: typing.Optional[float] = None,
    top_k: typing.Optional[int] = None,
    top_p: typing.Optional[float] = None,
    stop_sequences: typing.Optional[typing.List[str]] = None
) -> typing.Iterator[vertexai.language_models.TextGenerationResponse]

Sends message to the language model and gets a streamed response.

See more: vertexai.language_models.ChatSession.send_message_streaming

vertexai.language_models.ChatSession.send_message_streaming_async

send_message_streaming_async(
    message: str,
    *,
    max_output_tokens: typing.Optional[int] = None,
    temperature: typing.Optional[float] = None,
    top_k: typing.Optional[int] = None,
    top_p: typing.Optional[float] = None,
    stop_sequences: typing.Optional[typing.List[str]] = None
) -> typing.AsyncIterator[vertexai.language_models.TextGenerationResponse]

Asynchronously sends message to the language model and gets a streamed response.

See more: vertexai.language_models.ChatSession.send_message_streaming_async

vertexai.language_models.CodeChatModel

CodeChatModel(model_id: str, endpoint_name: typing.Optional[str] = None)

Creates a LanguageModel.

See more: vertexai.language_models.CodeChatModel

vertexai.language_models.CodeChatModel.from_pretrained

from_pretrained(model_name: str) -> vertexai._model_garden._model_garden_models.T

Loads a _ModelGardenModel.

See more: vertexai.language_models.CodeChatModel.from_pretrained

vertexai.language_models.CodeChatModel.get_tuned_model

get_tuned_model(
    tuned_model_name: str,
) -> vertexai.language_models._language_models._LanguageModel

Loads the specified tuned language model.

See more: vertexai.language_models.CodeChatModel.get_tuned_model

vertexai.language_models.CodeChatModel.list_tuned_model_names

list_tuned_model_names() -> typing.Sequence[str]

Lists the names of tuned models.

See more: vertexai.language_models.CodeChatModel.list_tuned_model_names

vertexai.language_models.CodeChatModel.start_chat

start_chat(
    *,
    context: typing.Optional[str] = None,
    max_output_tokens: typing.Optional[int] = None,
    temperature: typing.Optional[float] = None,
    message_history: typing.Optional[
        typing.List[vertexai.language_models.ChatMessage]
    ] = None,
    stop_sequences: typing.Optional[typing.List[str]] = None
) -> vertexai.language_models.CodeChatSession

Starts a chat session with the code chat model.

See more: vertexai.language_models.CodeChatModel.start_chat

vertexai.language_models.CodeChatModel.tune_model

tune_model(
    training_data: typing.Union[str, pandas.core.frame.DataFrame],
    *,
    train_steps: typing.Optional[int] = None,
    learning_rate_multiplier: typing.Optional[float] = None,
    tuning_job_location: typing.Optional[str] = None,
    tuned_model_location: typing.Optional[str] = None,
    model_display_name: typing.Optional[str] = None,
    default_context: typing.Optional[str] = None,
    accelerator_type: typing.Optional[typing.Literal["TPU", "GPU"]] = None,
    tuning_evaluation_spec: typing.Optional[
        vertexai.language_models.TuningEvaluationSpec
    ] = None
) -> vertexai.language_models._language_models._LanguageModelTuningJob

Tunes a model based on training data.

See more: vertexai.language_models.CodeChatModel.tune_model

vertexai.language_models.CodeChatSession.send_message

send_message(
    message: str,
    *,
    max_output_tokens: typing.Optional[int] = None,
    temperature: typing.Optional[float] = None,
    stop_sequences: typing.Optional[typing.List[str]] = None,
    candidate_count: typing.Optional[int] = None
) -> vertexai.language_models.MultiCandidateTextGenerationResponse

Sends message to the code chat model and gets a response.

See more: vertexai.language_models.CodeChatSession.send_message

vertexai.language_models.CodeChatSession.send_message_async

send_message_async(
    message: str,
    *,
    max_output_tokens: typing.Optional[int] = None,
    temperature: typing.Optional[float] = None,
    candidate_count: typing.Optional[int] = None
) -> vertexai.language_models.MultiCandidateTextGenerationResponse

Asynchronously sends message to the code chat model and gets a response.

See more: vertexai.language_models.CodeChatSession.send_message_async

vertexai.language_models.CodeChatSession.send_message_streaming

send_message_streaming(
    message: str,
    *,
    max_output_tokens: typing.Optional[int] = None,
    temperature: typing.Optional[float] = None,
    stop_sequences: typing.Optional[typing.List[str]] = None
) -> typing.Iterator[vertexai.language_models.TextGenerationResponse]

Sends message to the language model and gets a streamed response.

See more: vertexai.language_models.CodeChatSession.send_message_streaming

vertexai.language_models.CodeChatSession.send_message_streaming_async

send_message_streaming_async(
    message: str,
    *,
    max_output_tokens: typing.Optional[int] = None,
    temperature: typing.Optional[float] = None,
    stop_sequences: typing.Optional[typing.List[str]] = None
) -> typing.AsyncIterator[vertexai.language_models.TextGenerationResponse]

Asynchronously sends message to the language model and gets a streamed response.

See more: vertexai.language_models.CodeChatSession.send_message_streaming_async

vertexai.language_models.CodeGenerationModel.batch_predict

batch_predict(
    *,
    dataset: typing.Union[str, typing.List[str]],
    destination_uri_prefix: str,
    model_parameters: typing.Optional[typing.Dict] = None
) -> google.cloud.aiplatform.jobs.BatchPredictionJob

Starts a batch prediction job with the model.

See more: vertexai.language_models.CodeGenerationModel.batch_predict

vertexai.language_models.CodeGenerationModel.from_pretrained

from_pretrained(model_name: str) -> vertexai._model_garden._model_garden_models.T

Loads a _ModelGardenModel.

See more: vertexai.language_models.CodeGenerationModel.from_pretrained

vertexai.language_models.CodeGenerationModel.get_tuned_model

get_tuned_model(
    tuned_model_name: str,
) -> vertexai.language_models._language_models._LanguageModel

Loads the specified tuned language model.

See more: vertexai.language_models.CodeGenerationModel.get_tuned_model

vertexai.language_models.CodeGenerationModel.list_tuned_model_names

list_tuned_model_names() -> typing.Sequence[str]

Lists the names of tuned models.

See more: vertexai.language_models.CodeGenerationModel.list_tuned_model_names

vertexai.language_models.CodeGenerationModel.predict

predict(
    prefix: str,
    suffix: typing.Optional[str] = None,
    *,
    max_output_tokens: typing.Optional[int] = None,
    temperature: typing.Optional[float] = None,
    stop_sequences: typing.Optional[typing.List[str]] = None,
    candidate_count: typing.Optional[int] = None
) -> vertexai.language_models.TextGenerationResponse

Gets model response for a single prompt.

See more: vertexai.language_models.CodeGenerationModel.predict

vertexai.language_models.CodeGenerationModel.predict_async

predict_async(
    prefix: str,
    suffix: typing.Optional[str] = None,
    *,
    max_output_tokens: typing.Optional[int] = None,
    temperature: typing.Optional[float] = None,
    stop_sequences: typing.Optional[typing.List[str]] = None,
    candidate_count: typing.Optional[int] = None
) -> vertexai.language_models.TextGenerationResponse

Asynchronously gets model response for a single prompt.

See more: vertexai.language_models.CodeGenerationModel.predict_async

vertexai.language_models.CodeGenerationModel.predict_streaming

predict_streaming(
    prefix: str,
    suffix: typing.Optional[str] = None,
    *,
    max_output_tokens: typing.Optional[int] = None,
    temperature: typing.Optional[float] = None,
    stop_sequences: typing.Optional[typing.List[str]] = None
) -> typing.Iterator[vertexai.language_models.TextGenerationResponse]

Predicts the code based on previous code.

See more: vertexai.language_models.CodeGenerationModel.predict_streaming

vertexai.language_models.CodeGenerationModel.predict_streaming_async

predict_streaming_async(
    prefix: str,
    suffix: typing.Optional[str] = None,
    *,
    max_output_tokens: typing.Optional[int] = None,
    temperature: typing.Optional[float] = None,
    stop_sequences: typing.Optional[typing.List[str]] = None
) -> typing.AsyncIterator[vertexai.language_models.TextGenerationResponse]

Asynchronously predicts the code based on previous code.

See more: vertexai.language_models.CodeGenerationModel.predict_streaming_async

vertexai.language_models.CodeGenerationModel.tune_model

tune_model(
    training_data: typing.Union[str, pandas.core.frame.DataFrame],
    *,
    train_steps: typing.Optional[int] = None,
    learning_rate_multiplier: typing.Optional[float] = None,
    tuning_job_location: typing.Optional[str] = None,
    tuned_model_location: typing.Optional[str] = None,
    model_display_name: typing.Optional[str] = None,
    tuning_evaluation_spec: typing.Optional[
        vertexai.language_models.TuningEvaluationSpec
    ] = None,
    accelerator_type: typing.Optional[typing.Literal["TPU", "GPU"]] = None,
    max_context_length: typing.Optional[str] = None
) -> vertexai.language_models._language_models._LanguageModelTuningJob

Tunes a model based on training data.

See more: vertexai.language_models.CodeGenerationModel.tune_model

vertexai.language_models.TextEmbeddingModel.batch_predict

batch_predict(
    *,
    dataset: typing.Union[str, typing.List[str]],
    destination_uri_prefix: str,
    model_parameters: typing.Optional[typing.Dict] = None
) -> google.cloud.aiplatform.jobs.BatchPredictionJob

Starts a batch prediction job with the model.

See more: vertexai.language_models.TextEmbeddingModel.batch_predict

vertexai.language_models.TextEmbeddingModel.count_tokens

count_tokens(
    prompts: typing.List[str],
) -> vertexai.preview.language_models.CountTokensResponse

Counts the tokens and billable characters for a given prompt.

See more: vertexai.language_models.TextEmbeddingModel.count_tokens

vertexai.language_models.TextEmbeddingModel.deploy_tuned_model

deploy_tuned_model(
    tuned_model_name: str,
    machine_type: typing.Optional[str] = None,
    accelerator: typing.Optional[str] = None,
    accelerator_count: typing.Optional[int] = None,
) -> vertexai.language_models._language_models._LanguageModel

Loads the specified tuned language model.

See more: vertexai.language_models.TextEmbeddingModel.deploy_tuned_model

vertexai.language_models.TextEmbeddingModel.from_pretrained

from_pretrained(model_name: str) -> vertexai._model_garden._model_garden_models.T

Loads a _ModelGardenModel.

See more: vertexai.language_models.TextEmbeddingModel.from_pretrained

vertexai.language_models.TextEmbeddingModel.get_embeddings

get_embeddings(
    texts: typing.List[typing.Union[str, vertexai.language_models.TextEmbeddingInput]],
    *,
    auto_truncate: bool = True,
    output_dimensionality: typing.Optional[int] = None
) -> typing.List[vertexai.language_models.TextEmbedding]

Calculates embeddings for the given texts.

See more: vertexai.language_models.TextEmbeddingModel.get_embeddings
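
A minimal sketch (the embedding model name is a placeholder):

from vertexai.language_models import TextEmbeddingModel

model = TextEmbeddingModel.from_pretrained("text-embedding-004")
embeddings = model.get_embeddings(["What is life?"])
print(len(embeddings[0].values))  # dimensionality of the embedding vector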

vertexai.language_models.TextEmbeddingModel.get_embeddings_async

get_embeddings_async(
    texts: typing.List[typing.Union[str, vertexai.language_models.TextEmbeddingInput]],
    *,
    auto_truncate: bool = True,
    output_dimensionality: typing.Optional[int] = None
) -> typing.List[vertexai.language_models.TextEmbedding]

Asynchronously calculates embeddings for the given texts.

See more: vertexai.language_models.TextEmbeddingModel.get_embeddings_async

vertexai.language_models.TextEmbeddingModel.get_tuned_model

get_tuned_model(*args, **kwargs)

Loads the specified tuned language model.

See more: vertexai.language_models.TextEmbeddingModel.get_tuned_model

vertexai.language_models.TextEmbeddingModel.list_tuned_model_names

list_tuned_model_names() -> typing.Sequence[str]

Lists the names of tuned models.

See more: vertexai.language_models.TextEmbeddingModel.list_tuned_model_names

vertexai.language_models.TextEmbeddingModel.tune_model

tune_model(
    *,
    training_data: typing.Optional[str] = None,
    corpus_data: typing.Optional[str] = None,
    queries_data: typing.Optional[str] = None,
    test_data: typing.Optional[str] = None,
    validation_data: typing.Optional[str] = None,
    batch_size: typing.Optional[int] = None,
    train_steps: typing.Optional[int] = None,
    tuned_model_location: typing.Optional[str] = None,
    model_display_name: typing.Optional[str] = None,
    task_type: typing.Optional[str] = None,
    machine_type: typing.Optional[str] = None,
    accelerator: typing.Optional[str] = None,
    accelerator_count: typing.Optional[int] = None,
    output_dimensionality: typing.Optional[int] = None,
    learning_rate_multiplier: typing.Optional[float] = None
) -> vertexai.language_models._language_models._TextEmbeddingModelTuningJob

Tunes a model based on training data.

See more: vertexai.language_models.TextEmbeddingModel.tune_model

vertexai.language_models.TextGenerationModel.batch_predict

batch_predict(
    *,
    dataset: typing.Union[str, typing.List[str]],
    destination_uri_prefix: str,
    model_parameters: typing.Optional[typing.Dict] = None
) -> google.cloud.aiplatform.jobs.BatchPredictionJob

Starts a batch prediction job with the model.

See more: vertexai.language_models.TextGenerationModel.batch_predict

vertexai.language_models.TextGenerationModel.from_pretrained

from_pretrained(model_name: str) -> vertexai._model_garden._model_garden_models.T

Loads a _ModelGardenModel.

See more: vertexai.language_models.TextGenerationModel.from_pretrained

vertexai.language_models.TextGenerationModel.get_tuned_model

get_tuned_model(
    tuned_model_name: str,
) -> vertexai.language_models._language_models._LanguageModel

Loads the specified tuned language model.

See more: vertexai.language_models.TextGenerationModel.get_tuned_model

vertexai.language_models.TextGenerationModel.list_tuned_model_names

list_tuned_model_names() -> typing.Sequence[str]

Lists the names of tuned models.

See more: vertexai.language_models.TextGenerationModel.list_tuned_model_names

vertexai.language_models.TextGenerationModel.predict

predict(
    prompt: str,
    *,
    max_output_tokens: typing.Optional[int] = 128,
    temperature: typing.Optional[float] = None,
    top_k: typing.Optional[int] = None,
    top_p: typing.Optional[float] = None,
    stop_sequences: typing.Optional[typing.List[str]] = None,
    candidate_count: typing.Optional[int] = None,
    grounding_source: typing.Optional[
        typing.Union[
            vertexai.language_models._language_models.WebSearch,
            vertexai.language_models._language_models.VertexAISearch,
            vertexai.language_models._language_models.InlineContext,
        ]
    ] = None,
    logprobs: typing.Optional[int] = None,
    presence_penalty: typing.Optional[float] = None,
    frequency_penalty: typing.Optional[float] = None,
    logit_bias: typing.Optional[typing.Dict[str, float]] = None,
    seed: typing.Optional[int] = None
) -> vertexai.language_models.MultiCandidateTextGenerationResponse

Gets model response for a single prompt.

See more: vertexai.language_models.TextGenerationModel.predict
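
A minimal sketch of the legacy PaLM text flow (the model version is a placeholder):

from vertexai.language_models import TextGenerationModel

model = TextGenerationModel.from_pretrained("text-bison@002")
response = model.predict("Write a haiku about the ocean.", temperature=0.7)
print(response.text)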

vertexai.language_models.TextGenerationModel.predict_async

predict_async(
    prompt: str,
    *,
    max_output_tokens: typing.Optional[int] = 128,
    temperature: typing.Optional[float] = None,
    top_k: typing.Optional[int] = None,
    top_p: typing.Optional[float] = None,
    stop_sequences: typing.Optional[typing.List[str]] = None,
    candidate_count: typing.Optional[int] = None,
    grounding_source: typing.Optional[
        typing.Union[
            vertexai.language_models._language_models.WebSearch,
            vertexai.language_models._language_models.VertexAISearch,
            vertexai.language_models._language_models.InlineContext,
        ]
    ] = None,
    logprobs: typing.Optional[int] = None,
    presence_penalty: typing.Optional[float] = None,
    frequency_penalty: typing.Optional[float] = None,
    logit_bias: typing.Optional[typing.Dict[str, float]] = None,
    seed: typing.Optional[int] = None
) -> vertexai.language_models.MultiCandidateTextGenerationResponse

Asynchronously gets model response for a single prompt.

See more: vertexai.language_models.TextGenerationModel.predict_async

vertexai.language_models.TextGenerationModel.predict_streaming

predict_streaming(
    prompt: str,
    *,
    max_output_tokens: int = 128,
    temperature: typing.Optional[float] = None,
    top_k: typing.Optional[int] = None,
    top_p: typing.Optional[float] = None,
    stop_sequences: typing.Optional[typing.List[str]] = None,
    logprobs: typing.Optional[int] = None,
    presence_penalty: typing.Optional[float] = None,
    frequency_penalty: typing.Optional[float] = None,
    logit_bias: typing.Optional[typing.Dict[str, float]] = None,
    seed: typing.Optional[int] = None
) -> typing.Iterator[vertexai.language_models.TextGenerationResponse]

Gets a streaming model response for a single prompt.

See more: vertexai.language_models.TextGenerationModel.predict_streaming

vertexai.language_models.TextGenerationModel.predict_streaming_async

predict_streaming_async(
    prompt: str,
    *,
    max_output_tokens: int = 128,
    temperature: typing.Optional[float] = None,
    top_k: typing.Optional[int] = None,
    top_p: typing.Optional[float] = None,
    stop_sequences: typing.Optional[typing.List[str]] = None,
    logprobs: typing.Optional[int] = None,
    presence_penalty: typing.Optional[float] = None,
    frequency_penalty: typing.Optional[float] = None,
    logit_bias: typing.Optional[typing.Dict[str, float]] = None,
    seed: typing.Optional[int] = None
) -> typing.AsyncIterator[vertexai.language_models.TextGenerationResponse]

Asynchronously gets a streaming model response for a single prompt.

See more: vertexai.language_models.TextGenerationModel.predict_streaming_async

vertexai.language_models.TextGenerationModel.tune_model

tune_model(
    training_data: typing.Union[str, pandas.core.frame.DataFrame],
    *,
    train_steps: typing.Optional[int] = None,
    learning_rate_multiplier: typing.Optional[float] = None,
    tuning_job_location: typing.Optional[str] = None,
    tuned_model_location: typing.Optional[str] = None,
    model_display_name: typing.Optional[str] = None,
    tuning_evaluation_spec: typing.Optional[
        vertexai.language_models.TuningEvaluationSpec
    ] = None,
    accelerator_type: typing.Optional[typing.Literal["TPU", "GPU"]] = None,
    max_context_length: typing.Optional[str] = None
) -> vertexai.language_models._language_models._LanguageModelTuningJob

Tunes a model based on training data.

See more: vertexai.language_models.TextGenerationModel.tune_model

vertexai.language_models.TextGenerationModel.tune_model_rlhf

tune_model_rlhf(
    *,
    prompt_data: typing.Union[str, pandas.core.frame.DataFrame],
    preference_data: typing.Union[str, pandas.core.frame.DataFrame],
    model_display_name: typing.Optional[str] = None,
    prompt_sequence_length: typing.Optional[int] = None,
    target_sequence_length: typing.Optional[int] = None,
    reward_model_learning_rate_multiplier: typing.Optional[float] = None,
    reinforcement_learning_rate_multiplier: typing.Optional[float] = None,
    reward_model_train_steps: typing.Optional[int] = None,
    reinforcement_learning_train_steps: typing.Optional[int] = None,
    kl_coeff: typing.Optional[float] = None,
    default_context: typing.Optional[str] = None,
    tuning_job_location: typing.Optional[str] = None,
    accelerator_type: typing.Optional[typing.Literal["TPU", "GPU"]] = None,
    tuning_evaluation_spec: typing.Optional[
        vertexai.language_models.TuningEvaluationSpec
    ] = None
) -> vertexai.language_models._language_models._LanguageModelTuningJob

Tunes a model using reinforcement learning from human feedback.

See more: vertexai.language_models.TextGenerationModel.tune_model_rlhf

vertexai.language_models._language_models._TunableModelMixin

_TunableModelMixin(model_id: str, endpoint_name: typing.Optional[str] = None)

vertexai.language_models._language_models._TunableModelMixin.tune_model

tune_model(
    training_data: typing.Union[str, pandas.core.frame.DataFrame],
    *,
    corpus_data: typing.Optional[str] = None,
    queries_data: typing.Optional[str] = None,
    test_data: typing.Optional[str] = None,
    validation_data: typing.Optional[str] = None,
    batch_size: typing.Optional[int] = None,
    train_steps: typing.Optional[int] = None,
    learning_rate: typing.Optional[float] = None,
    learning_rate_multiplier: typing.Optional[float] = None,
    tuning_job_location: typing.Optional[str] = None,
    tuned_model_location: typing.Optional[str] = None,
    model_display_name: typing.Optional[str] = None,
    tuning_evaluation_spec: typing.Optional[
        vertexai.language_models.TuningEvaluationSpec
    ] = None,
    default_context: typing.Optional[str] = None,
    task_type: typing.Optional[str] = None,
    machine_type: typing.Optional[str] = None,
    accelerator: typing.Optional[str] = None,
    accelerator_count: typing.Optional[int] = None,
    accelerator_type: typing.Optional[typing.Literal["TPU", "GPU"]] = None,
    max_context_length: typing.Optional[str] = None,
    output_dimensionality: typing.Optional[int] = None
) -> vertexai.language_models._language_models._LanguageModelTuningJob

Tunes a model based on training data.

See more: vertexai.language_models._language_models._TunableModelMixin.tune_model

vertexai.preview.generative_models.AutomaticFunctionCallingResponder

AutomaticFunctionCallingResponder(max_automatic_function_calls: int = 1)

vertexai.preview.generative_models.CallableFunctionDeclaration

CallableFunctionDeclaration(
    name: str,
    function: typing.Callable[[...], typing.Any],
    parameters: typing.Dict[str, typing.Any],
    description: typing.Optional[str] = None,
)

Constructs a FunctionDeclaration.

See more: vertexai.preview.generative_models.CallableFunctionDeclaration

vertexai.preview.generative_models.CallableFunctionDeclaration.from_func

from_func(
    func: typing.Callable[[...], typing.Any]
) -> vertexai.generative_models._generative_models.CallableFunctionDeclaration

Automatically creates a CallableFunctionDeclaration from a Python function.

See more: vertexai.preview.generative_models.CallableFunctionDeclaration.from_func
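
A sketch of automatic function calling; the stubbed weather function is illustrative, the model name is a placeholder, and passing a responder to start_chat follows the preview samples:

from vertexai.preview.generative_models import (
    AutomaticFunctionCallingResponder,
    CallableFunctionDeclaration,
    GenerativeModel,
    Tool,
)

def get_current_weather(location: str):
    """Returns the current weather in a location."""
    return {"weather": "sunny"}  # stubbed result for illustration

weather_fn = CallableFunctionDeclaration.from_func(get_current_weather)
model = GenerativeModel(
    "gemini-1.5-flash-002",
    tools=[Tool(function_declarations=[weather_fn])],
)
chat = model.start_chat(
    responder=AutomaticFunctionCallingResponder(max_automatic_function_calls=1)
)
print(chat.send_message("What is the weather in Boston?").text)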

vertexai.preview.generative_models.ChatSession.send_message

send_message(
    content: typing.Union[
        str,
        vertexai.generative_models._generative_models.Image,
        vertexai.generative_models._generative_models.Part,
        typing.List[
            typing.Union[
                str,
                vertexai.generative_models._generative_models.Image,
                vertexai.generative_models._generative_models.Part,
            ]
        ],
    ],
    *,
    generation_config: typing.Optional[
        typing.Union[
            vertexai.generative_models._generative_models.GenerationConfig,
            typing.Dict[str, typing.Any],
        ]
    ] = None,
    safety_settings: typing.Optional[
        typing.Union[
            typing.List[vertexai.generative_models._generative_models.SafetySetting],
            typing.Dict[
                google.cloud.aiplatform_v1beta1.types.content.HarmCategory,
                google.cloud.aiplatform_v1beta1.types.content.SafetySetting.HarmBlockThreshold,
            ],
        ]
    ] = None,
    tools: typing.Optional[
        typing.List[vertexai.generative_models._generative_models.Tool]
    ] = None,
    labels: typing.Optional[typing.Dict[str, str]] = None,
    stream: bool = False
) -> typing.Union[
    vertexai.generative_models._generative_models.GenerationResponse,
    typing.Iterable[vertexai.generative_models._generative_models.GenerationResponse],
]

Generates content.

See more: vertexai.preview.generative_models.ChatSession.send_message

vertexai.preview.generative_models.ChatSession.send_message_async

send_message_async(
    content: typing.Union[
        str,
        vertexai.generative_models._generative_models.Image,
        vertexai.generative_models._generative_models.Part,
        typing.List[
            typing.Union[
                str,
                vertexai.generative_models._generative_models.Image,
                vertexai.generative_models._generative_models.Part,
            ]
        ],
    ],
    *,
    generation_config: typing.Optional[
        typing.Union[
            vertexai.generative_models._generative_models.GenerationConfig,
            typing.Dict[str, typing.Any],
        ]
    ] = None,
    safety_settings: typing.Optional[
        typing.Union[
            typing.List[vertexai.generative_models._generative_models.SafetySetting],
            typing.Dict[
                google.cloud.aiplatform_v1beta1.types.content.HarmCategory,
                google.cloud.aiplatform_v1beta1.types.content.SafetySetting.HarmBlockThreshold,
            ],
        ]
    ] = None,
    tools: typing.Optional[
        typing.List[vertexai.generative_models._generative_models.Tool]
    ] = None,
    labels: typing.Optional[typing.Dict[str, str]] = None,
    stream: bool = False
) -> typing.Union[
    typing.Awaitable[vertexai.generative_models._generative_models.GenerationResponse],
    typing.Awaitable[
        typing.AsyncIterable[
            vertexai.generative_models._generative_models.GenerationResponse
        ]
    ],
]

Generates content asynchronously.

See more: vertexai.preview.generative_models.ChatSession.send_message_async

vertexai.preview.generative_models.FunctionDeclaration

FunctionDeclaration(
    *,
    name: str,
    parameters: typing.Dict[str, typing.Any],
    description: typing.Optional[str] = None,
    response: typing.Optional[typing.Dict[str, typing.Any]] = None
)

Constructs a FunctionDeclaration.

See more: vertexai.preview.generative_models.FunctionDeclaration

vertexai.preview.generative_models.GenerationConfig

GenerationConfig(
    *,
    temperature: typing.Optional[float] = None,
    top_p: typing.Optional[float] = None,
    top_k: typing.Optional[int] = None,
    candidate_count: typing.Optional[int] = None,
    max_output_tokens: typing.Optional[int] = None,
    stop_sequences: typing.Optional[typing.List[str]] = None,
    presence_penalty: typing.Optional[float] = None,
    frequency_penalty: typing.Optional[float] = None,
    response_mime_type: typing.Optional[str] = None,
    response_schema: typing.Optional[typing.Dict[str, typing.Any]] = None,
    seed: typing.Optional[int] = None,
    audio_timestamp: typing.Optional[bool] = None,
    routing_config: typing.Optional[RoutingConfig] = None,
    logprobs: typing.Optional[int] = None,
    response_logprobs: typing.Optional[bool] = None,
    response_modalities: typing.Optional[typing.List[GenerationConfig.Modality]] = None
)

Constructs a GenerationConfig object.

See more: vertexai.preview.generative_models.GenerationConfig
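
A minimal sketch of building a config and passing it to a generation call (the model name is illustrative):

from vertexai.preview.generative_models import GenerationConfig, GenerativeModel

config = GenerationConfig(
    temperature=0.2,
    top_p=0.95,
    max_output_tokens=256,
)
model = GenerativeModel("gemini-1.5-flash-002")
response = model.generate_content(
    "Write a one-line product description.",
    generation_config=config,
)
print(response.text)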

vertexai.preview.generative_models.GenerationConfig.RoutingConfig.AutoRoutingMode

AutoRoutingMode(
    *,
    model_routing_preference: google.cloud.aiplatform_v1beta1.types.content.GenerationConfig.RoutingConfig.AutoRoutingMode.ModelRoutingPreference
)

vertexai.preview.generative_models.GenerationConfig.RoutingConfig.ManualRoutingMode

ManualRoutingMode(*, model_name: str)

vertexai.preview.generative_models.GenerativeModel.compute_tokens

compute_tokens(
    contents: typing.Union[
        typing.List[vertexai.generative_models._generative_models.Content],
        typing.List[typing.Dict[str, typing.Any]],
        str,
        vertexai.generative_models._generative_models.Image,
        vertexai.generative_models._generative_models.Part,
        typing.List[
            typing.Union[
                str,
                vertexai.generative_models._generative_models.Image,
                vertexai.generative_models._generative_models.Part,
            ]
        ],
    ]
) -> google.cloud.aiplatform_v1beta1.types.llm_utility_service.ComputeTokensResponse

vertexai.preview.generative_models.GenerativeModel.compute_tokens_async

compute_tokens_async(
    contents: typing.Union[
        typing.List[vertexai.generative_models._generative_models.Content],
        typing.List[typing.Dict[str, typing.Any]],
        str,
        vertexai.generative_models._generative_models.Image,
        vertexai.generative_models._generative_models.Part,
        typing.List[
            typing.Union[
                str,
                vertexai.generative_models._generative_models.Image,
                vertexai.generative_models._generative_models.Part,
            ]
        ],
    ]
) -> google.cloud.aiplatform_v1beta1.types.llm_utility_service.ComputeTokensResponse

vertexai.preview.generative_models.GenerativeModel.count_tokens

count_tokens(
    contents: typing.Union[
        typing.List[vertexai.generative_models._generative_models.Content],
        typing.List[typing.Dict[str, typing.Any]],
        str,
        vertexai.generative_models._generative_models.Image,
        vertexai.generative_models._generative_models.Part,
        typing.List[
            typing.Union[
                str,
                vertexai.generative_models._generative_models.Image,
                vertexai.generative_models._generative_models.Part,
            ]
        ],
    ],
    *,
    tools: typing.Optional[
        typing.List[vertexai.generative_models._generative_models.Tool]
    ] = None
) -> google.cloud.aiplatform_v1beta1.types.prediction_service.CountTokensResponse
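
Counts the tokens and billable characters in the given contents without calling the model. A minimal sketch, assuming an initialized SDK and an illustrative model name:

from vertexai.preview.generative_models import GenerativeModel

model = GenerativeModel("gemini-1.5-flash-002")
token_info = model.count_tokens("How many tokens is this sentence?")
print(token_info.total_tokens, token_info.total_billable_characters)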

vertexai.preview.generative_models.GenerativeModel.count_tokens_async

count_tokens_async(
    contents: typing.Union[
        typing.List[vertexai.generative_models._generative_models.Content],
        typing.List[typing.Dict[str, typing.Any]],
        str,
        vertexai.generative_models._generative_models.Image,
        vertexai.generative_models._generative_models.Part,
        typing.List[
            typing.Union[
                str,
                vertexai.generative_models._generative_models.Image,
                vertexai.generative_models._generative_models.Part,
            ]
        ],
    ],
    *,
    tools: typing.Optional[
        typing.List[vertexai.generative_models._generative_models.Tool]
    ] = None
) -> google.cloud.aiplatform_v1beta1.types.prediction_service.CountTokensResponse

vertexai.preview.generative_models.GenerativeModel.from_cached_content

from_cached_content(
    cached_content: typing.Union[str, caching.CachedContent],
    *,
    generation_config: typing.Optional[
        typing.Union[GenerationConfig, typing.Dict[str, typing.Any]]
    ] = None,
    safety_settings: typing.Optional[
        typing.Union[
            typing.List[SafetySetting],
            typing.Dict[
                google.cloud.aiplatform_v1beta1.types.content.HarmCategory,
                google.cloud.aiplatform_v1beta1.types.content.SafetySetting.HarmBlockThreshold,
            ],
        ]
    ] = None
) -> _GenerativeModel

Creates a model from cached content.

See more: vertexai.preview.generative_models.GenerativeModel.from_cached_content
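
A minimal sketch, assuming a CachedContent resource already exists (the resource name below is a hypothetical placeholder):

from vertexai.preview.generative_models import GenerativeModel

model = GenerativeModel.from_cached_content(
    "projects/my-project/locations/us-central1/cachedContents/my-cache-id"
)
response = model.generate_content("Answer using the cached context.")
print(response.text)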

vertexai.preview.generative_models.GenerativeModel.generate_content

generate_content(
    contents: typing.Union[
        typing.List[vertexai.generative_models._generative_models.Content],
        typing.List[typing.Dict[str, typing.Any]],
        str,
        vertexai.generative_models._generative_models.Image,
        vertexai.generative_models._generative_models.Part,
        typing.List[
            typing.Union[
                str,
                vertexai.generative_models._generative_models.Image,
                vertexai.generative_models._generative_models.Part,
            ]
        ],
    ],
    *,
    generation_config: typing.Optional[
        typing.Union[
            vertexai.generative_models._generative_models.GenerationConfig,
            typing.Dict[str, typing.Any],
        ]
    ] = None,
    safety_settings: typing.Optional[
        typing.Union[
            typing.List[vertexai.generative_models._generative_models.SafetySetting],
            typing.Dict[
                google.cloud.aiplatform_v1beta1.types.content.HarmCategory,
                google.cloud.aiplatform_v1beta1.types.content.SafetySetting.HarmBlockThreshold,
            ],
        ]
    ] = None,
    tools: typing.Optional[
        typing.List[vertexai.generative_models._generative_models.Tool]
    ] = None,
    tool_config: typing.Optional[
        vertexai.generative_models._generative_models.ToolConfig
    ] = None,
    labels: typing.Optional[typing.Dict[str, str]] = None,
    stream: bool = False
) -> typing.Union[
    vertexai.generative_models._generative_models.GenerationResponse,
    typing.Iterable[vertexai.generative_models._generative_models.GenerationResponse],
]

vertexai.preview.generative_models.GenerativeModel.generate_content_async

generate_content_async(
    contents: typing.Union[
        typing.List[vertexai.generative_models._generative_models.Content],
        typing.List[typing.Dict[str, typing.Any]],
        str,
        vertexai.generative_models._generative_models.Image,
        vertexai.generative_models._generative_models.Part,
        typing.List[
            typing.Union[
                str,
                vertexai.generative_models._generative_models.Image,
                vertexai.generative_models._generative_models.Part,
            ]
        ],
    ],
    *,
    generation_config: typing.Optional[
        typing.Union[
            vertexai.generative_models._generative_models.GenerationConfig,
            typing.Dict[str, typing.Any],
        ]
    ] = None,
    safety_settings: typing.Optional[
        typing.Union[
            typing.List[vertexai.generative_models._generative_models.SafetySetting],
            typing.Dict[
                google.cloud.aiplatform_v1beta1.types.content.HarmCategory,
                google.cloud.aiplatform_v1beta1.types.content.SafetySetting.HarmBlockThreshold,
            ],
        ]
    ] = None,
    tools: typing.Optional[
        typing.List[vertexai.generative_models._generative_models.Tool]
    ] = None,
    tool_config: typing.Optional[
        vertexai.generative_models._generative_models.ToolConfig
    ] = None,
    labels: typing.Optional[typing.Dict[str, str]] = None,
    stream: bool = False
) -> typing.Union[
    vertexai.generative_models._generative_models.GenerationResponse,
    typing.AsyncIterable[
        vertexai.generative_models._generative_models.GenerationResponse
    ],
]

vertexai.preview.generative_models.GenerativeModel.start_chat

start_chat(
    *,
    history: typing.Optional[
        typing.List[vertexai.generative_models._generative_models.Content]
    ] = None,
    response_validation: bool = True,
    responder: typing.Optional[
        vertexai.generative_models._generative_models.AutomaticFunctionCallingResponder
    ] = None
) -> vertexai.generative_models._generative_models.ChatSession

Creates a stateful chat session.

See more: vertexai.preview.generative_models.GenerativeModel.start_chat
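
A minimal sketch of a chat session with automatic function calling; the weather tool mirrors the CallableFunctionDeclaration.from_func example above and is hypothetical:

from vertexai.preview import generative_models

def get_current_weather(location: str):
    """Returns the current weather in a given location."""
    return {"location": location, "weather": "sunny"}  # hypothetical stand-in

weather_tool = generative_models.Tool.from_function_declarations(
    [generative_models.CallableFunctionDeclaration.from_func(get_current_weather)]
)
model = generative_models.GenerativeModel(
    "gemini-1.5-flash-002",  # illustrative model name
    tools=[weather_tool],
)
chat = model.start_chat(
    responder=generative_models.AutomaticFunctionCallingResponder(
        max_automatic_function_calls=1
    )
)
response = chat.send_message("What is the weather like in Boston?")
print(response.text)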

vertexai.preview.generative_models.Image.from_bytes

from_bytes(data: bytes) -> vertexai.generative_models._generative_models.Image

Loads image from image bytes.

See more: vertexai.preview.generative_models.Image.from_bytes

vertexai.preview.generative_models.Image.load_from_file

load_from_file(
    location: str,
) -> vertexai.generative_models._generative_models.Image

vertexai.preview.generative_models.ResponseBlockedError.with_traceback

Exception.with_traceback(tb) -- set self.__traceback__ to tb and return self.

See more: vertexai.preview.generative_models.ResponseBlockedError.with_traceback

vertexai.preview.generative_models.ResponseValidationError.with_traceback

Exception.with_traceback(tb) -- set self.__traceback__ to tb and return self.

See more: vertexai.preview.generative_models.ResponseValidationError.with_traceback

vertexai.preview.generative_models.SafetySetting

SafetySetting(
    *,
    category: google.cloud.aiplatform_v1beta1.types.content.HarmCategory,
    threshold: google.cloud.aiplatform_v1beta1.types.content.SafetySetting.HarmBlockThreshold,
    method: typing.Optional[
        google.cloud.aiplatform_v1beta1.types.content.SafetySetting.HarmBlockMethod
    ] = None
)

vertexai.preview.prompts.Prompt

Prompt(
    prompt_data: typing.Optional[
        typing.Union[
            str,
            vertexai.generative_models._generative_models.Image,
            vertexai.generative_models._generative_models.Part,
            typing.List[
                typing.Union[
                    str,
                    vertexai.generative_models._generative_models.Image,
                    vertexai.generative_models._generative_models.Part,
                ]
            ],
        ]
    ] = None,
    *,
    variables: typing.Optional[
        typing.List[
            typing.Dict[
                str,
                typing.Union[
                    str,
                    vertexai.generative_models._generative_models.Image,
                    vertexai.generative_models._generative_models.Part,
                    typing.List[
                        typing.Union[
                            str,
                            vertexai.generative_models._generative_models.Image,
                            vertexai.generative_models._generative_models.Part,
                        ]
                    ],
                ],
            ]
        ]
    ] = None,
    prompt_name: typing.Optional[str] = None,
    generation_config: typing.Optional[
        vertexai.generative_models._generative_models.GenerationConfig
    ] = None,
    model_name: typing.Optional[str] = None,
    safety_settings: typing.Optional[
        vertexai.generative_models._generative_models.SafetySetting
    ] = None,
    system_instruction: typing.Optional[
        typing.Union[
            str,
            vertexai.generative_models._generative_models.Image,
            vertexai.generative_models._generative_models.Part,
            typing.List[
                typing.Union[
                    str,
                    vertexai.generative_models._generative_models.Image,
                    vertexai.generative_models._generative_models.Part,
                ]
            ],
        ]
    ] = None,
    tools: typing.Optional[
        typing.List[vertexai.generative_models._generative_models.Tool]
    ] = None,
    tool_config: typing.Optional[
        vertexai.generative_models._generative_models.ToolConfig
    ] = None
)

Initializes the Prompt with the given prompt data and variables.

See more: vertexai.preview.prompts.Prompt

vertexai.preview.prompts.Prompt.__repr__

__repr__() -> str

Returns a string representation of the unassembled prompt.

See more: vertexai.preview.prompts.Prompt.__repr__

vertexai.preview.prompts.Prompt.__str__

__str__() -> str

Returns the prompt data as a string, without any variables replaced.

See more: vertexai.preview.prompts.Prompt.__str__

vertexai.preview.prompts.Prompt.assemble_contents

assemble_contents(
    **variables_dict: typing.Union[
        str,
        vertexai.generative_models._generative_models.Image,
        vertexai.generative_models._generative_models.Part,
        typing.List[
            typing.Union[
                str,
                vertexai.generative_models._generative_models.Image,
                vertexai.generative_models._generative_models.Part,
            ]
        ],
    ]
) -> typing.List[vertexai.generative_models._generative_models.Content]

Returns the prompt data as a List[Content], assembled with variables if applicable.

See more: vertexai.preview.prompts.Prompt.assemble_contents

vertexai.preview.prompts.Prompt.generate_content

generate_content(
    contents: typing.Union[
        typing.List[vertexai.generative_models._generative_models.Content],
        typing.List[typing.Dict[str, typing.Any]],
        str,
        vertexai.generative_models._generative_models.Image,
        vertexai.generative_models._generative_models.Part,
        typing.List[
            typing.Union[
                str,
                vertexai.generative_models._generative_models.Image,
                vertexai.generative_models._generative_models.Part,
            ]
        ],
    ],
    *,
    generation_config: typing.Optional[
        typing.Union[
            vertexai.generative_models._generative_models.GenerationConfig,
            typing.Dict[str, typing.Any],
        ]
    ] = None,
    safety_settings: typing.Optional[
        typing.Union[
            typing.List[vertexai.generative_models._generative_models.SafetySetting],
            typing.Dict[
                google.cloud.aiplatform_v1beta1.types.content.HarmCategory,
                google.cloud.aiplatform_v1beta1.types.content.SafetySetting.HarmBlockThreshold,
            ],
        ]
    ] = None,
    model_name: typing.Optional[str] = None,
    tools: typing.Optional[
        typing.List[vertexai.generative_models._generative_models.Tool]
    ] = None,
    tool_config: typing.Optional[
        vertexai.generative_models._generative_models.ToolConfig
    ] = None,
    stream: bool = False,
    system_instruction: typing.Optional[
        typing.Union[
            str,
            vertexai.generative_models._generative_models.Image,
            vertexai.generative_models._generative_models.Part,
            typing.List[
                typing.Union[
                    str,
                    vertexai.generative_models._generative_models.Image,
                    vertexai.generative_models._generative_models.Part,
                ]
            ],
        ]
    ] = None
) -> typing.Union[
    vertexai.generative_models._generative_models.GenerationResponse,
    typing.Iterable[vertexai.generative_models._generative_models.GenerationResponse],
]

Generates content using the saved Prompt configs.

See more: vertexai.preview.prompts.Prompt.generate_content
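
A minimal sketch of templating a prompt with variables and generating from it (the model name is illustrative):

from vertexai.preview.prompts import Prompt

prompt = Prompt(
    prompt_data="Hello, {name}! Tell me about {topic}.",
    variables=[{"name": "Alice", "topic": "whales"}],
    model_name="gemini-1.5-flash-002",
)
# Assemble the template with the first variable set, then generate.
response = prompt.generate_content(
    contents=prompt.assemble_contents(**prompt.variables[0])
)
print(response.text)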

vertexai.preview.prompts.Prompt.get_unassembled_prompt_data

get_unassembled_prompt_data() -> typing.Union[
    str,
    vertexai.generative_models._generative_models.Image,
    vertexai.generative_models._generative_models.Part,
    typing.List[
        typing.Union[
            str,
            vertexai.generative_models._generative_models.Image,
            vertexai.generative_models._generative_models.Part,
        ]
    ],
]

Returns the prompt data, without any variables replaced.

See more: vertexai.preview.prompts.Prompt.get_unassembled_prompt_data

vertexai.preview.reasoning_engines.LangchainAgent

LangchainAgent(
    model: str,
    *,
    system_instruction: typing.Optional[str] = None,
    prompt: typing.Optional[RunnableSerializable] = None,
    tools: typing.Optional[typing.Sequence[_ToolLike]] = None,
    output_parser: typing.Optional[RunnableSerializable] = None,
    chat_history: typing.Optional[GetSessionHistoryCallable] = None,
    model_kwargs: typing.Optional[typing.Mapping[str, typing.Any]] = None,
    model_tool_kwargs: typing.Optional[typing.Mapping[str, typing.Any]] = None,
    agent_executor_kwargs: typing.Optional[typing.Mapping[str, typing.Any]] = None,
    runnable_kwargs: typing.Optional[typing.Mapping[str, typing.Any]] = None,
    model_builder: typing.Optional[typing.Callable] = None,
    runnable_builder: typing.Optional[typing.Callable] = None,
    enable_tracing: bool = False
)

Initializes the LangchainAgent.

See more: vertexai.preview.reasoning_engines.LangchainAgent

vertexai.preview.reasoning_engines.LangchainAgent.clone

clone() -> vertexai.preview.reasoning_engines.templates.langchain.LangchainAgent

Returns a clone of the LangchainAgent.

See more: vertexai.preview.reasoning_engines.LangchainAgent.clone

vertexai.preview.reasoning_engines.LangchainAgent.query

query(
    *,
    input: typing.Union[str, typing.Mapping[str, typing.Any]],
    config: typing.Optional[RunnableConfig] = None,
    **kwargs: typing.Any
) -> typing.Dict[str, typing.Any]

Queries the Agent with the given input and config.

See more: vertexai.preview.reasoning_engines.LangchainAgent.query
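
A minimal sketch, assuming the langchain extras are installed and the model name is illustrative:

from vertexai.preview import reasoning_engines

agent = reasoning_engines.LangchainAgent(model="gemini-1.5-flash-002")
response = agent.query(
    input="What is the exchange rate from US dollars to Swedish krona?"
)
print(response)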

vertexai.preview.reasoning_engines.LangchainAgent.set_up

set_up()

Sets up the agent for execution of queries at runtime.

See more: vertexai.preview.reasoning_engines.LangchainAgent.set_up

vertexai.preview.reasoning_engines.LangchainAgent.stream_query

stream_query(
    *,
    input: typing.Union[str, typing.Mapping[str, typing.Any]],
    config: typing.Optional[RunnableConfig] = None,
    **kwargs
) -> typing.Iterable[typing.Any]

Streams query responses from the Agent for the given input and config.

See more: vertexai.preview.reasoning_engines.LangchainAgent.stream_query

vertexai.preview.reasoning_engines.Queryable.query

query(**kwargs)

Runs the Reasoning Engine to serve the user query.

See more: vertexai.preview.reasoning_engines.Queryable.query

vertexai.preview.reasoning_engines.ReasoningEngine

ReasoningEngine(reasoning_engine_name: str)

Retrieves a Reasoning Engine resource.

See more: vertexai.preview.reasoning_engines.ReasoningEngine

vertexai.preview.reasoning_engines.ReasoningEngine.create

create(
    reasoning_engine: typing.Union[
        vertexai.reasoning_engines._reasoning_engines.Queryable,
        vertexai.reasoning_engines._reasoning_engines.OperationRegistrable,
    ],
    *,
    requirements: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
    reasoning_engine_name: typing.Optional[str] = None,
    display_name: typing.Optional[str] = None,
    description: typing.Optional[str] = None,
    gcs_dir_name: str = "reasoning_engine",
    sys_version: typing.Optional[str] = None,
    extra_packages: typing.Optional[typing.Sequence[str]] = None
) -> vertexai.reasoning_engines._reasoning_engines.ReasoningEngine

Creates a new ReasoningEngine.

See more: vertexai.preview.reasoning_engines.ReasoningEngine.create
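
A minimal sketch that deploys a local agent as a remote ReasoningEngine (the model name and display name are illustrative):

from vertexai.preview import reasoning_engines

remote_agent = reasoning_engines.ReasoningEngine.create(
    reasoning_engines.LangchainAgent(model="gemini-1.5-flash-002"),
    requirements=["google-cloud-aiplatform[langchain,reasoningengine]"],
    display_name="Currency exchange agent",
)
response = remote_agent.query(input="What is 1 USD in SEK?")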

vertexai.preview.reasoning_engines.ReasoningEngine.delete

delete(sync: bool = True) -> None

Deletes this Vertex AI resource.

See more: vertexai.preview.reasoning_engines.ReasoningEngine.delete

vertexai.preview.reasoning_engines.ReasoningEngine.list

list(
    filter: typing.Optional[str] = None,
    order_by: typing.Optional[str] = None,
    project: typing.Optional[str] = None,
    location: typing.Optional[str] = None,
    credentials: typing.Optional[google.auth.credentials.Credentials] = None,
    parent: typing.Optional[str] = None,
) -> typing.List[google.cloud.aiplatform.base.VertexAiResourceNoun]

List all instances of this Vertex AI Resource.

See more: vertexai.preview.reasoning_engines.ReasoningEngine.list

vertexai.preview.reasoning_engines.ReasoningEngine.operation_schemas

operation_schemas() -> typing.Sequence[typing.Dict[str, typing.Any]]

Returns the (Open)API schemas for the Reasoning Engine.

See more: vertexai.preview.reasoning_engines.ReasoningEngine.operation_schemas

vertexai.preview.reasoning_engines.ReasoningEngine.to_dict

to_dict() -> typing.Dict[str, typing.Any]

Returns the resource proto as a dictionary.

See more: vertexai.preview.reasoning_engines.ReasoningEngine.to_dict

vertexai.preview.reasoning_engines.ReasoningEngine.update

update(
    *,
    reasoning_engine: typing.Optional[
        typing.Union[
            vertexai.reasoning_engines._reasoning_engines.Queryable,
            vertexai.reasoning_engines._reasoning_engines.OperationRegistrable,
        ]
    ] = None,
    requirements: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
    display_name: typing.Optional[str] = None,
    description: typing.Optional[str] = None,
    gcs_dir_name: str = "reasoning_engine",
    sys_version: typing.Optional[str] = None,
    extra_packages: typing.Optional[typing.Sequence[str]] = None
) -> vertexai.reasoning_engines._reasoning_engines.ReasoningEngine

Updates an existing ReasoningEngine.

See more: vertexai.preview.reasoning_engines.ReasoningEngine.update

vertexai.preview.reasoning_engines.ReasoningEngine.wait

wait()

Helper method that blocks until all futures are complete.

See more: vertexai.preview.reasoning_engines.ReasoningEngine.wait

vertexai.preview.tuning.TuningJob

TuningJob(tuning_job_name: str)

Initializes class with project, location, and api_client.

See more: vertexai.preview.tuning.TuningJob

vertexai.preview.tuning.TuningJob.list

list(
    filter: typing.Optional[str] = None,
) -> typing.List[vertexai.tuning._tuning.TuningJob]

Lists TuningJobs.

See more: vertexai.preview.tuning.TuningJob.list
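
A minimal sketch of enumerating tuning jobs in the current project and location:

from vertexai.preview import tuning

for job in tuning.TuningJob.list():
    print(job.resource_name)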

vertexai.preview.tuning.TuningJob.refresh

refresh() -> vertexai.tuning._tuning.TuningJob

Refreshes the tuning job from the service.

See more: vertexai.preview.tuning.TuningJob.refresh

vertexai.preview.tuning.TuningJob.to_dict

to_dict() -> typing.Dict[str, typing.Any]

Returns the resource proto as a dictionary.

See more: vertexai.preview.tuning.TuningJob.to_dict

vertexai.preview.tuning.sft.SupervisedTuningJob.list

list(
    filter: typing.Optional[str] = None,
) -> typing.List[vertexai.tuning._tuning.TuningJob]

vertexai.preview.tuning.sft.SupervisedTuningJob.refresh

refresh() -> vertexai.tuning._tuning.TuningJob

Refreshes the tuning job from the service.

See more: vertexai.preview.tuning.sft.SupervisedTuningJob.refresh

vertexai.preview.tuning.sft.SupervisedTuningJob.to_dict

to_dict() -> typing.Dict[str, typing.Any]

Returns the resource proto as a dictionary.

See more: vertexai.preview.tuning.sft.SupervisedTuningJob.to_dict

vertexai.preview.vision_models.ControlReferenceImage

ControlReferenceImage(
    reference_id,
    image: typing.Optional[
        typing.Union[bytes, vertexai.vision_models.Image, str]
    ] = None,
    control_type: typing.Optional[
        typing.Literal["default", "scribble", "face_mesh", "canny"]
    ] = None,
    enable_control_image_computation: typing.Optional[bool] = False,
)

Creates a ControlReferenceImage object.

See more: vertexai.preview.vision_models.ControlReferenceImage

vertexai.preview.vision_models.GeneratedImage

GeneratedImage(
    image_bytes: typing.Optional[bytes],
    generation_parameters: typing.Dict[str, typing.Any],
    gcs_uri: typing.Optional[str] = None,
)

Creates a GeneratedImage object.

See more: vertexai.preview.vision_models.GeneratedImage

vertexai.preview.vision_models.GeneratedImage.load_from_file

load_from_file(location: str) -> vertexai.preview.vision_models.GeneratedImage

vertexai.preview.vision_models.GeneratedImage.save

save(location: str, include_generation_parameters: bool = True)

Saves image to a file.

See more: vertexai.preview.vision_models.GeneratedImage.save

vertexai.preview.vision_models.GeneratedImage.show

show()

vertexai.preview.vision_models.GeneratedMask

GeneratedMask(
    image_bytes: typing.Optional[bytes],
    gcs_uri: typing.Optional[str] = None,
    labels: typing.Optional[
        typing.List[vertexai.preview.vision_models.EntityLabel]
    ] = None,
)

Creates a GeneratedMask object.

See more: vertexai.preview.vision_models.GeneratedMask

vertexai.preview.vision_models.GeneratedMask.load_from_file

load_from_file(location: str) -> vertexai.vision_models.Image

Loads image from local file or Google Cloud Storage.

See more: vertexai.preview.vision_models.GeneratedMask.load_from_file

vertexai.preview.vision_models.GeneratedMask.save

save(location: str)

Saves image to a file.

See more: vertexai.preview.vision_models.GeneratedMask.save

vertexai.preview.vision_models.GeneratedMask.show

show()

vertexai.preview.vision_models.Image

Image(
    image_bytes: typing.Optional[bytes] = None, gcs_uri: typing.Optional[str] = None
)

Creates an Image object.

See more: vertexai.preview.vision_models.Image

vertexai.preview.vision_models.Image.load_from_file

load_from_file(location: str) -> vertexai.vision_models.Image

Loads image from local file or Google Cloud Storage.

See more: vertexai.preview.vision_models.Image.load_from_file

vertexai.preview.vision_models.Image.save

save(location: str)

Saves image to a file.

See more: vertexai.preview.vision_models.Image.save

vertexai.preview.vision_models.Image.show

show()

Shows the image.

See more: vertexai.preview.vision_models.Image.show

vertexai.preview.vision_models.ImageCaptioningModel

ImageCaptioningModel(model_id: str, endpoint_name: typing.Optional[str] = None)

Creates a _ModelGardenModel.

See more: vertexai.preview.vision_models.ImageCaptioningModel

vertexai.preview.vision_models.ImageCaptioningModel.from_pretrained

from_pretrained(model_name: str) -> vertexai._model_garden._model_garden_models.T

vertexai.preview.vision_models.ImageCaptioningModel.get_captions

get_captions(
    image: vertexai.vision_models.Image,
    *,
    number_of_results: int = 1,
    language: str = "en",
    output_gcs_uri: typing.Optional[str] = None
) -> typing.List[str]

Generates captions for a given image.

See more: vertexai.preview.vision_models.ImageCaptioningModel.get_captions
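
A minimal sketch (the model identifier and file path are illustrative):

from vertexai.preview.vision_models import Image, ImageCaptioningModel

model = ImageCaptioningModel.from_pretrained("imagetext@001")
image = Image.load_from_file("local-photo.png")
captions = model.get_captions(image=image, number_of_results=2, language="en")
print(captions)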

vertexai.preview.vision_models.ImageGenerationModel

ImageGenerationModel(model_id: str, endpoint_name: typing.Optional[str] = None)

Creates a _ModelGardenModel.

See more: vertexai.preview.vision_models.ImageGenerationModel

vertexai.preview.vision_models.ImageGenerationModel.edit_image

edit_image(
    *,
    prompt: str,
    base_image: typing.Optional[vertexai.vision_models.Image] = None,
    mask: typing.Optional[vertexai.vision_models.Image] = None,
    reference_images: typing.Optional[
        typing.List[vertexai.vision_models.ReferenceImage]
    ] = None,
    negative_prompt: typing.Optional[str] = None,
    number_of_images: int = 1,
    guidance_scale: typing.Optional[float] = None,
    edit_mode: typing.Optional[
        typing.Literal[
            "inpainting-insert", "inpainting-remove", "outpainting", "product-image"
        ]
    ] = None,
    mask_mode: typing.Optional[
        typing.Literal["background", "foreground", "semantic"]
    ] = None,
    segmentation_classes: typing.Optional[typing.List[str]] = None,
    mask_dilation: typing.Optional[float] = None,
    product_position: typing.Optional[typing.Literal["fixed", "reposition"]] = None,
    output_mime_type: typing.Optional[typing.Literal["image/png", "image/jpeg"]] = None,
    compression_quality: typing.Optional[float] = None,
    language: typing.Optional[str] = None,
    seed: typing.Optional[int] = None,
    output_gcs_uri: typing.Optional[str] = None,
    safety_filter_level: typing.Optional[
        typing.Literal["block_most", "block_some", "block_few", "block_fewest"]
    ] = None,
    person_generation: typing.Optional[
        typing.Literal["dont_allow", "allow_adult", "allow_all"]
    ] = None
) -> vertexai.preview.vision_models.ImageGenerationResponse

Edits an existing image based on a text prompt.

See more: vertexai.preview.vision_models.ImageGenerationModel.edit_image
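
A minimal sketch of mask-based inpainting (the model version, file paths, and mask are illustrative):

from vertexai.preview.vision_models import Image, ImageGenerationModel

model = ImageGenerationModel.from_pretrained("imagegeneration@006")
base = Image.load_from_file("room.png")
mask = Image.load_from_file("room-mask.png")
result = model.edit_image(
    prompt="a red sofa",
    base_image=base,
    mask=mask,
    edit_mode="inpainting-insert",
)
result[0].save("room-edited.png")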

vertexai.preview.vision_models.ImageGenerationModel.from_pretrained

from_pretrained(model_name: str) -> vertexai._model_garden._model_garden_models.T

vertexai.preview.vision_models.ImageGenerationModel.generate_images

generate_images(
    prompt: str,
    *,
    negative_prompt: typing.Optional[str] = None,
    number_of_images: int = 1,
    aspect_ratio: typing.Optional[
        typing.Literal["1:1", "9:16", "16:9", "4:3", "3:4"]
    ] = None,
    guidance_scale: typing.Optional[float] = None,
    language: typing.Optional[str] = None,
    seed: typing.Optional[int] = None,
    output_gcs_uri: typing.Optional[str] = None,
    add_watermark: typing.Optional[bool] = True,
    safety_filter_level: typing.Optional[
        typing.Literal["block_most", "block_some", "block_few", "block_fewest"]
    ] = None,
    person_generation: typing.Optional[
        typing.Literal["dont_allow", "allow_adult", "allow_all"]
    ] = None
) -> vertexai.preview.vision_models.ImageGenerationResponse

Generates images from a text prompt.

See more: vertexai.preview.vision_models.ImageGenerationModel.generate_images
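
A minimal sketch (the model version is illustrative):

from vertexai.preview.vision_models import ImageGenerationModel

model = ImageGenerationModel.from_pretrained("imagegeneration@006")
images = model.generate_images(
    prompt="A watercolor painting of a lighthouse at dawn",
    number_of_images=1,
    aspect_ratio="16:9",
)
images[0].save("lighthouse.png")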

vertexai.preview.vision_models.ImageGenerationModel.upscale_image

upscale_image(
    image: typing.Union[
        vertexai.vision_models.Image, vertexai.preview.vision_models.GeneratedImage
    ],
    new_size: typing.Optional[int] = 2048,
    upscale_factor: typing.Optional[typing.Literal["x2", "x4"]] = None,
    output_mime_type: typing.Optional[
        typing.Literal["image/png", "image/jpeg"]
    ] = "image/png",
    output_compression_quality: typing.Optional[int] = None,
    output_gcs_uri: typing.Optional[str] = None,
) -> vertexai.vision_models.Image
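
Upscales an image, either to a target size or by a fixed factor. A minimal sketch (the model version and file paths are illustrative):

from vertexai.preview.vision_models import Image, ImageGenerationModel

model = ImageGenerationModel.from_pretrained("imagegeneration@002")
image = Image.load_from_file("small.png")
upscaled = model.upscale_image(image=image, upscale_factor="x2")
upscaled.save("large.png")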

vertexai.preview.vision_models.ImageGenerationResponse.__getitem__

__getitem__(idx: int) -> vertexai.preview.vision_models.GeneratedImage

Gets the generated image by index.

See more: vertexai.preview.vision_models.ImageGenerationResponse.__getitem__

vertexai.preview.vision_models.ImageGenerationResponse.__iter__

__iter__() -> typing.Iterator[vertexai.preview.vision_models.GeneratedImage]

Iterates through the generated images.

See more: vertexai.preview.vision_models.ImageGenerationResponse.__iter__

vertexai.preview.vision_models.ImageQnAModel

ImageQnAModel(model_id: str, endpoint_name: typing.Optional[str] = None)

Creates a _ModelGardenModel.

See more: vertexai.preview.vision_models.ImageQnAModel

vertexai.preview.vision_models.ImageQnAModel.ask_question

ask_question(
    image: vertexai.vision_models.Image, question: str, *, number_of_results: int = 1
) -> typing.List[str]

Answers questions about an image.

See more: vertexai.preview.vision_models.ImageQnAModel.ask_question

vertexai.preview.vision_models.ImageQnAModel.from_pretrained

from_pretrained(model_name: str) -> vertexai._model_garden._model_garden_models.T

vertexai.preview.vision_models.ImageSegmentationModel

ImageSegmentationModel(model_id: str, endpoint_name: typing.Optional[str] = None)

Creates a _ModelGardenModel.

See more: vertexai.preview.vision_models.ImageSegmentationModel

vertexai.preview.vision_models.ImageSegmentationModel.from_pretrained

from_pretrained(model_name: str) -> vertexai._model_garden._model_garden_models.T

vertexai.preview.vision_models.ImageSegmentationModel.segment_image

segment_image(
    base_image: vertexai.vision_models.Image,
    prompt: typing.Optional[str] = None,
    scribble: typing.Optional[vertexai.preview.vision_models.Scribble] = None,
    mode: typing.Literal[
        "foreground", "background", "semantic", "prompt", "interactive"
    ] = "foreground",
    max_predictions: typing.Optional[int] = None,
    confidence_threshold: typing.Optional[float] = 0.1,
    mask_dilation: typing.Optional[float] = None,
) -> vertexai.preview.vision_models.ImageSegmentationResponse
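
Segments an image into masks, optionally guided by a prompt or scribble. A minimal sketch (the model identifier and file paths are illustrative):

from vertexai.preview.vision_models import Image, ImageSegmentationModel

model = ImageSegmentationModel.from_pretrained("image-segmentation-001")
image = Image.load_from_file("photo.png")
response = model.segment_image(base_image=image, mode="foreground")
response[0].save("foreground-mask.png")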

vertexai.preview.vision_models.ImageSegmentationResponse.__getitem__

__getitem__(idx: int) -> vertexai.preview.vision_models.GeneratedMask

Gets the generated mask by index.

See more: vertexai.preview.vision_models.ImageSegmentationResponse.__getitem__

vertexai.preview.vision_models.ImageSegmentationResponse.__iter__

__iter__() -> typing.Iterator[vertexai.preview.vision_models.GeneratedMask]

Iterates through the generated masks.

See more: vertexai.preview.vision_models.ImageSegmentationResponse.__iter__

vertexai.preview.vision_models.ImageTextModel

ImageTextModel(model_id: str, endpoint_name: typing.Optional[str] = None)

Creates a _ModelGardenModel.

See more: vertexai.preview.vision_models.ImageTextModel

vertexai.preview.vision_models.ImageTextModel.ask_question

ask_question(
    image: vertexai.vision_models.Image, question: str, *, number_of_results: int = 1
) -> typing.List[str]

Answers questions about an image.

See more: vertexai.preview.vision_models.ImageTextModel.ask_question

vertexai.preview.vision_models.ImageTextModel.from_pretrained

from_pretrained(model_name: str) -> vertexai._model_garden._model_garden_models.T

vertexai.preview.vision_models.ImageTextModel.get_captions

get_captions(
    image: vertexai.vision_models.Image,
    *,
    number_of_results: int = 1,
    language: str = "en",
    output_gcs_uri: typing.Optional[str] = None
) -> typing.List[str]

Generates captions for a given image.

See more: vertexai.preview.vision_models.ImageTextModel.get_captions

vertexai.preview.vision_models.MaskReferenceImage

MaskReferenceImage(
    reference_id,
    image: typing.Optional[
        typing.Union[bytes, vertexai.vision_models.Image, str]
    ] = None,
    mask_mode: typing.Optional[
        typing.Literal[
            "default", "user_provided", "background", "foreground", "semantic"
        ]
    ] = None,
    dilation: typing.Optional[float] = None,
    segmentation_classes: typing.Optional[typing.List[int]] = None,
)

Creates a MaskReferenceImage object.

See more: vertexai.preview.vision_models.MaskReferenceImage

vertexai.preview.vision_models.MultiModalEmbeddingModel

MultiModalEmbeddingModel(model_id: str, endpoint_name: typing.Optional[str] = None)

Creates a _ModelGardenModel.

See more: vertexai.preview.vision_models.MultiModalEmbeddingModel

vertexai.preview.vision_models.MultiModalEmbeddingModel.from_pretrained

from_pretrained(model_name: str) -> vertexai._model_garden._model_garden_models.T

vertexai.preview.vision_models.MultiModalEmbeddingModel.get_embeddings

get_embeddings(
    image: typing.Optional[vertexai.vision_models.Image] = None,
    video: typing.Optional[vertexai.vision_models.Video] = None,
    contextual_text: typing.Optional[str] = None,
    dimension: typing.Optional[int] = None,
    video_segment_config: typing.Optional[
        vertexai.vision_models.VideoSegmentConfig
    ] = None,
) -> vertexai.vision_models.MultiModalEmbeddingResponse

Gets embedding vectors for the provided image, video, and/or contextual text.

See more: vertexai.preview.vision_models.MultiModalEmbeddingModel.get_embeddings
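
A minimal sketch that embeds an image together with contextual text (the model identifier and file path are illustrative):

from vertexai.preview.vision_models import Image, MultiModalEmbeddingModel

model = MultiModalEmbeddingModel.from_pretrained("multimodalembedding@001")
image = Image.load_from_file("photo.png")
embeddings = model.get_embeddings(
    image=image,
    contextual_text="a photo of a lighthouse",
)
print(len(embeddings.image_embedding), len(embeddings.text_embedding))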

vertexai.preview.vision_models.RawReferenceImage

RawReferenceImage(
    reference_id,
    image: typing.Optional[
        typing.Union[bytes, vertexai.vision_models.Image, str]
    ] = None,
)

Creates a RawReferenceImage object.

See more: vertexai.preview.vision_models.RawReferenceImage

vertexai.preview.vision_models.ReferenceImage

ReferenceImage(
    reference_id,
    image: typing.Optional[
        typing.Union[bytes, vertexai.vision_models.Image, str]
    ] = None,
)

Creates a ReferenceImage object.

See more: vertexai.preview.vision_models.ReferenceImage

vertexai.preview.vision_models.Scribble

Scribble(image_bytes: typing.Optional[bytes], gcs_uri: typing.Optional[str] = None)

Creates a Scribble object.

See more: vertexai.preview.vision_models.Scribble

vertexai.preview.vision_models.StyleReferenceImage

StyleReferenceImage(
    reference_id,
    image: typing.Optional[
        typing.Union[bytes, vertexai.vision_models.Image, str]
    ] = None,
    style_description: typing.Optional[str] = None,
)

Creates a StyleReferenceImage object.

See more: vertexai.preview.vision_models.StyleReferenceImage

vertexai.preview.vision_models.SubjectReferenceImage

SubjectReferenceImage(
    reference_id,
    image: typing.Optional[
        typing.Union[bytes, vertexai.vision_models.Image, str]
    ] = None,
    subject_description: typing.Optional[str] = None,
    subject_type: typing.Optional[
        typing.Literal["default", "person", "animal", "product"]
    ] = None,
)

Creates a SubjectReferenceImage object.

See more: vertexai.preview.vision_models.SubjectReferenceImage

vertexai.preview.vision_models.Video

Video(
    video_bytes: typing.Optional[bytes] = None, gcs_uri: typing.Optional[str] = None
)

Creates a Video object.

See more: vertexai.preview.vision_models.Video

vertexai.preview.vision_models.Video.load_from_file

load_from_file(location: str) -> vertexai.vision_models.Video

Loads video from local file or Google Cloud Storage.

See more: vertexai.preview.vision_models.Video.load_from_file

vertexai.preview.vision_models.Video.save

save(location: str)

Saves video to a file.

See more: vertexai.preview.vision_models.Video.save

vertexai.preview.vision_models.VideoEmbedding

VideoEmbedding(
    start_offset_sec: int, end_offset_sec: int, embedding: typing.List[float]
)

Creates a VideoEmbedding object.

See more: vertexai.preview.vision_models.VideoEmbedding

vertexai.preview.vision_models.VideoSegmentConfig

VideoSegmentConfig(
    start_offset_sec: int = 0, end_offset_sec: int = 120, interval_sec: int = 16
)

Creates a VideoSegmentConfig object.

See more: vertexai.preview.vision_models.VideoSegmentConfig

vertexai.preview.vision_models.WatermarkVerificationModel

WatermarkVerificationModel(
    model_id: str, endpoint_name: typing.Optional[str] = None
)

Creates a _ModelGardenModel.

See more: vertexai.preview.vision_models.WatermarkVerificationModel

vertexai.preview.vision_models.WatermarkVerificationModel.from_pretrained

from_pretrained(model_name: str) -> vertexai._model_garden._model_garden_models.T

vertexai.preview.vision_models.WatermarkVerificationModel.verify_image

verify_image(
    image: vertexai.vision_models.Image,
) -> vertexai.preview.vision_models.WatermarkVerificationResponse

Verifies the watermark of an image.

See more: vertexai.preview.vision_models.WatermarkVerificationModel.verify_image

vertexai.prompts._prompts.Prompt

Prompt(
    prompt_data: typing.Optional[
        typing.Union[
            str,
            vertexai.generative_models._generative_models.Image,
            vertexai.generative_models._generative_models.Part,
            typing.List[
                typing.Union[
                    str,
                    vertexai.generative_models._generative_models.Image,
                    vertexai.generative_models._generative_models.Part,
                ]
            ],
        ]
    ] = None,
    *,
    variables: typing.Optional[
        typing.List[
            typing.Dict[
                str,
                typing.Union[
                    str,
                    vertexai.generative_models._generative_models.Image,
                    vertexai.generative_models._generative_models.Part,
                    typing.List[
                        typing.Union[
                            str,
                            vertexai.generative_models._generative_models.Image,
                            vertexai.generative_models._generative_models.Part,
                        ]
                    ],
                ],
            ]
        ]
    ] = None,
    prompt_name: typing.Optional[str] = None,
    generation_config: typing.Optional[
        vertexai.generative_models._generative_models.GenerationConfig
    ] = None,
    model_name: typing.Optional[str] = None,
    safety_settings: typing.Optional[
        vertexai.generative_models._generative_models.SafetySetting
    ] = None,
    system_instruction: typing.Optional[
        typing.Union[
            str,
            vertexai.generative_models._generative_models.Image,
            vertexai.generative_models._generative_models.Part,
            typing.List[
                typing.Union[
                    str,
                    vertexai.generative_models._generative_models.Image,
                    vertexai.generative_models._generative_models.Part,
                ]
            ],
        ]
    ] = None,
    tools: typing.Optional[
        typing.List[vertexai.generative_models._generative_models.Tool]
    ] = None,
    tool_config: typing.Optional[
        vertexai.generative_models._generative_models.ToolConfig
    ] = None
)

Initializes the Prompt with the given prompt data and variables.

See more: vertexai.prompts._prompts.Prompt

vertexai.prompts._prompts.Prompt.__repr__

__repr__() -> str

Returns a string representation of the unassembled prompt.

See more: vertexai.prompts._prompts.Prompt.__repr__

vertexai.prompts._prompts.Prompt.__str__

__str__() -> str

Returns the prompt data as a string, without any variables replaced.

See more: vertexai.prompts._prompts.Prompt.__str__

vertexai.prompts._prompts.Prompt.assemble_contents

assemble_contents(
    **variables_dict: typing.Union[
        str,
        vertexai.generative_models._generative_models.Image,
        vertexai.generative_models._generative_models.Part,
        typing.List[
            typing.Union[
                str,
                vertexai.generative_models._generative_models.Image,
                vertexai.generative_models._generative_models.Part,
            ]
        ],
    ]
) -> typing.List[vertexai.generative_models._generative_models.Content]

Returns the prompt data as a List[Content], assembled with variables if applicable.

See more: vertexai.prompts._prompts.Prompt.assemble_contents

vertexai.prompts._prompts.Prompt.generate_content

generate_content(
    contents: typing.Union[
        typing.List[vertexai.generative_models._generative_models.Content],
        typing.List[typing.Dict[str, typing.Any]],
        str,
        vertexai.generative_models._generative_models.Image,
        vertexai.generative_models._generative_models.Part,
        typing.List[
            typing.Union[
                str,
                vertexai.generative_models._generative_models.Image,
                vertexai.generative_models._generative_models.Part,
            ]
        ],
    ],
    *,
    generation_config: typing.Optional[
        typing.Union[
            vertexai.generative_models._generative_models.GenerationConfig,
            typing.Dict[str, typing.Any],
        ]
    ] = None,
    safety_settings: typing.Optional[
        typing.Union[
            typing.List[vertexai.generative_models._generative_models.SafetySetting],
            typing.Dict[
                google.cloud.aiplatform_v1beta1.types.content.HarmCategory,
                google.cloud.aiplatform_v1beta1.types.content.SafetySetting.HarmBlockThreshold,
            ],
        ]
    ] = None,
    model_name: typing.Optional[str] = None,
    tools: typing.Optional[
        typing.List[vertexai.generative_models._generative_models.Tool]
    ] = None,
    tool_config: typing.Optional[
        vertexai.generative_models._generative_models.ToolConfig
    ] = None,
    stream: bool = False,
    system_instruction: typing.Optional[
        typing.Union[
            str,
            vertexai.generative_models._generative_models.Image,
            vertexai.generative_models._generative_models.Part,
            typing.List[
                typing.Union[
                    str,
                    vertexai.generative_models._generative_models.Image,
                    vertexai.generative_models._generative_models.Part,
                ]
            ],
        ]
    ] = None
) -> typing.Union[
    vertexai.generative_models._generative_models.GenerationResponse,
    typing.Iterable[vertexai.generative_models._generative_models.GenerationResponse],
]

Generates content using the saved Prompt configs.

See more: vertexai.prompts._prompts.Prompt.generate_content

vertexai.prompts._prompts.Prompt.get_unassembled_prompt_data

get_unassembled_prompt_data() -> typing.Union[
    str,
    vertexai.generative_models._generative_models.Image,
    vertexai.generative_models._generative_models.Part,
    typing.List[
        typing.Union[
            str,
            vertexai.generative_models._generative_models.Image,
            vertexai.generative_models._generative_models.Part,
        ]
    ],
]

Returns the prompt data, without any variables replaced.

See more: vertexai.prompts._prompts.Prompt.get_unassembled_prompt_data

vertexai.resources.preview.ml_monitoring.ModelMonitor

ModelMonitor(
    model_monitor_name: str,
    project: typing.Optional[str] = None,
    location: typing.Optional[str] = None,
    credentials: typing.Optional[google.auth.credentials.Credentials] = None,
)

Initializes class with project, location, and api_client.

See more: vertexai.resources.preview.ml_monitoring.ModelMonitor
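
A minimal sketch of retrieving an existing model monitor (the resource name is a hypothetical placeholder):

from vertexai.resources.preview import ml_monitoring

monitor = ml_monitoring.ModelMonitor(
    model_monitor_name="projects/my-project/locations/us-central1/modelMonitors/123"
)
print(monitor.get_schema())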

vertexai.resources.preview.ml_monitoring.ModelMonitor.create

create(
    model_name: str,
    model_version_id: str,
    training_dataset: typing.Optional[
        vertexai.resources.preview.ml_monitoring.spec.objective.MonitoringInput
    ] = None,
    display_name: typing.Optional[str] = None,
    model_monitoring_schema: typing.Optional[
        vertexai.resources.preview.ml_monitoring.spec.schema.ModelMonitoringSchema
    ] = None,
    tabular_objective_spec: typing.Optional[
        vertexai.resources.preview.ml_monitoring.spec.objective.TabularObjective
    ] = None,
    output_spec: typing.Optional[
        vertexai.resources.preview.ml_monitoring.spec.output.OutputSpec
    ] = None,
    notification_spec: typing.Optional[
        vertexai.resources.preview.ml_monitoring.spec.notification.NotificationSpec
    ] = None,
    explanation_spec: typing.Optional[
        google.cloud.aiplatform_v1beta1.types.explanation.ExplanationSpec
    ] = None,
    project: typing.Optional[str] = None,
    location: typing.Optional[str] = None,
    credentials: typing.Optional[google.auth.credentials.Credentials] = None,
    model_monitor_id: typing.Optional[str] = None,
) -> vertexai.resources.preview.ml_monitoring.model_monitors.ModelMonitor

vertexai.resources.preview.ml_monitoring.ModelMonitor.create_schedule

create_schedule(
    cron: str,
    target_dataset: vertexai.resources.preview.ml_monitoring.spec.objective.MonitoringInput,
    display_name: typing.Optional[str] = None,
    model_monitoring_job_display_name: typing.Optional[str] = None,
    start_time: typing.Optional[google.protobuf.timestamp_pb2.Timestamp] = None,
    end_time: typing.Optional[google.protobuf.timestamp_pb2.Timestamp] = None,
    tabular_objective_spec: typing.Optional[
        vertexai.resources.preview.ml_monitoring.spec.objective.TabularObjective
    ] = None,
    baseline_dataset: typing.Optional[
        vertexai.resources.preview.ml_monitoring.spec.objective.MonitoringInput
    ] = None,
    output_spec: typing.Optional[
        vertexai.resources.preview.ml_monitoring.spec.output.OutputSpec
    ] = None,
    notification_spec: typing.Optional[
        vertexai.resources.preview.ml_monitoring.spec.notification.NotificationSpec
    ] = None,
    explanation_spec: typing.Optional[
        google.cloud.aiplatform_v1beta1.types.explanation.ExplanationSpec
    ] = None,
) -> google.cloud.aiplatform_v1beta1.types.schedule.Schedule

Creates a new scheduled run for a model monitoring job.

See more: vertexai.resources.preview.ml_monitoring.ModelMonitor.create_schedule

vertexai.resources.preview.ml_monitoring.ModelMonitor.delete

delete(force: bool = False, sync: bool = True) -> None

Deletes the model monitor; when force is set, its child resources are deleted as well.

See more: vertexai.resources.preview.ml_monitoring.ModelMonitor.delete

vertexai.resources.preview.ml_monitoring.ModelMonitor.delete_model_monitoring_job

delete_model_monitoring_job(model_monitoring_job_name: str) -> None

vertexai.resources.preview.ml_monitoring.ModelMonitor.delete_schedule

delete_schedule(schedule_name: str) -> None

vertexai.resources.preview.ml_monitoring.ModelMonitor.get_model_monitoring_job

get_model_monitoring_job(
    model_monitoring_job_name: str,
) -> vertexai.resources.preview.ml_monitoring.model_monitors.ModelMonitoringJob

vertexai.resources.preview.ml_monitoring.ModelMonitor.get_schedule

get_schedule(
    schedule_name: str,
) -> google.cloud.aiplatform_v1beta1.types.schedule.Schedule

vertexai.resources.preview.ml_monitoring.ModelMonitor.get_schema

get_schema() -> (
    google.cloud.aiplatform_v1beta1.types.model_monitor.ModelMonitoringSchema
)

Get the schema of the model monitor.

See more: vertexai.resources.preview.ml_monitoring.ModelMonitor.get_schema

vertexai.resources.preview.ml_monitoring.ModelMonitor.list

list(
    filter: typing.Optional[str] = None,
    order_by: typing.Optional[str] = None,
    project: typing.Optional[str] = None,
    location: typing.Optional[str] = None,
    credentials: typing.Optional[google.auth.credentials.Credentials] = None,
    parent: typing.Optional[str] = None,
) -> typing.List[google.cloud.aiplatform.base.VertexAiResourceNoun]

List all instances of this Vertex AI Resource.

See more: vertexai.resources.preview.ml_monitoring.ModelMonitor.list

vertexai.resources.preview.ml_monitoring.ModelMonitor.list_jobs

list_jobs(
    page_size: typing.Optional[int] = None, page_token: typing.Optional[str] = None
) -> ListJobsResponse.list_jobs

vertexai.resources.preview.ml_monitoring.ModelMonitor.list_schedules

list_schedules(
    filter: typing.Optional[str] = None,
    page_size: typing.Optional[int] = None,
    page_token: typing.Optional[str] = None,
) -> ListSchedulesResponse.list_schedules

vertexai.resources.preview.ml_monitoring.ModelMonitor.pause_schedule

pause_schedule(schedule_name: str) -> None

vertexai.resources.preview.ml_monitoring.ModelMonitor.resume_schedule

resume_schedule(schedule_name: str) -> None

vertexai.resources.preview.ml_monitoring.ModelMonitor.run

run(
    target_dataset: vertexai.resources.preview.ml_monitoring.spec.objective.MonitoringInput,
    display_name: typing.Optional[str] = None,
    model_monitoring_job_id: typing.Optional[str] = None,
    sync: typing.Optional[bool] = False,
    tabular_objective_spec: typing.Optional[
        vertexai.resources.preview.ml_monitoring.spec.objective.TabularObjective
    ] = None,
    baseline_dataset: typing.Optional[
        vertexai.resources.preview.ml_monitoring.spec.objective.MonitoringInput
    ] = None,
    output_spec: typing.Optional[
        vertexai.resources.preview.ml_monitoring.spec.output.OutputSpec
    ] = None,
    notification_spec: typing.Optional[
        vertexai.resources.preview.ml_monitoring.spec.notification.NotificationSpec
    ] = None,
    explanation_spec: typing.Optional[
        google.cloud.aiplatform_v1beta1.types.explanation.ExplanationSpec
    ] = None,
) -> vertexai.resources.preview.ml_monitoring.model_monitors.ModelMonitoringJob

Creates a new ModelMonitoringJob.

See more: vertexai.resources.preview.ml_monitoring.ModelMonitor.run

vertexai.resources.preview.ml_monitoring.ModelMonitor.search_alerts

search_alerts(
    stats_name: typing.Optional[str] = None,
    objective_type: typing.Optional[str] = None,
    model_monitoring_job_name: typing.Optional[str] = None,
    start_time: typing.Optional[google.protobuf.timestamp_pb2.Timestamp] = None,
    end_time: typing.Optional[google.protobuf.timestamp_pb2.Timestamp] = None,
    page_size: typing.Optional[int] = None,
    page_token: typing.Optional[str] = None,
) -> typing.Dict[str, typing.Any]

vertexai.resources.preview.ml_monitoring.ModelMonitor.search_metrics

search_metrics(
    stats_name: typing.Optional[str] = None,
    objective_type: typing.Optional[str] = None,
    model_monitoring_job_name: typing.Optional[str] = None,
    schedule_name: typing.Optional[str] = None,
    algorithm: typing.Optional[str] = None,
    start_time: typing.Optional[google.protobuf.timestamp_pb2.Timestamp] = None,
    end_time: typing.Optional[google.protobuf.timestamp_pb2.Timestamp] = None,
    page_size: typing.Optional[int] = None,
    page_token: typing.Optional[str] = None,
) -> MetricsSearchResponse.monitoring_stats

vertexai.resources.preview.ml_monitoring.ModelMonitor.show_feature_attribution_drift_stats

show_feature_attribution_drift_stats(model_monitoring_job_name: str) -> None

Visualizes the feature attribution drift results from a model monitoring job as a histogram chart and a table.

See more: vertexai.resources.preview.ml_monitoring.ModelMonitor.show_feature_attribution_drift_stats

vertexai.resources.preview.ml_monitoring.ModelMonitor.show_feature_drift_stats

show_feature_drift_stats(model_monitoring_job_name: str) -> None

Visualizes the feature drift results from a model monitoring job as a histogram chart and a table.

See more: vertexai.resources.preview.ml_monitoring.ModelMonitor.show_feature_drift_stats

vertexai.resources.preview.ml_monitoring.ModelMonitor.show_output_drift_stats

show_output_drift_stats(model_monitoring_job_name: str) -> None

Visualizes the prediction output drift results from a model monitoring job as a histogram chart and a table.

See more: vertexai.resources.preview.ml_monitoring.ModelMonitor.show_output_drift_stats

vertexai.resources.preview.ml_monitoring.ModelMonitor.to_dict

to_dict() -> typing.Dict[str, typing.Any]

Returns the resource proto as a dictionary.

See more: vertexai.resources.preview.ml_monitoring.ModelMonitor.to_dict

vertexai.resources.preview.ml_monitoring.ModelMonitor.update

update(
    display_name: typing.Optional[str] = None,
    training_dataset: typing.Optional[
        vertexai.resources.preview.ml_monitoring.spec.objective.MonitoringInput
    ] = None,
    model_monitoring_schema: typing.Optional[
        vertexai.resources.preview.ml_monitoring.spec.schema.ModelMonitoringSchema
    ] = None,
    tabular_objective_spec: typing.Optional[
        vertexai.resources.preview.ml_monitoring.spec.objective.TabularObjective
    ] = None,
    output_spec: typing.Optional[
        vertexai.resources.preview.ml_monitoring.spec.output.OutputSpec
    ] = None,
    notification_spec: typing.Optional[
        vertexai.resources.preview.ml_monitoring.spec.notification.NotificationSpec
    ] = None,
    explanation_spec: typing.Optional[
        google.cloud.aiplatform_v1beta1.types.explanation.ExplanationSpec
    ] = None,
) -> vertexai.resources.preview.ml_monitoring.model_monitors.ModelMonitor

Updates an existing ModelMonitor.

See more: vertexai.resources.preview.ml_monitoring.ModelMonitor.update

vertexai.resources.preview.ml_monitoring.ModelMonitor.update_schedule

update_schedule(
    schedule_name: str,
    display_name: typing.Optional[str] = None,
    model_monitoring_job_display_name: typing.Optional[str] = None,
    cron: typing.Optional[str] = None,
    baseline_dataset: typing.Optional[
        vertexai.resources.preview.ml_monitoring.spec.objective.MonitoringInput
    ] = None,
    target_dataset: typing.Optional[
        vertexai.resources.preview.ml_monitoring.spec.objective.MonitoringInput
    ] = None,
    tabular_objective_spec: typing.Optional[
        vertexai.resources.preview.ml_monitoring.spec.objective.TabularObjective
    ] = None,
    output_spec: typing.Optional[
        vertexai.resources.preview.ml_monitoring.spec.output.OutputSpec
    ] = None,
    notification_spec: typing.Optional[
        vertexai.resources.preview.ml_monitoring.spec.notification.NotificationSpec
    ] = None,
    explanation_spec: typing.Optional[
        google.cloud.aiplatform_v1beta1.types.explanation.ExplanationSpec
    ] = None,
    end_time: typing.Optional[google.protobuf.timestamp_pb2.Timestamp] = None,
) -> google.cloud.aiplatform_v1beta1.types.schedule.Schedule

Updates an existing model monitoring schedule.

See more: vertexai.resources.preview.ml_monitoring.ModelMonitor.update_schedule
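
A sketch that changes only the cron expression of an existing schedule (hypothetical resource name), again using the assumed `monitor` instance:

schedule = monitor.update_schedule(
    schedule_name="projects/my-project/locations/us-central1/schedules/789",
    cron="0 0 * * *",  # run daily at midnight
)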

vertexai.resources.preview.ml_monitoring.ModelMonitor.wait

wait()

Helper method that blocks until all futures are complete.

See more: vertexai.resources.preview.ml_monitoring.ModelMonitor.wait

vertexai.resources.preview.ml_monitoring.ModelMonitoringJob

ModelMonitoringJob(
    model_monitoring_job_name: str,
    model_monitor_id: typing.Optional[str] = None,
    project: typing.Optional[str] = None,
    location: typing.Optional[str] = None,
    credentials: typing.Optional[google.auth.credentials.Credentials] = None,
)

Initializes the class with project, location, and API client.

See more: vertexai.resources.preview.ml_monitoring.ModelMonitoringJob

vertexai.resources.preview.ml_monitoring.ModelMonitoringJob.create

create(
    model_monitor_name: typing.Optional[str] = None,
    target_dataset: typing.Optional[
        vertexai.resources.preview.ml_monitoring.spec.objective.MonitoringInput
    ] = None,
    display_name: typing.Optional[str] = None,
    model_monitoring_job_id: typing.Optional[str] = None,
    project: typing.Optional[str] = None,
    location: typing.Optional[str] = None,
    credentials: typing.Optional[google.auth.credentials.Credentials] = None,
    baseline_dataset: typing.Optional[
        vertexai.resources.preview.ml_monitoring.spec.objective.MonitoringInput
    ] = None,
    tabular_objective_spec: typing.Optional[
        vertexai.resources.preview.ml_monitoring.spec.objective.TabularObjective
    ] = None,
    output_spec: typing.Optional[
        vertexai.resources.preview.ml_monitoring.spec.output.OutputSpec
    ] = None,
    notification_spec: typing.Optional[
        vertexai.resources.preview.ml_monitoring.spec.notification.NotificationSpec
    ] = None,
    explanation_spec: typing.Optional[
        google.cloud.aiplatform_v1beta1.types.explanation.ExplanationSpec
    ] = None,
    sync: bool = False,
) -> vertexai.resources.preview.ml_monitoring.model_monitors.ModelMonitoringJob

Creates a new ModelMonitoringJob.

See more: vertexai.resources.preview.ml_monitoring.ModelMonitoringJob.create
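
A sketch that runs a one-off monitoring job under an existing ModelMonitor (hypothetical resource name; the dataset and objective specs a real job would need are omitted for brevity):

from vertexai.resources.preview import ml_monitoring

job = ml_monitoring.ModelMonitoringJob.create(
    model_monitor_name="projects/my-project/locations/us-central1/modelMonitors/123",
    display_name="ad-hoc-drift-check",
)
job.wait()  # block until the creation futures complete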

vertexai.resources.preview.ml_monitoring.ModelMonitoringJob.delete

delete() -> None

Deletes a ModelMonitoringJob.

See more: vertexai.resources.preview.ml_monitoring.ModelMonitoringJob.delete

vertexai.resources.preview.ml_monitoring.ModelMonitoringJob.done

done() -> bool

Indicates whether the job has completed.

See more: vertexai.resources.preview.ml_monitoring.ModelMonitoringJob.done

vertexai.resources.preview.ml_monitoring.ModelMonitoringJob.list

list(
    filter: typing.Optional[str] = None,
    order_by: typing.Optional[str] = None,
    project: typing.Optional[str] = None,
    location: typing.Optional[str] = None,
    credentials: typing.Optional[google.auth.credentials.Credentials] = None,
    parent: typing.Optional[str] = None,
) -> typing.List[google.cloud.aiplatform.base.VertexAiResourceNoun]

Lists all instances of this Vertex AI resource.

See more: vertexai.resources.preview.ml_monitoring.ModelMonitoringJob.list
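
A sketch listing the jobs under one ModelMonitor, newest first (hypothetical parent resource name; `order_by` follows the usual Vertex AI list conventions):

from vertexai.resources.preview import ml_monitoring

jobs = ml_monitoring.ModelMonitoringJob.list(
    parent="projects/my-project/locations/us-central1/modelMonitors/123",
    order_by="create_time desc",
)
for job in jobs:
    print(job.display_name)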

vertexai.resources.preview.ml_monitoring.ModelMonitoringJob.to_dict

to_dict() -> typing.Dict[str, typing.Any]

Returns the resource proto as a dictionary.

See more: vertexai.resources.preview.ml_monitoring.ModelMonitoringJob.to_dict

vertexai.resources.preview.ml_monitoring.ModelMonitoringJob.wait

wait()

Helper method that blocks until all futures are complete.

See more: vertexai.resources.preview.ml_monitoring.ModelMonitoringJob.wait

vertexai.resources.preview.ml_monitoring.spec.ModelMonitoringSchema.to_json

to_json(output_dir: typing.Optional[str] = None) -> str

Transforms the ModelMonitoringSchema to JSON format.

See more: vertexai.resources.preview.ml_monitoring.spec.ModelMonitoringSchema.to_json
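
A sketch, assuming `schema` is a ModelMonitoringSchema built earlier; passing `output_dir` additionally writes the JSON to that directory:

schema_json = schema.to_json(output_dir="/tmp/monitoring_schema")  # hypothetical path
print(schema_json)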

vertexai.vision_models.GeneratedImage

GeneratedImage(
    image_bytes: typing.Optional[bytes],
    generation_parameters: typing.Dict[str, typing.Any],
    gcs_uri: typing.Optional[str] = None,
)

Creates a GeneratedImage object.

See more: vertexai.vision_models.GeneratedImage

vertexai.vision_models.GeneratedImage.load_from_file

load_from_file(location: str) -> vertexai.preview.vision_models.GeneratedImage

Loads an image from a local file or Google Cloud Storage.

See more: vertexai.vision_models.GeneratedImage.load_from_file

vertexai.vision_models.GeneratedImage.save

save(location: str, include_generation_parameters: bool = True)

Saves the image to a file.

See more: vertexai.vision_models.GeneratedImage.save

vertexai.vision_models.GeneratedImage.show

show()

Shows the image.

See more: vertexai.vision_models.GeneratedImage.show

vertexai.vision_models.Image

Image(
    image_bytes: typing.Optional[bytes] = None, gcs_uri: typing.Optional[str] = None
)

Creates an Image object.

See more: vertexai.vision_models.Image

vertexai.vision_models.Image.load_from_file

load_from_file(location: str) -> vertexai.vision_models.Image

Loads an image from a local file or Google Cloud Storage.

See more: vertexai.vision_models.Image.load_from_file
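
A sketch showing the two supported sources (the GCS URI is a hypothetical placeholder):

from vertexai.vision_models import Image

local_image = Image.load_from_file("photo.png")
gcs_image = Image.load_from_file("gs://my-bucket/photo.png")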

vertexai.vision_models.Image.save

save(location: str)

Saves the image to a file.

See more: vertexai.vision_models.Image.save

vertexai.vision_models.Image.show

show()

Shows the image.

See more: vertexai.vision_models.Image.show

vertexai.vision_models.ImageCaptioningModel

ImageCaptioningModel(model_id: str, endpoint_name: typing.Optional[str] = None)

Creates a _ModelGardenModel.

See more: vertexai.vision_models.ImageCaptioningModel

vertexai.vision_models.ImageCaptioningModel.from_pretrained

from_pretrained(model_name: str) -> vertexai._model_garden._model_garden_models.T

Loads a _ModelGardenModel.

See more: vertexai.vision_models.ImageCaptioningModel.from_pretrained

vertexai.vision_models.ImageCaptioningModel.get_captions

get_captions(
    image: vertexai.vision_models.Image,
    *,
    number_of_results: int = 1,
    language: str = "en",
    output_gcs_uri: typing.Optional[str] = None
) -> typing.List[str]

Generates captions for a given image.

See more: vertexai.vision_models.ImageCaptioningModel.get_captions
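
A sketch, assuming the "imagetext@001" model version is available (check the current model list):

from vertexai.vision_models import Image, ImageCaptioningModel

model = ImageCaptioningModel.from_pretrained("imagetext@001")
image = Image.load_from_file("photo.png")
for caption in model.get_captions(image, number_of_results=3, language="en"):
    print(caption)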

vertexai.vision_models.ImageGenerationModel

ImageGenerationModel(model_id: str, endpoint_name: typing.Optional[str] = None)

Creates a _ModelGardenModel.

See more: vertexai.vision_models.ImageGenerationModel

vertexai.vision_models.ImageGenerationModel.edit_image

edit_image(
    *,
    prompt: str,
    base_image: typing.Optional[vertexai.vision_models.Image] = None,
    mask: typing.Optional[vertexai.vision_models.Image] = None,
    reference_images: typing.Optional[
        typing.List[vertexai.vision_models.ReferenceImage]
    ] = None,
    negative_prompt: typing.Optional[str] = None,
    number_of_images: int = 1,
    guidance_scale: typing.Optional[float] = None,
    edit_mode: typing.Optional[
        typing.Literal[
            "inpainting-insert", "inpainting-remove", "outpainting", "product-image"
        ]
    ] = None,
    mask_mode: typing.Optional[
        typing.Literal["background", "foreground", "semantic"]
    ] = None,
    segmentation_classes: typing.Optional[typing.List[str]] = None,
    mask_dilation: typing.Optional[float] = None,
    product_position: typing.Optional[typing.Literal["fixed", "reposition"]] = None,
    output_mime_type: typing.Optional[typing.Literal["image/png", "image/jpeg"]] = None,
    compression_quality: typing.Optional[float] = None,
    language: typing.Optional[str] = None,
    seed: typing.Optional[int] = None,
    output_gcs_uri: typing.Optional[str] = None,
    safety_filter_level: typing.Optional[
        typing.Literal["block_most", "block_some", "block_few", "block_fewest"]
    ] = None,
    person_generation: typing.Optional[
        typing.Literal["dont_allow", "allow_adult", "allow_all"]
    ] = None
) -> vertexai.preview.vision_models.ImageGenerationResponse

Edits an existing image based on a text prompt.

See more: vertexai.vision_models.ImageGenerationModel.edit_image
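
A sketch of an inpainting edit, assuming the "imagegeneration@006" model version is available; with mask_mode set, the mask is derived automatically rather than supplied:

from vertexai.vision_models import Image, ImageGenerationModel

model = ImageGenerationModel.from_pretrained("imagegeneration@006")
base = Image.load_from_file("room.png")

response = model.edit_image(
    prompt="a blue velvet sofa",
    base_image=base,
    edit_mode="inpainting-insert",
    mask_mode="foreground",
    number_of_images=1,
)
response[0].save("room_edited.png")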

vertexai.vision_models.ImageGenerationModel.from_pretrained

from_pretrained(model_name: str) -> vertexai._model_garden._model_garden_models.T

Loads a _ModelGardenModel.

See more: vertexai.vision_models.ImageGenerationModel.from_pretrained

vertexai.vision_models.ImageGenerationModel.generate_images

generate_images(
    prompt: str,
    *,
    negative_prompt: typing.Optional[str] = None,
    number_of_images: int = 1,
    aspect_ratio: typing.Optional[
        typing.Literal["1:1", "9:16", "16:9", "4:3", "3:4"]
    ] = None,
    guidance_scale: typing.Optional[float] = None,
    language: typing.Optional[str] = None,
    seed: typing.Optional[int] = None,
    output_gcs_uri: typing.Optional[str] = None,
    add_watermark: typing.Optional[bool] = True,
    safety_filter_level: typing.Optional[
        typing.Literal["block_most", "block_some", "block_few", "block_fewest"]
    ] = None,
    person_generation: typing.Optional[
        typing.Literal["dont_allow", "allow_adult", "allow_all"]
    ] = None
) -> vertexai.preview.vision_models.ImageGenerationResponse

Generates images from a text prompt.

See more: vertexai.vision_models.ImageGenerationModel.generate_images
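
A sketch, again assuming the "imagegeneration@006" version; note that reproducible seeds are assumed to require the watermark to be disabled:

from vertexai.vision_models import ImageGenerationModel

model = ImageGenerationModel.from_pretrained("imagegeneration@006")
response = model.generate_images(
    prompt="a watercolor painting of a lighthouse at dawn",
    number_of_images=2,
    aspect_ratio="16:9",
    seed=42,
    add_watermark=False,  # assumption: seeds require the watermark off
)
for i, image in enumerate(response):
    image.save(f"lighthouse_{i}.png")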

vertexai.vision_models.ImageGenerationModel.upscale_image

upscale_image(
    image: typing.Union[
        vertexai.vision_models.Image, vertexai.preview.vision_models.GeneratedImage
    ],
    new_size: typing.Optional[int] = 2048,
    upscale_factor: typing.Optional[typing.Literal["x2", "x4"]] = None,
    output_mime_type: typing.Optional[
        typing.Literal["image/png", "image/jpeg"]
    ] = "image/png",
    output_compression_quality: typing.Optional[int] = None,
    output_gcs_uri: typing.Optional[str] = None,
) -> vertexai.vision_models.Image

Upscales an image.

See more: vertexai.vision_models.ImageGenerationModel.upscale_image
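
A sketch continuing the generate_images example above; the target resolution is controlled by either `new_size` or `upscale_factor`, but not both:

# `model` and `response` come from the previous sketch.
upscaled = model.upscale_image(image=response[0], new_size=4096)
upscaled.save("lighthouse_large.png")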

vertexai.vision_models.ImageGenerationResponse.__getitem__

__getitem__(idx: int) -> vertexai.preview.vision_models.GeneratedImage

Gets the generated image by index.

See more: vertexai.vision_models.ImageGenerationResponse.__getitem__

vertexai.vision_models.ImageGenerationResponse.__iter__

__iter__() -> typing.Iterator[vertexai.preview.vision_models.GeneratedImage]

Iterates through the generated images.

See more: vertexai.vision_models.ImageGenerationResponse.__iter__

vertexai.vision_models.ImageQnAModel

ImageQnAModel(model_id: str, endpoint_name: typing.Optional[str] = None)

Creates a _ModelGardenModel.

See more: vertexai.vision_models.ImageQnAModel

vertexai.vision_models.ImageQnAModel.ask_question

ask_question(
    image: vertexai.vision_models.Image, question: str, *, number_of_results: int = 1
) -> typing.List[str]

Answers questions about an image.

See more: vertexai.vision_models.ImageQnAModel.ask_question
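
A sketch, assuming the "imagetext@001" model version:

from vertexai.vision_models import Image, ImageQnAModel

model = ImageQnAModel.from_pretrained("imagetext@001")
image = Image.load_from_file("chart.png")
answers = model.ask_question(
    image, question="What trend does this chart show?", number_of_results=2
)
print(answers)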

vertexai.vision_models.ImageQnAModel.from_pretrained

from_pretrained(model_name: str) -> vertexai._model_garden._model_garden_models.T

Loads a _ModelGardenModel.

See more: vertexai.vision_models.ImageQnAModel.from_pretrained

vertexai.vision_models.ImageTextModel

ImageTextModel(model_id: str, endpoint_name: typing.Optional[str] = None)

Creates a _ModelGardenModel.

See more: vertexai.vision_models.ImageTextModel

vertexai.vision_models.ImageTextModel.ask_question

ask_question(
    image: vertexai.vision_models.Image, question: str, *, number_of_results: int = 1
) -> typing.List[str]

Answers questions about an image.

See more: vertexai.vision_models.ImageTextModel.ask_question

vertexai.vision_models.ImageTextModel.from_pretrained

from_pretrained(model_name: str) -> vertexai._model_garden._model_garden_models.T

Loads a _ModelGardenModel.

See more: vertexai.vision_models.ImageTextModel.from_pretrained

vertexai.vision_models.ImageTextModel.get_captions

get_captions(
    image: vertexai.vision_models.Image,
    *,
    number_of_results: int = 1,
    language: str = "en",
    output_gcs_uri: typing.Optional[str] = None
) -> typing.List[str]

Generates captions for a given image.

See more: vertexai.vision_models.ImageTextModel.get_captions

vertexai.vision_models.MultiModalEmbeddingModel

MultiModalEmbeddingModel(model_id: str, endpoint_name: typing.Optional[str] = None)

Creates a _ModelGardenModel.

See more: vertexai.vision_models.MultiModalEmbeddingModel

vertexai.vision_models.MultiModalEmbeddingModel.from_pretrained

from_pretrained(model_name: str) -> vertexai._model_garden._model_garden_models.T

Loads a _ModelGardenModel.

See more: vertexai.vision_models.MultiModalEmbeddingModel.from_pretrained

vertexai.vision_models.MultiModalEmbeddingModel.get_embeddings

get_embeddings(
    image: typing.Optional[vertexai.vision_models.Image] = None,
    video: typing.Optional[vertexai.vision_models.Video] = None,
    contextual_text: typing.Optional[str] = None,
    dimension: typing.Optional[int] = None,
    video_segment_config: typing.Optional[
        vertexai.vision_models.VideoSegmentConfig
    ] = None,
) -> vertexai.vision_models.MultiModalEmbeddingResponse

Gets embedding vectors for the provided image, video, and/or contextual text.

See more: vertexai.vision_models.MultiModalEmbeddingModel.get_embeddings
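
A sketch, assuming the "multimodalembedding@001" model version; image and text embeddings land in the same vector space, so they can be compared directly:

from vertexai.vision_models import Image, MultiModalEmbeddingModel

model = MultiModalEmbeddingModel.from_pretrained("multimodalembedding@001")
image = Image.load_from_file("photo.png")
embeddings = model.get_embeddings(
    image=image,
    contextual_text="a photo of a mountain lake",
)
print(len(embeddings.image_embedding), len(embeddings.text_embedding))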

vertexai.vision_models.Video

Video(
    video_bytes: typing.Optional[bytes] = None, gcs_uri: typing.Optional[str] = None
)

Creates a Video object.

See more: vertexai.vision_models.Video

vertexai.vision_models.Video.load_from_file

load_from_file(location: str) -> vertexai.vision_models.Video

Loads a video from a local file or Google Cloud Storage.

See more: vertexai.vision_models.Video.load_from_file

vertexai.vision_models.Video.save

save(location: str)

Saves the video to a file.

See more: vertexai.vision_models.Video.save

vertexai.vision_models.VideoEmbedding

VideoEmbedding(
    start_offset_sec: int, end_offset_sec: int, embedding: typing.List[float]
)

Creates a VideoEmbedding object.

See more: vertexai.vision_models.VideoEmbedding

vertexai.vision_models.VideoSegmentConfig

VideoSegmentConfig(
    start_offset_sec: int = 0, end_offset_sec: int = 120, interval_sec: int = 16
)

Creates a VideoSegmentConfig object.

See more: vertexai.vision_models.VideoSegmentConfig
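
A sketch that embeds a video in 15-second segments, combining Video, VideoSegmentConfig, and the embedding model above (hypothetical GCS URI):

from vertexai.vision_models import (
    MultiModalEmbeddingModel,
    Video,
    VideoSegmentConfig,
)

model = MultiModalEmbeddingModel.from_pretrained("multimodalembedding@001")
video = Video.load_from_file("gs://my-bucket/clip.mp4")
config = VideoSegmentConfig(start_offset_sec=0, end_offset_sec=60, interval_sec=15)

response = model.get_embeddings(video=video, video_segment_config=config)
for segment in response.video_embeddings:
    print(segment.start_offset_sec, segment.end_offset_sec, len(segment.embedding))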