public final class InferenceParameter extends GeneratedMessageV3 implements InferenceParameterOrBuilder
The parameters of inference.
Protobuf type google.cloud.dialogflow.v2beta1.InferenceParameter
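A minimal construction sketch, assuming the standard setters that protobuf codegen generates for the four fields documented below:

    import com.google.cloud.dialogflow.v2beta1.InferenceParameter;

    public class BuildExample {
      public static void main(String[] args) {
        // Assemble inference parameters through the generated builder.
        InferenceParameter params =
            InferenceParameter.newBuilder()
                .setTemperature(0.2)      // less random output
                .setTopK(40)              // sample from the 40 most probable tokens
                .setTopP(0.95)            // nucleus-sampling cutoff
                .setMaxOutputTokens(256)  // cap the generator's output length
                .build();
        System.out.println(params);
      }
    }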
Inherited Members
com.google.protobuf.GeneratedMessageV3.<ListT>makeMutableCopy(ListT)
com.google.protobuf.GeneratedMessageV3.<ListT>makeMutableCopy(ListT,int)
com.google.protobuf.GeneratedMessageV3.<T>emptyList(java.lang.Class<T>)
com.google.protobuf.GeneratedMessageV3.internalGetMapFieldReflection(int)
Static Fields
MAX_OUTPUT_TOKENS_FIELD_NUMBER
public static final int MAX_OUTPUT_TOKENS_FIELD_NUMBER
Field Value
Type | Description
int |
TEMPERATURE_FIELD_NUMBER
public static final int TEMPERATURE_FIELD_NUMBER
Field Value
Type | Description
int |
TOP_K_FIELD_NUMBER
public static final int TOP_K_FIELD_NUMBER
Field Value
Type | Description
int |
TOP_P_FIELD_NUMBER
public static final int TOP_P_FIELD_NUMBER
Field Value
Type | Description
int |
Static Methods
getDefaultInstance()
public static InferenceParameter getDefaultInstance()
getDescriptor()
public static final Descriptors.Descriptor getDescriptor()
newBuilder()
public static InferenceParameter.Builder newBuilder()
newBuilder(InferenceParameter prototype)
public static InferenceParameter.Builder newBuilder(InferenceParameter prototype)
parseDelimitedFrom(InputStream input)
public static InferenceParameter parseDelimitedFrom(InputStream input)
parseDelimitedFrom(InputStream input, ExtensionRegistryLite extensionRegistry)
public static InferenceParameter parseDelimitedFrom(InputStream input, ExtensionRegistryLite extensionRegistry)
parseFrom(byte[] data)
public static InferenceParameter parseFrom(byte[] data)
Parameter
Name | Description
data | byte[]
parseFrom(byte[] data, ExtensionRegistryLite extensionRegistry)
public static InferenceParameter parseFrom(byte[] data, ExtensionRegistryLite extensionRegistry)
parseFrom(ByteString data)
public static InferenceParameter parseFrom(ByteString data)
parseFrom(ByteString data, ExtensionRegistryLite extensionRegistry)
public static InferenceParameter parseFrom(ByteString data, ExtensionRegistryLite extensionRegistry)
parseFrom(CodedInputStream input)
public static InferenceParameter parseFrom(CodedInputStream input)
parseFrom(CodedInputStream input, ExtensionRegistryLite extensionRegistry)
public static InferenceParameter parseFrom(CodedInputStream input, ExtensionRegistryLite extensionRegistry)
parseFrom(InputStream input)
public static InferenceParameter parseFrom(InputStream input)
parseFrom(InputStream input, ExtensionRegistryLite extensionRegistry)
public static InferenceParameter parseFrom(InputStream input, ExtensionRegistryLite extensionRegistry)
parseFrom(ByteBuffer data)
public static InferenceParameter parseFrom(ByteBuffer data)
parseFrom(ByteBuffer data, ExtensionRegistryLite extensionRegistry)
public static InferenceParameter parseFrom(ByteBuffer data, ExtensionRegistryLite extensionRegistry)
parser()
public static Parser<InferenceParameter> parser()
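As an illustration of the static parse methods, a byte[] round trip might look like the following sketch; parseFrom(byte[]) declares InvalidProtocolBufferException:

    import com.google.cloud.dialogflow.v2beta1.InferenceParameter;
    import com.google.protobuf.InvalidProtocolBufferException;

    public class ParseExample {
      public static void main(String[] args) throws InvalidProtocolBufferException {
        InferenceParameter original =
            InferenceParameter.newBuilder().setTemperature(0.7).build();

        // Serialize to bytes, then parse them back.
        byte[] wire = original.toByteArray();
        InferenceParameter parsed = InferenceParameter.parseFrom(wire);

        // The round trip preserves field values.
        System.out.println(parsed.getTemperature()); // 0.7
      }
    }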
Methods
equals(Object obj)
public boolean equals(Object obj)
Parameter
Name | Description
obj | Object
Overrides
getDefaultInstanceForType()
public InferenceParameter getDefaultInstanceForType()
getMaxOutputTokens()
public int getMaxOutputTokens()
Optional. Maximum number of output tokens for the generator.
optional int32 max_output_tokens = 1 [(.google.api.field_behavior) = OPTIONAL];
Returns
Type | Description
int | The maxOutputTokens.
getParserForType()
public Parser<InferenceParameter> getParserForType()
Overrides
getSerializedSize()
public int getSerializedSize()
Returns
Type | Description
int |
Overrides
getTemperature()
public double getTemperature()
Optional. Controls the randomness of LLM predictions.
Low temperature = less random. High temperature = more random.
If unset (or set to 0), a default value of 0 is used.
optional double temperature = 2 [(.google.api.field_behavior) = OPTIONAL];
Returns
Type | Description
double | The temperature.
getTopK()
public int getTopK()
Optional. Top-k changes how the model selects tokens for output. A top-k of
1 means the selected token is the most probable among all tokens in the
model's vocabulary (also called greedy decoding), while a top-k of 3 means
that the next token is selected from among the 3 most probable tokens
(using temperature). For each token selection step, the top K tokens with
the highest probabilities are sampled. Then tokens are further filtered
based on topP with the final token selected using temperature sampling.
Specify a lower value for less random responses and a higher value for more
random responses. Acceptable values are in the range [1, 40]; the default is 40.
optional int32 top_k = 3 [(.google.api.field_behavior) = OPTIONAL];
Returns
Type | Description
int | The topK.
getTopP()
public double getTopP()
Optional. Top-p changes how the model selects tokens for output. Tokens are
selected from the K most probable (see the topK parameter) down to the least
probable, until the sum of their probabilities equals the top-p value. For
example, if tokens A, B, and C have probabilities of 0.3, 0.2, and 0.1 and
the top-p value is 0.5, then the model will select either A or B as the next
token (using temperature) and doesn't consider C. The default top-p value is
0.95. Specify a lower value for less random responses and a higher value for
more random responses. Acceptable values are in the range [0.0, 1.0].
optional double top_p = 4 [(.google.api.field_behavior) = OPTIONAL];
Returns
Type | Description
double | The topP.
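To make the interplay of topK, topP, and temperature described above concrete, here is an illustrative sketch of the filtering order, not library code: keep the K most probable tokens, keep the smallest prefix of those whose cumulative probability reaches top-p, then sample among the survivors with temperature scaling (assumes temperature > 0):

    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.List;
    import java.util.Random;

    public class SamplingSketch {
      // Illustrative only: pick a token index given per-token probabilities.
      static int sample(double[] probs, int topK, double topP, double temperature, Random rng) {
        // Order token indices by probability, descending.
        List<Integer> order = new ArrayList<>();
        for (int i = 0; i < probs.length; i++) {
          order.add(i);
        }
        order.sort(Comparator.comparingDouble((Integer i) -> probs[i]).reversed());

        // Step 1: keep only the top-K most probable tokens.
        List<Integer> kept = order.subList(0, Math.min(topK, order.size()));

        // Step 2: keep the smallest prefix whose cumulative probability reaches top-p.
        List<Integer> nucleus = new ArrayList<>();
        double cumulative = 0.0;
        for (int token : kept) {
          nucleus.add(token);
          cumulative += probs[token];
          if (cumulative >= topP) {
            break;
          }
        }

        // Step 3: sample among the survivors with temperature-scaled weights.
        // Lower temperature sharpens the distribution; higher flattens it.
        double[] weights = new double[nucleus.size()];
        double total = 0.0;
        for (int j = 0; j < nucleus.size(); j++) {
          weights[j] = Math.pow(probs[nucleus.get(j)], 1.0 / temperature);
          total += weights[j];
        }
        double r = rng.nextDouble() * total;
        for (int j = 0; j < weights.length; j++) {
          r -= weights[j];
          if (r <= 0) {
            return nucleus.get(j);
          }
        }
        return nucleus.get(nucleus.size() - 1);
      }
    }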
hasMaxOutputTokens()
public boolean hasMaxOutputTokens()
Optional. Maximum number of output tokens for the generator.
optional int32 max_output_tokens = 1 [(.google.api.field_behavior) = OPTIONAL];
Returns
Type | Description
boolean | Whether the maxOutputTokens field is set.
hasTemperature()
public boolean hasTemperature()
Optional. Controls the randomness of LLM predictions.
Low temperature = less random. High temperature = more random.
If unset (or set to 0), a default value of 0 is used.
optional double temperature = 2 [(.google.api.field_behavior) = OPTIONAL];
Returns
Type | Description
boolean | Whether the temperature field is set.
hasTopK()
public boolean hasTopK()
Optional. Top-k changes how the model selects tokens for output. A top-k of
1 means the selected token is the most probable among all tokens in the
model's vocabulary (also called greedy decoding), while a top-k of 3 means
that the next token is selected from among the 3 most probable tokens
(using temperature). For each token selection step, the top K tokens with
the highest probabilities are sampled. Then tokens are further filtered
based on topP with the final token selected using temperature sampling.
Specify a lower value for less random responses and a higher value for more
random responses. Acceptable values are in the range [1, 40]; the default is 40.
optional int32 top_k = 3 [(.google.api.field_behavior) = OPTIONAL];
Returns
Type | Description
boolean | Whether the topK field is set.
hasTopP()
public boolean hasTopP()
Optional. Top-p changes how the model selects tokens for output. Tokens are
selected from the K most probable (see the topK parameter) down to the least
probable, until the sum of their probabilities equals the top-p value. For
example, if tokens A, B, and C have probabilities of 0.3, 0.2, and 0.1 and
the top-p value is 0.5, then the model will select either A or B as the next
token (using temperature) and doesn't consider C. The default top-p value is
0.95. Specify a lower value for less random responses and a higher value for
more random responses. Acceptable values are in the range [0.0, 1.0].
optional double top_p = 4 [(.google.api.field_behavior) = OPTIONAL];
Returns
Type | Description
boolean | Whether the topP field is set.
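Because each field is declared optional, the has* accessors distinguish a field that was never set from one explicitly set to its default value; a short sketch:

    import com.google.cloud.dialogflow.v2beta1.InferenceParameter;

    public class HazzerExample {
      public static void main(String[] args) {
        InferenceParameter unset = InferenceParameter.getDefaultInstance();
        InferenceParameter zeroed =
            InferenceParameter.newBuilder().setTemperature(0.0).build();

        // An unset optional field reports false; an explicitly set one reports
        // true, even when the value equals the proto default of 0.
        System.out.println(unset.hasTemperature());   // false
        System.out.println(zeroed.hasTemperature());  // true
      }
    }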
hashCode()
public int hashCode()
Returns
Type | Description
int |
Overrides
internalGetFieldAccessorTable()
protected GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable()
Overrides
isInitialized()
public final boolean isInitialized()
Overrides
newBuilderForType()
public InferenceParameter.Builder newBuilderForType()
newBuilderForType(GeneratedMessageV3.BuilderParent parent)
protected InferenceParameter.Builder newBuilderForType(GeneratedMessageV3.BuilderParent parent)
Overrides
newInstance(GeneratedMessageV3.UnusedPrivateParameter unused)
protected Object newInstance(GeneratedMessageV3.UnusedPrivateParameter unused)
Returns
Type | Description
Object |
Overrides
toBuilder()
public InferenceParameter.Builder toBuilder()
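Since messages are immutable, toBuilder() is the idiomatic way to derive a modified copy; a brief sketch:

    import com.google.cloud.dialogflow.v2beta1.InferenceParameter;

    public class ToBuilderExample {
      public static void main(String[] args) {
        InferenceParameter base =
            InferenceParameter.newBuilder().setTopK(40).setTopP(0.95).build();

        // toBuilder() starts a builder pre-populated with the current field
        // values, leaving the original message untouched.
        InferenceParameter lessRandom = base.toBuilder().setTemperature(0.1).build();

        System.out.println(base.hasTemperature());        // false
        System.out.println(lessRandom.getTemperature());  // 0.1
      }
    }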
writeTo(CodedOutputStream output)
public void writeTo(CodedOutputStream output)
Overrides
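A sketch pairing writeTo(CodedOutputStream) with the parseFrom(CodedInputStream) overload above; note that CodedOutputStream buffers internally and must be flushed:

    import com.google.cloud.dialogflow.v2beta1.InferenceParameter;
    import com.google.protobuf.CodedInputStream;
    import com.google.protobuf.CodedOutputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;

    public class WriteToExample {
      public static void main(String[] args) throws IOException {
        InferenceParameter message =
            InferenceParameter.newBuilder().setMaxOutputTokens(128).build();

        // Write the message to a CodedOutputStream over an in-memory buffer.
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        CodedOutputStream out = CodedOutputStream.newInstance(buffer);
        message.writeTo(out);
        out.flush();

        // Read it back with the matching CodedInputStream overload.
        InferenceParameter copy =
            InferenceParameter.parseFrom(CodedInputStream.newInstance(buffer.toByteArray()));
        System.out.println(copy.getMaxOutputTokens()); // 128
      }
    }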