Variables
PlanNode_Kind_name, PlanNode_Kind_value
var (
PlanNode_Kind_name = map[int32]string{
0: "KIND_UNSPECIFIED",
1: "RELATIONAL",
2: "SCALAR",
}
PlanNode_Kind_value = map[string]int32{
"KIND_UNSPECIFIED": 0,
"RELATIONAL": 1,
"SCALAR": 2,
}
)
Enum value maps for PlanNode_Kind.
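These paired maps follow the standard protobuf-go convention: the _name map goes from wire number to identifier and the _value map goes the other way. A minimal sketch of a round trip using the PlanNode_Kind maps (the spannerpb import alias and path are assumptions, not part of this documentation):

package main

import (
	"fmt"

	spannerpb "cloud.google.com/go/spanner/apiv1/spannerpb" // import path assumed
)

func main() {
	// Number -> name lookup.
	fmt.Println(spannerpb.PlanNode_Kind_name[1]) // RELATIONAL

	// Name -> number lookup, converted back to the typed enum.
	kind := spannerpb.PlanNode_Kind(spannerpb.PlanNode_Kind_value["SCALAR"])
	fmt.Println(kind) // SCALAR
}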
RequestOptions_Priority_name, RequestOptions_Priority_value
var (
RequestOptions_Priority_name = map[int32]string{
0: "PRIORITY_UNSPECIFIED",
1: "PRIORITY_LOW",
2: "PRIORITY_MEDIUM",
3: "PRIORITY_HIGH",
}
RequestOptions_Priority_value = map[string]int32{
"PRIORITY_UNSPECIFIED": 0,
"PRIORITY_LOW": 1,
"PRIORITY_MEDIUM": 2,
"PRIORITY_HIGH": 3,
}
)
Enum value maps for RequestOptions_Priority.
DirectedReadOptions_ReplicaSelection_Type_name, DirectedReadOptions_ReplicaSelection_Type_value
var (
DirectedReadOptions_ReplicaSelection_Type_name = map[int32]string{
0: "TYPE_UNSPECIFIED",
1: "READ_WRITE",
2: "READ_ONLY",
}
DirectedReadOptions_ReplicaSelection_Type_value = map[string]int32{
"TYPE_UNSPECIFIED": 0,
"READ_WRITE": 1,
"READ_ONLY": 2,
}
)
Enum value maps for DirectedReadOptions_ReplicaSelection_Type.
ExecuteSqlRequest_QueryMode_name, ExecuteSqlRequest_QueryMode_value
var (
ExecuteSqlRequest_QueryMode_name = map[int32]string{
0: "NORMAL",
1: "PLAN",
2: "PROFILE",
}
ExecuteSqlRequest_QueryMode_value = map[string]int32{
"NORMAL": 0,
"PLAN": 1,
"PROFILE": 2,
}
)
Enum value maps for ExecuteSqlRequest_QueryMode.
ReadRequest_OrderBy_name, ReadRequest_OrderBy_value
var (
ReadRequest_OrderBy_name = map[int32]string{
0: "ORDER_BY_UNSPECIFIED",
1: "ORDER_BY_PRIMARY_KEY",
2: "ORDER_BY_NO_ORDER",
}
ReadRequest_OrderBy_value = map[string]int32{
"ORDER_BY_UNSPECIFIED": 0,
"ORDER_BY_PRIMARY_KEY": 1,
"ORDER_BY_NO_ORDER": 2,
}
)
Enum value maps for ReadRequest_OrderBy.
ReadRequest_LockHint_name, ReadRequest_LockHint_value
var (
ReadRequest_LockHint_name = map[int32]string{
0: "LOCK_HINT_UNSPECIFIED",
1: "LOCK_HINT_SHARED",
2: "LOCK_HINT_EXCLUSIVE",
}
ReadRequest_LockHint_value = map[string]int32{
"LOCK_HINT_UNSPECIFIED": 0,
"LOCK_HINT_SHARED": 1,
"LOCK_HINT_EXCLUSIVE": 2,
}
)
Enum value maps for ReadRequest_LockHint.
TransactionOptions_ReadWrite_ReadLockMode_name, TransactionOptions_ReadWrite_ReadLockMode_value
var (
TransactionOptions_ReadWrite_ReadLockMode_name = map[int32]string{
0: "READ_LOCK_MODE_UNSPECIFIED",
1: "PESSIMISTIC",
2: "OPTIMISTIC",
}
TransactionOptions_ReadWrite_ReadLockMode_value = map[string]int32{
"READ_LOCK_MODE_UNSPECIFIED": 0,
"PESSIMISTIC": 1,
"OPTIMISTIC": 2,
}
)
Enum value maps for TransactionOptions_ReadWrite_ReadLockMode.
TypeCode_name, TypeCode_value
var (
TypeCode_name = map[int32]string{
0: "TYPE_CODE_UNSPECIFIED",
1: "BOOL",
2: "INT64",
3: "FLOAT64",
15: "FLOAT32",
4: "TIMESTAMP",
5: "DATE",
6: "STRING",
7: "BYTES",
8: "ARRAY",
9: "STRUCT",
10: "NUMERIC",
11: "JSON",
13: "PROTO",
14: "ENUM",
}
TypeCode_value = map[string]int32{
"TYPE_CODE_UNSPECIFIED": 0,
"BOOL": 1,
"INT64": 2,
"FLOAT64": 3,
"FLOAT32": 15,
"TIMESTAMP": 4,
"DATE": 5,
"STRING": 6,
"BYTES": 7,
"ARRAY": 8,
"STRUCT": 9,
"NUMERIC": 10,
"JSON": 11,
"PROTO": 13,
"ENUM": 14,
}
)
Enum value maps for TypeCode.
TypeAnnotationCode_name, TypeAnnotationCode_value
var (
TypeAnnotationCode_name = map[int32]string{
0: "TYPE_ANNOTATION_CODE_UNSPECIFIED",
2: "PG_NUMERIC",
3: "PG_JSONB",
4: "PG_OID",
}
TypeAnnotationCode_value = map[string]int32{
"TYPE_ANNOTATION_CODE_UNSPECIFIED": 0,
"PG_NUMERIC": 2,
"PG_JSONB": 3,
"PG_OID": 4,
}
)
Enum value maps for TypeAnnotationCode.
File_google_spanner_v1_commit_response_proto
var File_google_spanner_v1_commit_response_proto protoreflect.FileDescriptor
File_google_spanner_v1_keys_proto
var File_google_spanner_v1_keys_proto protoreflect.FileDescriptor
File_google_spanner_v1_mutation_proto
var File_google_spanner_v1_mutation_proto protoreflect.FileDescriptor
File_google_spanner_v1_query_plan_proto
var File_google_spanner_v1_query_plan_proto protoreflect.FileDescriptor
File_google_spanner_v1_result_set_proto
var File_google_spanner_v1_result_set_proto protoreflect.FileDescriptor
File_google_spanner_v1_spanner_proto
var File_google_spanner_v1_spanner_proto protoreflect.FileDescriptor
File_google_spanner_v1_transaction_proto
var File_google_spanner_v1_transaction_proto protoreflect.FileDescriptor
File_google_spanner_v1_type_proto
var File_google_spanner_v1_type_proto protoreflect.FileDescriptor
Functions
func RegisterSpannerServer
func RegisterSpannerServer(s *grpc.Server, srv SpannerServer)
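A minimal sketch of wiring an implementation into a *grpc.Server. The mySpannerServer type is hypothetical; it embeds the generated UnimplementedSpannerServer, which standard gRPC codegen usually provides for forward compatibility (verify it exists in your version before relying on it). The spannerpb alias and import path are assumptions.

package main

import (
	"log"
	"net"

	spannerpb "cloud.google.com/go/spanner/apiv1/spannerpb" // import path assumed
	"google.golang.org/grpc"
)

// mySpannerServer is a hypothetical implementation; embedding the generated
// UnimplementedSpannerServer (if present) supplies stub methods so only the
// RPCs you care about need to be overridden.
type mySpannerServer struct {
	spannerpb.UnimplementedSpannerServer
}

func main() {
	lis, err := net.Listen("tcp", ":10007")
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	spannerpb.RegisterSpannerServer(s, &mySpannerServer{})
	log.Fatal(s.Serve(lis))
}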
BatchCreateSessionsRequest
type BatchCreateSessionsRequest struct {
// Required. The database in which the new sessions are created.
Database string `protobuf:"bytes,1,opt,name=database,proto3" json:"database,omitempty"`
// Parameters to be applied to each created session.
SessionTemplate *Session `protobuf:"bytes,2,opt,name=session_template,json=sessionTemplate,proto3" json:"session_template,omitempty"`
// Required. The number of sessions to be created in this batch call.
// The API may return fewer than the requested number of sessions. If a
// specific number of sessions are desired, the client can make additional
// calls to BatchCreateSessions (adjusting
// [session_count][google.spanner.v1.BatchCreateSessionsRequest.session_count]
// as necessary).
SessionCount int32 `protobuf:"varint,3,opt,name=session_count,json=sessionCount,proto3" json:"session_count,omitempty"`
// contains filtered or unexported fields
}
The request for [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions].
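A sketch of populating the request. The database path, count, and label are placeholder values; Session's Labels field comes from the Session message documented elsewhere in this package, and the spannerpb alias is an assumption.

package main

import (
	"fmt"

	spannerpb "cloud.google.com/go/spanner/apiv1/spannerpb" // import path assumed
)

func main() {
	req := &spannerpb.BatchCreateSessionsRequest{
		// Placeholder database path.
		Database:     "projects/my-project/instances/my-instance/databases/my-db",
		SessionCount: 25,
		// SessionTemplate is optional; parameters set here apply to every created session.
		SessionTemplate: &spannerpb.Session{
			Labels: map[string]string{"env": "dev"},
		},
	}
	fmt.Println(req.GetSessionCount())
}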
func (*BatchCreateSessionsRequest) Descriptor
func (*BatchCreateSessionsRequest) Descriptor() ([]byte, []int)
Deprecated: Use BatchCreateSessionsRequest.ProtoReflect.Descriptor instead.
func (*BatchCreateSessionsRequest) GetDatabase
func (x *BatchCreateSessionsRequest) GetDatabase() string
func (*BatchCreateSessionsRequest) GetSessionCount
func (x *BatchCreateSessionsRequest) GetSessionCount() int32
func (*BatchCreateSessionsRequest) GetSessionTemplate
func (x *BatchCreateSessionsRequest) GetSessionTemplate() *Session
func (*BatchCreateSessionsRequest) ProtoMessage
func (*BatchCreateSessionsRequest) ProtoMessage()
func (*BatchCreateSessionsRequest) ProtoReflect
func (x *BatchCreateSessionsRequest) ProtoReflect() protoreflect.Message
func (*BatchCreateSessionsRequest) Reset
func (x *BatchCreateSessionsRequest) Reset()
func (*BatchCreateSessionsRequest) String
func (x *BatchCreateSessionsRequest) String() string
BatchCreateSessionsResponse
type BatchCreateSessionsResponse struct {
// The freshly created sessions.
Session []*Session `protobuf:"bytes,1,rep,name=session,proto3" json:"session,omitempty"`
// contains filtered or unexported fields
}
The response for [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions].
func (*BatchCreateSessionsResponse) Descriptor
func (*BatchCreateSessionsResponse) Descriptor() ([]byte, []int)
Deprecated: Use BatchCreateSessionsResponse.ProtoReflect.Descriptor instead.
func (*BatchCreateSessionsResponse) GetSession
func (x *BatchCreateSessionsResponse) GetSession() []*Session
func (*BatchCreateSessionsResponse) ProtoMessage
func (*BatchCreateSessionsResponse) ProtoMessage()
func (*BatchCreateSessionsResponse) ProtoReflect
func (x *BatchCreateSessionsResponse) ProtoReflect() protoreflect.Message
func (*BatchCreateSessionsResponse) Reset
func (x *BatchCreateSessionsResponse) Reset()
func (*BatchCreateSessionsResponse) String
func (x *BatchCreateSessionsResponse) String() string
BatchWriteRequest
type BatchWriteRequest struct {
Session string `protobuf:"bytes,1,opt,name=session,proto3" json:"session,omitempty"`
RequestOptions *RequestOptions `protobuf:"bytes,3,opt,name=request_options,json=requestOptions,proto3" json:"request_options,omitempty"`
MutationGroups []*BatchWriteRequest_MutationGroup `protobuf:"bytes,4,rep,name=mutation_groups,json=mutationGroups,proto3" json:"mutation_groups,omitempty"`
ExcludeTxnFromChangeStreams bool "" /* 149 byte string literal not displayed */
}
The request for [BatchWrite][google.spanner.v1.Spanner.BatchWrite].
func (*BatchWriteRequest) Descriptor
func (*BatchWriteRequest) Descriptor() ([]byte, []int)
Deprecated: Use BatchWriteRequest.ProtoReflect.Descriptor instead.
func (*BatchWriteRequest) GetExcludeTxnFromChangeStreams
func (x *BatchWriteRequest) GetExcludeTxnFromChangeStreams() bool
func (*BatchWriteRequest) GetMutationGroups
func (x *BatchWriteRequest) GetMutationGroups() []*BatchWriteRequest_MutationGroup
func (*BatchWriteRequest) GetRequestOptions
func (x *BatchWriteRequest) GetRequestOptions() *RequestOptions
func (*BatchWriteRequest) GetSession
func (x *BatchWriteRequest) GetSession() string
func (*BatchWriteRequest) ProtoMessage
func (*BatchWriteRequest) ProtoMessage()
func (*BatchWriteRequest) ProtoReflect
func (x *BatchWriteRequest) ProtoReflect() protoreflect.Message
func (*BatchWriteRequest) Reset
func (x *BatchWriteRequest) Reset()
func (*BatchWriteRequest) String
func (x *BatchWriteRequest) String() string
BatchWriteRequest_MutationGroup
type BatchWriteRequest_MutationGroup struct {
// Required. The mutations in this group.
Mutations []*Mutation `protobuf:"bytes,1,rep,name=mutations,proto3" json:"mutations,omitempty"`
// contains filtered or unexported fields
}
A group of mutations to be committed together. Related mutations should be placed in a group. For example, two mutations inserting rows with the same primary key prefix in both parent and child tables are related.
func (*BatchWriteRequest_MutationGroup) Descriptor
func (*BatchWriteRequest_MutationGroup) Descriptor() ([]byte, []int)
Deprecated: Use BatchWriteRequest_MutationGroup.ProtoReflect.Descriptor instead.
func (*BatchWriteRequest_MutationGroup) GetMutations
func (x *BatchWriteRequest_MutationGroup) GetMutations() []*Mutation
func (*BatchWriteRequest_MutationGroup) ProtoMessage
func (*BatchWriteRequest_MutationGroup) ProtoMessage()
func (*BatchWriteRequest_MutationGroup) ProtoReflect
func (x *BatchWriteRequest_MutationGroup) ProtoReflect() protoreflect.Message
func (*BatchWriteRequest_MutationGroup) Reset
func (x *BatchWriteRequest_MutationGroup) Reset()
func (*BatchWriteRequest_MutationGroup) String
func (x *BatchWriteRequest_MutationGroup) String() string
BatchWriteResponse
type BatchWriteResponse struct {
// The mutation groups applied in this batch. The values index into the
// `mutation_groups` field in the corresponding `BatchWriteRequest`.
Indexes []int32 `protobuf:"varint,1,rep,packed,name=indexes,proto3" json:"indexes,omitempty"`
// An `OK` status indicates success. Any other status indicates a failure.
Status *status.Status `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`
// The commit timestamp of the transaction that applied this batch.
// Present if `status` is `OK`, absent otherwise.
CommitTimestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=commit_timestamp,json=commitTimestamp,proto3" json:"commit_timestamp,omitempty"`
// contains filtered or unexported fields
}
The result of applying a batch of mutations.
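BatchWrite streams one such response per group (or set of groups) it applies. A sketch of inspecting a response, assuming the google.rpc Status code can be compared against grpc codes (the spannerpb alias is an assumption; in real use the response comes from the BatchWrite streaming RPC):

package main

import (
	"fmt"

	spannerpb "cloud.google.com/go/spanner/apiv1/spannerpb" // import path assumed
	"google.golang.org/grpc/codes"
)

// reportBatchWrite prints the outcome of one streamed BatchWriteResponse.
func reportBatchWrite(resp *spannerpb.BatchWriteResponse) {
	if codes.Code(resp.GetStatus().GetCode()) == codes.OK {
		fmt.Printf("groups %v committed at %v\n",
			resp.GetIndexes(), resp.GetCommitTimestamp().AsTime())
		return
	}
	fmt.Printf("groups %v failed: %s\n", resp.GetIndexes(), resp.GetStatus().GetMessage())
}

func main() {
	reportBatchWrite(&spannerpb.BatchWriteResponse{}) // placeholder response
}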
func (*BatchWriteResponse) Descriptor
func (*BatchWriteResponse) Descriptor() ([]byte, []int)
Deprecated: Use BatchWriteResponse.ProtoReflect.Descriptor instead.
func (*BatchWriteResponse) GetCommitTimestamp
func (x *BatchWriteResponse) GetCommitTimestamp() *timestamppb.Timestamp
func (*BatchWriteResponse) GetIndexes
func (x *BatchWriteResponse) GetIndexes() []int32
func (*BatchWriteResponse) GetStatus
func (x *BatchWriteResponse) GetStatus() *status.Status
func (*BatchWriteResponse) ProtoMessage
func (*BatchWriteResponse) ProtoMessage()
func (*BatchWriteResponse) ProtoReflect
func (x *BatchWriteResponse) ProtoReflect() protoreflect.Message
func (*BatchWriteResponse) Reset
func (x *BatchWriteResponse) Reset()
func (*BatchWriteResponse) String
func (x *BatchWriteResponse) String() string
BeginTransactionRequest
type BeginTransactionRequest struct {
// Required. The session in which the transaction runs.
Session string `protobuf:"bytes,1,opt,name=session,proto3" json:"session,omitempty"`
// Required. Options for the new transaction.
Options *TransactionOptions `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"`
// Common options for this request.
// Priority is ignored for this request. Setting the priority in this
// request_options struct will not do anything. To set the priority for a
// transaction, set it on the reads and writes that are part of this
// transaction instead.
RequestOptions *RequestOptions `protobuf:"bytes,3,opt,name=request_options,json=requestOptions,proto3" json:"request_options,omitempty"`
// contains filtered or unexported fields
}
The request for [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction].
func (*BeginTransactionRequest) Descriptor
func (*BeginTransactionRequest) Descriptor() ([]byte, []int)
Deprecated: Use BeginTransactionRequest.ProtoReflect.Descriptor instead.
func (*BeginTransactionRequest) GetOptions
func (x *BeginTransactionRequest) GetOptions() *TransactionOptions
func (*BeginTransactionRequest) GetRequestOptions
func (x *BeginTransactionRequest) GetRequestOptions() *RequestOptions
func (*BeginTransactionRequest) GetSession
func (x *BeginTransactionRequest) GetSession() string
func (*BeginTransactionRequest) ProtoMessage
func (*BeginTransactionRequest) ProtoMessage()
func (*BeginTransactionRequest) ProtoReflect
func (x *BeginTransactionRequest) ProtoReflect() protoreflect.Message
func (*BeginTransactionRequest) Reset
func (x *BeginTransactionRequest) Reset()
func (*BeginTransactionRequest) String
func (x *BeginTransactionRequest) String() string
CommitRequest
type CommitRequest struct {
// Required. The session in which the transaction to be committed is running.
Session string `protobuf:"bytes,1,opt,name=session,proto3" json:"session,omitempty"`
// Required. The transaction in which to commit.
//
// Types that are assignable to Transaction:
//
// *CommitRequest_TransactionId
// *CommitRequest_SingleUseTransaction
Transaction isCommitRequest_Transaction `protobuf_oneof:"transaction"`
// The mutations to be executed when this transaction commits. All
// mutations are applied atomically, in the order they appear in
// this list.
Mutations []*Mutation `protobuf:"bytes,4,rep,name=mutations,proto3" json:"mutations,omitempty"`
// If `true`, then statistics related to the transaction will be included in
// the [CommitResponse][google.spanner.v1.CommitResponse.commit_stats].
// Default value is `false`.
ReturnCommitStats bool `protobuf:"varint,5,opt,name=return_commit_stats,json=returnCommitStats,proto3" json:"return_commit_stats,omitempty"`
// Optional. The amount of latency this request is willing to incur in order
// to improve throughput. If this field is not set, Spanner assumes requests
// are relatively latency sensitive and automatically determines an
// appropriate delay time. You can specify a batching delay value between 0
// and 500 ms.
MaxCommitDelay *durationpb.Duration `protobuf:"bytes,8,opt,name=max_commit_delay,json=maxCommitDelay,proto3" json:"max_commit_delay,omitempty"`
// Common options for this request.
RequestOptions *RequestOptions `protobuf:"bytes,6,opt,name=request_options,json=requestOptions,proto3" json:"request_options,omitempty"`
// contains filtered or unexported fields
}
The request for [Commit][google.spanner.v1.Spanner.Commit].
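A sketch of committing a previously started transaction by ID, with commit stats requested and an optional max commit delay. The session path and transaction ID are placeholders, and the spannerpb alias is an assumption.

package main

import (
	"fmt"
	"time"

	spannerpb "cloud.google.com/go/spanner/apiv1/spannerpb" // import path assumed
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	txnID := []byte("placeholder-transaction-id") // returned by BeginTransaction in real use

	req := &spannerpb.CommitRequest{
		Session: "projects/p/instances/i/databases/d/sessions/s", // placeholder
		// The oneof wrapper selects "commit by transaction ID" rather than
		// a single-use transaction.
		Transaction:       &spannerpb.CommitRequest_TransactionId{TransactionId: txnID},
		ReturnCommitStats: true,
		// Allow Spanner to delay the commit by up to 100ms to improve throughput.
		MaxCommitDelay: durationpb.New(100 * time.Millisecond),
	}
	fmt.Println(req.GetReturnCommitStats())
}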
func (*CommitRequest) Descriptor
func (*CommitRequest) Descriptor() ([]byte, []int)
Deprecated: Use CommitRequest.ProtoReflect.Descriptor instead.
func (*CommitRequest) GetMaxCommitDelay
func (x *CommitRequest) GetMaxCommitDelay() *durationpb.Duration
func (*CommitRequest) GetMutations
func (x *CommitRequest) GetMutations() []*Mutation
func (*CommitRequest) GetRequestOptions
func (x *CommitRequest) GetRequestOptions() *RequestOptions
func (*CommitRequest) GetReturnCommitStats
func (x *CommitRequest) GetReturnCommitStats() bool
func (*CommitRequest) GetSession
func (x *CommitRequest) GetSession() string
func (*CommitRequest) GetSingleUseTransaction
func (x *CommitRequest) GetSingleUseTransaction() *TransactionOptions
func (*CommitRequest) GetTransaction
func (m *CommitRequest) GetTransaction() isCommitRequest_Transaction
func (*CommitRequest) GetTransactionId
func (x *CommitRequest) GetTransactionId() []byte
func (*CommitRequest) ProtoMessage
func (*CommitRequest) ProtoMessage()
func (*CommitRequest) ProtoReflect
func (x *CommitRequest) ProtoReflect() protoreflect.Message
func (*CommitRequest) Reset
func (x *CommitRequest) Reset()
func (*CommitRequest) String
func (x *CommitRequest) String() string
CommitRequest_SingleUseTransaction
type CommitRequest_SingleUseTransaction struct {
// Execute mutations in a temporary transaction. Note that unlike
// commit of a previously-started transaction, commit with a
// temporary transaction is non-idempotent. That is, if the
// `CommitRequest` is sent to Cloud Spanner more than once (for
// instance, due to retries in the application, or in the
// transport library), it is possible that the mutations are
// executed more than once. If this is undesirable, use
// [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction] and
// [Commit][google.spanner.v1.Spanner.Commit] instead.
SingleUseTransaction *TransactionOptions `protobuf:"bytes,3,opt,name=single_use_transaction,json=singleUseTransaction,proto3,oneof"`
}
CommitRequest_TransactionId
type CommitRequest_TransactionId struct {
// Commit a previously-started transaction.
TransactionId []byte `protobuf:"bytes,2,opt,name=transaction_id,json=transactionId,proto3,oneof"`
}
CommitResponse
type CommitResponse struct {
// The Cloud Spanner timestamp at which the transaction committed.
CommitTimestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=commit_timestamp,json=commitTimestamp,proto3" json:"commit_timestamp,omitempty"`
// The statistics about this Commit. Not returned by default.
// For more information, see
// [CommitRequest.return_commit_stats][google.spanner.v1.CommitRequest.return_commit_stats].
CommitStats *CommitResponse_CommitStats `protobuf:"bytes,2,opt,name=commit_stats,json=commitStats,proto3" json:"commit_stats,omitempty"`
// contains filtered or unexported fields
}
The response for [Commit][google.spanner.v1.Spanner.Commit].
func (*CommitResponse) Descriptor
func (*CommitResponse) Descriptor() ([]byte, []int)
Deprecated: Use CommitResponse.ProtoReflect.Descriptor instead.
func (*CommitResponse) GetCommitStats
func (x *CommitResponse) GetCommitStats() *CommitResponse_CommitStats
func (*CommitResponse) GetCommitTimestamp
func (x *CommitResponse) GetCommitTimestamp() *timestamppb.Timestamp
func (*CommitResponse) ProtoMessage
func (*CommitResponse) ProtoMessage()
func (*CommitResponse) ProtoReflect
func (x *CommitResponse) ProtoReflect() protoreflect.Message
func (*CommitResponse) Reset
func (x *CommitResponse) Reset()
func (*CommitResponse) String
func (x *CommitResponse) String() string
CommitResponse_CommitStats
type CommitResponse_CommitStats struct {
// The total number of mutations for the transaction. Knowing the
// `mutation_count` value can help you maximize the number of mutations
// in a transaction and minimize the number of API round trips. You can
// also monitor this value to prevent transactions from exceeding the system
// [limit](https://cloud.google.com/spanner/quotas#limits_for_creating_reading_updating_and_deleting_data).
// If the number of mutations exceeds the limit, the server returns
// [INVALID_ARGUMENT](https://cloud.google.com/spanner/docs/reference/rest/v1/Code#ENUM_VALUES.INVALID_ARGUMENT).
MutationCount int64 `protobuf:"varint,1,opt,name=mutation_count,json=mutationCount,proto3" json:"mutation_count,omitempty"`
// contains filtered or unexported fields
}
Additional statistics about a commit.
func (*CommitResponse_CommitStats) Descriptor
func (*CommitResponse_CommitStats) Descriptor() ([]byte, []int)
Deprecated: Use CommitResponse_CommitStats.ProtoReflect.Descriptor instead.
func (*CommitResponse_CommitStats) GetMutationCount
func (x *CommitResponse_CommitStats) GetMutationCount() int64
func (*CommitResponse_CommitStats) ProtoMessage
func (*CommitResponse_CommitStats) ProtoMessage()
func (*CommitResponse_CommitStats) ProtoReflect
func (x *CommitResponse_CommitStats) ProtoReflect() protoreflect.Message
func (*CommitResponse_CommitStats) Reset
func (x *CommitResponse_CommitStats) Reset()
func (*CommitResponse_CommitStats) String
func (x *CommitResponse_CommitStats) String() string
CreateSessionRequest
type CreateSessionRequest struct {
// Required. The database in which the new session is created.
Database string `protobuf:"bytes,1,opt,name=database,proto3" json:"database,omitempty"`
// Required. The session to create.
Session *Session `protobuf:"bytes,2,opt,name=session,proto3" json:"session,omitempty"`
// contains filtered or unexported fields
}
The request for [CreateSession][google.spanner.v1.Spanner.CreateSession].
func (*CreateSessionRequest) Descriptor
func (*CreateSessionRequest) Descriptor() ([]byte, []int)
Deprecated: Use CreateSessionRequest.ProtoReflect.Descriptor instead.
func (*CreateSessionRequest) GetDatabase
func (x *CreateSessionRequest) GetDatabase() string
func (*CreateSessionRequest) GetSession
func (x *CreateSessionRequest) GetSession() *Session
func (*CreateSessionRequest) ProtoMessage
func (*CreateSessionRequest) ProtoMessage()
func (*CreateSessionRequest) ProtoReflect
func (x *CreateSessionRequest) ProtoReflect() protoreflect.Message
func (*CreateSessionRequest) Reset
func (x *CreateSessionRequest) Reset()
func (*CreateSessionRequest) String
func (x *CreateSessionRequest) String() string
DeleteSessionRequest
type DeleteSessionRequest struct {
// Required. The name of the session to delete.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// contains filtered or unexported fields
}
The request for [DeleteSession][google.spanner.v1.Spanner.DeleteSession].
func (*DeleteSessionRequest) Descriptor
func (*DeleteSessionRequest) Descriptor() ([]byte, []int)
Deprecated: Use DeleteSessionRequest.ProtoReflect.Descriptor instead.
func (*DeleteSessionRequest) GetName
func (x *DeleteSessionRequest) GetName() string
func (*DeleteSessionRequest) ProtoMessage
func (*DeleteSessionRequest) ProtoMessage()
func (*DeleteSessionRequest) ProtoReflect
func (x *DeleteSessionRequest) ProtoReflect() protoreflect.Message
func (*DeleteSessionRequest) Reset
func (x *DeleteSessionRequest) Reset()
func (*DeleteSessionRequest) String
func (x *DeleteSessionRequest) String() string
DirectedReadOptions
type DirectedReadOptions struct {
// Required. At most one of either include_replicas or exclude_replicas
// should be present in the message.
//
// Types that are assignable to Replicas:
//
// *DirectedReadOptions_IncludeReplicas_
// *DirectedReadOptions_ExcludeReplicas_
Replicas isDirectedReadOptions_Replicas `protobuf_oneof:"replicas"`
// contains filtered or unexported fields
}
The DirectedReadOptions can be used to indicate which replicas or regions should be used for non-transactional reads or queries.
DirectedReadOptions may only be specified for a read-only transaction, otherwise the API will return an INVALID_ARGUMENT error.
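A sketch of directing reads at read-only replicas in one region via the include_replicas oneof (the region name is a placeholder and the spannerpb alias is an assumption):

package main

import (
	"fmt"

	spannerpb "cloud.google.com/go/spanner/apiv1/spannerpb" // import path assumed
)

func main() {
	opts := &spannerpb.DirectedReadOptions{
		Replicas: &spannerpb.DirectedReadOptions_IncludeReplicas_{
			IncludeReplicas: &spannerpb.DirectedReadOptions_IncludeReplicas{
				ReplicaSelections: []*spannerpb.DirectedReadOptions_ReplicaSelection{
					{
						Location: "us-east1", // placeholder region
						Type:     spannerpb.DirectedReadOptions_ReplicaSelection_READ_ONLY,
					},
				},
				// Fail rather than fall back to replicas outside the list.
				AutoFailoverDisabled: true,
			},
		},
	}
	fmt.Println(opts.GetIncludeReplicas().GetAutoFailoverDisabled())
}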
func (*DirectedReadOptions) Descriptor
func (*DirectedReadOptions) Descriptor() ([]byte, []int)
Deprecated: Use DirectedReadOptions.ProtoReflect.Descriptor instead.
func (*DirectedReadOptions) GetExcludeReplicas
func (x *DirectedReadOptions) GetExcludeReplicas() *DirectedReadOptions_ExcludeReplicas
func (*DirectedReadOptions) GetIncludeReplicas
func (x *DirectedReadOptions) GetIncludeReplicas() *DirectedReadOptions_IncludeReplicas
func (*DirectedReadOptions) GetReplicas
func (m *DirectedReadOptions) GetReplicas() isDirectedReadOptions_Replicas
func (*DirectedReadOptions) ProtoMessage
func (*DirectedReadOptions) ProtoMessage()
func (*DirectedReadOptions) ProtoReflect
func (x *DirectedReadOptions) ProtoReflect() protoreflect.Message
func (*DirectedReadOptions) Reset
func (x *DirectedReadOptions) Reset()
func (*DirectedReadOptions) String
func (x *DirectedReadOptions) String() string
DirectedReadOptions_ExcludeReplicas
type DirectedReadOptions_ExcludeReplicas struct {
// The directed read replica selector.
ReplicaSelections []*DirectedReadOptions_ReplicaSelection `protobuf:"bytes,1,rep,name=replica_selections,json=replicaSelections,proto3" json:"replica_selections,omitempty"`
// contains filtered or unexported fields
}
An ExcludeReplicas contains a repeated set of ReplicaSelection that should be excluded from serving requests.
func (*DirectedReadOptions_ExcludeReplicas) Descriptor
func (*DirectedReadOptions_ExcludeReplicas) Descriptor() ([]byte, []int)
Deprecated: Use DirectedReadOptions_ExcludeReplicas.ProtoReflect.Descriptor instead.
func (*DirectedReadOptions_ExcludeReplicas) GetReplicaSelections
func (x *DirectedReadOptions_ExcludeReplicas) GetReplicaSelections() []*DirectedReadOptions_ReplicaSelection
func (*DirectedReadOptions_ExcludeReplicas) ProtoMessage
func (*DirectedReadOptions_ExcludeReplicas) ProtoMessage()
func (*DirectedReadOptions_ExcludeReplicas) ProtoReflect
func (x *DirectedReadOptions_ExcludeReplicas) ProtoReflect() protoreflect.Message
func (*DirectedReadOptions_ExcludeReplicas) Reset
func (x *DirectedReadOptions_ExcludeReplicas) Reset()
func (*DirectedReadOptions_ExcludeReplicas) String
func (x *DirectedReadOptions_ExcludeReplicas) String() string
DirectedReadOptions_ExcludeReplicas_
type DirectedReadOptions_ExcludeReplicas_ struct {
// Exclude_replicas indicates that specified replicas should be excluded
// from serving requests. Spanner will not route requests to the replicas
// in this list.
ExcludeReplicas *DirectedReadOptions_ExcludeReplicas `protobuf:"bytes,2,opt,name=exclude_replicas,json=excludeReplicas,proto3,oneof"`
}
DirectedReadOptions_IncludeReplicas
type DirectedReadOptions_IncludeReplicas struct {
// The directed read replica selector.
ReplicaSelections []*DirectedReadOptions_ReplicaSelection `protobuf:"bytes,1,rep,name=replica_selections,json=replicaSelections,proto3" json:"replica_selections,omitempty"`
// If true, Spanner will not route requests to a replica outside the
// include_replicas list when all of the specified replicas are unavailable
// or unhealthy. Default value is `false`.
AutoFailoverDisabled bool `protobuf:"varint,2,opt,name=auto_failover_disabled,json=autoFailoverDisabled,proto3" json:"auto_failover_disabled,omitempty"`
// contains filtered or unexported fields
}
An IncludeReplicas contains a repeated set of ReplicaSelection which indicates the order in which replicas should be considered.
func (*DirectedReadOptions_IncludeReplicas) Descriptor
func (*DirectedReadOptions_IncludeReplicas) Descriptor() ([]byte, []int)
Deprecated: Use DirectedReadOptions_IncludeReplicas.ProtoReflect.Descriptor instead.
func (*DirectedReadOptions_IncludeReplicas) GetAutoFailoverDisabled
func (x *DirectedReadOptions_IncludeReplicas) GetAutoFailoverDisabled() bool
func (*DirectedReadOptions_IncludeReplicas) GetReplicaSelections
func (x *DirectedReadOptions_IncludeReplicas) GetReplicaSelections() []*DirectedReadOptions_ReplicaSelection
func (*DirectedReadOptions_IncludeReplicas) ProtoMessage
func (*DirectedReadOptions_IncludeReplicas) ProtoMessage()
func (*DirectedReadOptions_IncludeReplicas) ProtoReflect
func (x *DirectedReadOptions_IncludeReplicas) ProtoReflect() protoreflect.Message
func (*DirectedReadOptions_IncludeReplicas) Reset
func (x *DirectedReadOptions_IncludeReplicas) Reset()
func (*DirectedReadOptions_IncludeReplicas) String
func (x *DirectedReadOptions_IncludeReplicas) String() string
DirectedReadOptions_IncludeReplicas_
type DirectedReadOptions_IncludeReplicas_ struct {
// Include_replicas indicates the order of replicas (as they appear in
// this list) to process the request. If auto_failover_disabled is set to
// true and all replicas are exhausted without finding a healthy replica,
// Spanner will wait for a replica in the list to become available; requests
// may fail due to `DEADLINE_EXCEEDED` errors.
IncludeReplicas *DirectedReadOptions_IncludeReplicas `protobuf:"bytes,1,opt,name=include_replicas,json=includeReplicas,proto3,oneof"`
}
DirectedReadOptions_ReplicaSelection
type DirectedReadOptions_ReplicaSelection struct {
Location string `protobuf:"bytes,1,opt,name=location,proto3" json:"location,omitempty"`
Type DirectedReadOptions_ReplicaSelection_Type "" /* 127 byte string literal not displayed */
}
The directed read replica selector. Callers must provide one or more of the following fields for replica selection:
- location - The location must be one of the regions within the multi-region configuration of your database.
- type - The type of the replica.
Some examples of using replica_selectors are:
- location:us-east1 --> The "us-east1" replica(s) of any available type will be used to process the request.
- type:READ_ONLY --> The "READ_ONLY" type replica(s) in the nearest available location will be used to process the request.
- location:us-east1 type:READ_ONLY --> The "READ_ONLY" type replica(s) in location "us-east1" will be used to process the request.
func (*DirectedReadOptions_ReplicaSelection) Descriptor
func (*DirectedReadOptions_ReplicaSelection) Descriptor() ([]byte, []int)
Deprecated: Use DirectedReadOptions_ReplicaSelection.ProtoReflect.Descriptor instead.
func (*DirectedReadOptions_ReplicaSelection) GetLocation
func (x *DirectedReadOptions_ReplicaSelection) GetLocation() string
func (*DirectedReadOptions_ReplicaSelection) GetType
func (x *DirectedReadOptions_ReplicaSelection) GetType() DirectedReadOptions_ReplicaSelection_Type
func (*DirectedReadOptions_ReplicaSelection) ProtoMessage
func (*DirectedReadOptions_ReplicaSelection) ProtoMessage()
func (*DirectedReadOptions_ReplicaSelection) ProtoReflect
func (x *DirectedReadOptions_ReplicaSelection) ProtoReflect() protoreflect.Message
func (*DirectedReadOptions_ReplicaSelection) Reset
func (x *DirectedReadOptions_ReplicaSelection) Reset()
func (*DirectedReadOptions_ReplicaSelection) String
func (x *DirectedReadOptions_ReplicaSelection) String() string
DirectedReadOptions_ReplicaSelection_Type
type DirectedReadOptions_ReplicaSelection_Type int32
Indicates the type of replica.
DirectedReadOptions_ReplicaSelection_TYPE_UNSPECIFIED, DirectedReadOptions_ReplicaSelection_READ_WRITE, DirectedReadOptions_ReplicaSelection_READ_ONLY
const (
// Not specified.
DirectedReadOptions_ReplicaSelection_TYPE_UNSPECIFIED DirectedReadOptions_ReplicaSelection_Type = 0
// Read-write replicas support both reads and writes.
DirectedReadOptions_ReplicaSelection_READ_WRITE DirectedReadOptions_ReplicaSelection_Type = 1
// Read-only replicas only support reads (not writes).
DirectedReadOptions_ReplicaSelection_READ_ONLY DirectedReadOptions_ReplicaSelection_Type = 2
)
func (DirectedReadOptions_ReplicaSelection_Type) Descriptor
func (DirectedReadOptions_ReplicaSelection_Type) Descriptor() protoreflect.EnumDescriptor
func (DirectedReadOptions_ReplicaSelection_Type) Enum
func (x DirectedReadOptions_ReplicaSelection_Type) Enum() *DirectedReadOptions_ReplicaSelection_Type
func (DirectedReadOptions_ReplicaSelection_Type) EnumDescriptor
func (DirectedReadOptions_ReplicaSelection_Type) EnumDescriptor() ([]byte, []int)
Deprecated: Use DirectedReadOptions_ReplicaSelection_Type.Descriptor instead.
func (DirectedReadOptions_ReplicaSelection_Type) Number
func (x DirectedReadOptions_ReplicaSelection_Type) Number() protoreflect.EnumNumber
func (DirectedReadOptions_ReplicaSelection_Type) String
func (x DirectedReadOptions_ReplicaSelection_Type) String() string
func (DirectedReadOptions_ReplicaSelection_Type) Type
func (DirectedReadOptions_ReplicaSelection_Type) Type() protoreflect.EnumType
ExecuteBatchDmlRequest
type ExecuteBatchDmlRequest struct {
// Required. The session in which the DML statements should be performed.
Session string `protobuf:"bytes,1,opt,name=session,proto3" json:"session,omitempty"`
// Required. The transaction to use. Must be a read-write transaction.
//
// To protect against replays, single-use transactions are not supported. The
// caller must either supply an existing transaction ID or begin a new
// transaction.
Transaction *TransactionSelector `protobuf:"bytes,2,opt,name=transaction,proto3" json:"transaction,omitempty"`
// Required. The list of statements to execute in this batch. Statements are
// executed serially, such that the effects of statement `i` are visible to
// statement `i+1`. Each statement must be a DML statement. Execution stops at
// the first failed statement; the remaining statements are not executed.
//
// Callers must provide at least one statement.
Statements []*ExecuteBatchDmlRequest_Statement `protobuf:"bytes,3,rep,name=statements,proto3" json:"statements,omitempty"`
// Required. A per-transaction sequence number used to identify this request.
// This field makes each request idempotent such that if the request is
// received multiple times, at most one will succeed.
//
// The sequence number must be monotonically increasing within the
// transaction. If a request arrives for the first time with an out-of-order
// sequence number, the transaction may be aborted. Replays of previously
// handled requests will yield the same response as the first execution.
Seqno int64 `protobuf:"varint,4,opt,name=seqno,proto3" json:"seqno,omitempty"`
// Common options for this request.
RequestOptions *RequestOptions `protobuf:"bytes,5,opt,name=request_options,json=requestOptions,proto3" json:"request_options,omitempty"`
// contains filtered or unexported fields
}
The request for [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml].
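A sketch of a two-statement batch run against an existing read-write transaction. structpb.NewStruct builds the parameter values; the session path, transaction ID, and SQL are placeholders, the TransactionSelector_Id oneof wrapper is defined elsewhere in this package, and the spannerpb alias is an assumption.

package main

import (
	"log"

	spannerpb "cloud.google.com/go/spanner/apiv1/spannerpb" // import path assumed
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	params, err := structpb.NewStruct(map[string]interface{}{"id": 42})
	if err != nil {
		log.Fatal(err)
	}
	txnID := []byte("placeholder-transaction-id") // from BeginTransaction in real use

	req := &spannerpb.ExecuteBatchDmlRequest{
		Session: "projects/p/instances/i/databases/d/sessions/s", // placeholder
		Transaction: &spannerpb.TransactionSelector{
			Selector: &spannerpb.TransactionSelector_Id{Id: txnID},
		},
		Statements: []*spannerpb.ExecuteBatchDmlRequest_Statement{
			{Sql: "UPDATE Albums SET MarketingBudget = 0 WHERE SingerId = @id", Params: params},
			{Sql: "DELETE FROM Albums WHERE MarketingBudget = 0"},
		},
		Seqno: 1, // must increase monotonically within the transaction
	}
	log.Println(len(req.GetStatements()), "statements queued")
}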
func (*ExecuteBatchDmlRequest) Descriptor
func (*ExecuteBatchDmlRequest) Descriptor() ([]byte, []int)
Deprecated: Use ExecuteBatchDmlRequest.ProtoReflect.Descriptor instead.
func (*ExecuteBatchDmlRequest) GetRequestOptions
func (x *ExecuteBatchDmlRequest) GetRequestOptions() *RequestOptions
func (*ExecuteBatchDmlRequest) GetSeqno
func (x *ExecuteBatchDmlRequest) GetSeqno() int64
func (*ExecuteBatchDmlRequest) GetSession
func (x *ExecuteBatchDmlRequest) GetSession() string
func (*ExecuteBatchDmlRequest) GetStatements
func (x *ExecuteBatchDmlRequest) GetStatements() []*ExecuteBatchDmlRequest_Statement
func (*ExecuteBatchDmlRequest) GetTransaction
func (x *ExecuteBatchDmlRequest) GetTransaction() *TransactionSelector
func (*ExecuteBatchDmlRequest) ProtoMessage
func (*ExecuteBatchDmlRequest) ProtoMessage()
func (*ExecuteBatchDmlRequest) ProtoReflect
func (x *ExecuteBatchDmlRequest) ProtoReflect() protoreflect.Message
func (*ExecuteBatchDmlRequest) Reset
func (x *ExecuteBatchDmlRequest) Reset()
func (*ExecuteBatchDmlRequest) String
func (x *ExecuteBatchDmlRequest) String() string
ExecuteBatchDmlRequest_Statement
type ExecuteBatchDmlRequest_Statement struct {
Sql string `protobuf:"bytes,1,opt,name=sql,proto3" json:"sql,omitempty"`
Params *structpb.Struct `protobuf:"bytes,2,opt,name=params,proto3" json:"params,omitempty"`
ParamTypes map[string]*Type "" /* 179 byte string literal not displayed */
}
A single DML statement.
func (*ExecuteBatchDmlRequest_Statement) Descriptor
func (*ExecuteBatchDmlRequest_Statement) Descriptor() ([]byte, []int)
Deprecated: Use ExecuteBatchDmlRequest_Statement.ProtoReflect.Descriptor instead.
func (*ExecuteBatchDmlRequest_Statement) GetParamTypes
func (x *ExecuteBatchDmlRequest_Statement) GetParamTypes() map[string]*Type
func (*ExecuteBatchDmlRequest_Statement) GetParams
func (x *ExecuteBatchDmlRequest_Statement) GetParams() *structpb.Struct
func (*ExecuteBatchDmlRequest_Statement) GetSql
func (x *ExecuteBatchDmlRequest_Statement) GetSql() string
func (*ExecuteBatchDmlRequest_Statement) ProtoMessage
func (*ExecuteBatchDmlRequest_Statement) ProtoMessage()
func (*ExecuteBatchDmlRequest_Statement) ProtoReflect
func (x *ExecuteBatchDmlRequest_Statement) ProtoReflect() protoreflect.Message
func (*ExecuteBatchDmlRequest_Statement) Reset
func (x *ExecuteBatchDmlRequest_Statement) Reset()
func (*ExecuteBatchDmlRequest_Statement) String
func (x *ExecuteBatchDmlRequest_Statement) String() string
ExecuteBatchDmlResponse
type ExecuteBatchDmlResponse struct {
// One [ResultSet][google.spanner.v1.ResultSet] for each statement in the
// request that ran successfully, in the same order as the statements in the
// request. Each [ResultSet][google.spanner.v1.ResultSet] does not contain any
// rows. The [ResultSetStats][google.spanner.v1.ResultSetStats] in each
// [ResultSet][google.spanner.v1.ResultSet] contain the number of rows
// modified by the statement.
//
// Only the first [ResultSet][google.spanner.v1.ResultSet] in the response
// contains valid [ResultSetMetadata][google.spanner.v1.ResultSetMetadata].
ResultSets []*ResultSet `protobuf:"bytes,1,rep,name=result_sets,json=resultSets,proto3" json:"result_sets,omitempty"`
// If all DML statements are executed successfully, the status is `OK`.
// Otherwise, the error status of the first failed statement.
Status *status.Status `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`
// contains filtered or unexported fields
}
The response for [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml]. Contains a list of [ResultSet][google.spanner.v1.ResultSet] messages, one for each DML statement that has successfully executed, in the same order as the statements in the request. If a statement fails, the status in the response body identifies the cause of the failure.
To check for DML statements that failed, use the following approach:
- Check the status in the response message. The [google.rpc.Code][google.rpc.Code] enum value `OK` indicates that all statements were executed successfully.
- If the status was not `OK`, check the number of result sets in the response. If the response contains N [ResultSet][google.spanner.v1.ResultSet] messages, then statement N+1 in the request failed.
Example 1:
- Request: 5 DML statements, all executed successfully.
- Response: 5 [ResultSet][google.spanner.v1.ResultSet] messages, with the status `OK`.
Example 2:
- Request: 5 DML statements. The third statement has a syntax error.
- Response: 2 [ResultSet][google.spanner.v1.ResultSet] messages, and a syntax error (`INVALID_ARGUMENT`) status. The number of [ResultSet][google.spanner.v1.ResultSet] messages indicates that the third statement failed, and the fourth and fifth statements were not executed.
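A sketch of the check described above: treat a non-OK status as a partial failure and use the result-set count to locate the failing statement (the spannerpb alias is an assumption; in real use the response comes from the ExecuteBatchDml RPC):

package main

import (
	"fmt"

	spannerpb "cloud.google.com/go/spanner/apiv1/spannerpb" // import path assumed
	"google.golang.org/grpc/codes"
)

// firstFailedStatement returns the 0-based index of the statement that failed,
// or -1 if every statement succeeded.
func firstFailedStatement(resp *spannerpb.ExecuteBatchDmlResponse) int {
	if codes.Code(resp.GetStatus().GetCode()) == codes.OK {
		return -1
	}
	// N result sets means statement N+1 (1-based) failed, i.e. index N.
	return len(resp.GetResultSets())
}

func main() {
	resp := &spannerpb.ExecuteBatchDmlResponse{} // placeholder response
	fmt.Println(firstFailedStatement(resp))
}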
func (*ExecuteBatchDmlResponse) Descriptor
func (*ExecuteBatchDmlResponse) Descriptor() ([]byte, []int)
Deprecated: Use ExecuteBatchDmlResponse.ProtoReflect.Descriptor instead.
func (*ExecuteBatchDmlResponse) GetResultSets
func (x *ExecuteBatchDmlResponse) GetResultSets() []*ResultSet
func (*ExecuteBatchDmlResponse) GetStatus
func (x *ExecuteBatchDmlResponse) GetStatus() *status.Status
func (*ExecuteBatchDmlResponse) ProtoMessage
func (*ExecuteBatchDmlResponse) ProtoMessage()
func (*ExecuteBatchDmlResponse) ProtoReflect
func (x *ExecuteBatchDmlResponse) ProtoReflect() protoreflect.Message
func (*ExecuteBatchDmlResponse) Reset
func (x *ExecuteBatchDmlResponse) Reset()
func (*ExecuteBatchDmlResponse) String
func (x *ExecuteBatchDmlResponse) String() string
ExecuteSqlRequest
type ExecuteSqlRequest struct {
Session string `protobuf:"bytes,1,opt,name=session,proto3" json:"session,omitempty"`
Transaction *TransactionSelector `protobuf:"bytes,2,opt,name=transaction,proto3" json:"transaction,omitempty"`
Sql string `protobuf:"bytes,3,opt,name=sql,proto3" json:"sql,omitempty"`
Params *structpb.Struct `protobuf:"bytes,4,opt,name=params,proto3" json:"params,omitempty"`
ParamTypes map[string]*Type "" /* 179 byte string literal not displayed */
ResumeToken []byte `protobuf:"bytes,6,opt,name=resume_token,json=resumeToken,proto3" json:"resume_token,omitempty"`
QueryMode ExecuteSqlRequest_QueryMode "" /* 140 byte string literal not displayed */
PartitionToken []byte `protobuf:"bytes,8,opt,name=partition_token,json=partitionToken,proto3" json:"partition_token,omitempty"`
Seqno int64 `protobuf:"varint,9,opt,name=seqno,proto3" json:"seqno,omitempty"`
QueryOptions *ExecuteSqlRequest_QueryOptions `protobuf:"bytes,10,opt,name=query_options,json=queryOptions,proto3" json:"query_options,omitempty"`
RequestOptions *RequestOptions `protobuf:"bytes,11,opt,name=request_options,json=requestOptions,proto3" json:"request_options,omitempty"`
DirectedReadOptions *DirectedReadOptions `protobuf:"bytes,15,opt,name=directed_read_options,json=directedReadOptions,proto3" json:"directed_read_options,omitempty"`
DataBoostEnabled bool `protobuf:"varint,16,opt,name=data_boost_enabled,json=dataBoostEnabled,proto3" json:"data_boost_enabled,omitempty"`
}
The request for [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql].
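A sketch of a parameterized query. structpb builds the params, and the param type map uses this package's Type and TypeCode; the session path and SQL are placeholders, the Type message's Code field is taken from the proto definition, and the spannerpb alias is an assumption.

package main

import (
	"log"

	spannerpb "cloud.google.com/go/spanner/apiv1/spannerpb" // import path assumed
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	params, err := structpb.NewStruct(map[string]interface{}{"name": "Bob"})
	if err != nil {
		log.Fatal(err)
	}

	req := &spannerpb.ExecuteSqlRequest{
		Session: "projects/p/instances/i/databases/d/sessions/s", // placeholder
		Sql:     "SELECT UserName, EventDate FROM UserEvents WHERE UserName = @name",
		Params:  params,
		ParamTypes: map[string]*spannerpb.Type{
			"name": {Code: spannerpb.TypeCode_STRING},
		},
		QueryMode: spannerpb.ExecuteSqlRequest_NORMAL, // results only, no plan or stats
	}
	log.Println(req.GetSql())
}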
func (*ExecuteSqlRequest) Descriptor
func (*ExecuteSqlRequest) Descriptor() ([]byte, []int)
Deprecated: Use ExecuteSqlRequest.ProtoReflect.Descriptor instead.
func (*ExecuteSqlRequest) GetDataBoostEnabled
func (x *ExecuteSqlRequest) GetDataBoostEnabled() bool
func (*ExecuteSqlRequest) GetDirectedReadOptions
func (x *ExecuteSqlRequest) GetDirectedReadOptions() *DirectedReadOptions
func (*ExecuteSqlRequest) GetParamTypes
func (x *ExecuteSqlRequest) GetParamTypes() map[string]*Type
func (*ExecuteSqlRequest) GetParams
func (x *ExecuteSqlRequest) GetParams() *structpb.Struct
func (*ExecuteSqlRequest) GetPartitionToken
func (x *ExecuteSqlRequest) GetPartitionToken() []byte
func (*ExecuteSqlRequest) GetQueryMode
func (x *ExecuteSqlRequest) GetQueryMode() ExecuteSqlRequest_QueryMode
func (*ExecuteSqlRequest) GetQueryOptions
func (x *ExecuteSqlRequest) GetQueryOptions() *ExecuteSqlRequest_QueryOptions
func (*ExecuteSqlRequest) GetRequestOptions
func (x *ExecuteSqlRequest) GetRequestOptions() *RequestOptions
func (*ExecuteSqlRequest) GetResumeToken
func (x *ExecuteSqlRequest) GetResumeToken() []byte
func (*ExecuteSqlRequest) GetSeqno
func (x *ExecuteSqlRequest) GetSeqno() int64
func (*ExecuteSqlRequest) GetSession
func (x *ExecuteSqlRequest) GetSession() string
func (*ExecuteSqlRequest) GetSql
func (x *ExecuteSqlRequest) GetSql() string
func (*ExecuteSqlRequest) GetTransaction
func (x *ExecuteSqlRequest) GetTransaction() *TransactionSelector
func (*ExecuteSqlRequest) ProtoMessage
func (*ExecuteSqlRequest) ProtoMessage()
func (*ExecuteSqlRequest) ProtoReflect
func (x *ExecuteSqlRequest) ProtoReflect() protoreflect.Message
func (*ExecuteSqlRequest) Reset
func (x *ExecuteSqlRequest) Reset()
func (*ExecuteSqlRequest) String
func (x *ExecuteSqlRequest) String() string
ExecuteSqlRequest_QueryMode
type ExecuteSqlRequest_QueryMode int32
Mode in which the statement must be processed.
ExecuteSqlRequest_NORMAL, ExecuteSqlRequest_PLAN, ExecuteSqlRequest_PROFILE
const (
// The default mode. Only the statement results are returned.
ExecuteSqlRequest_NORMAL ExecuteSqlRequest_QueryMode = 0
// This mode returns only the query plan, without any results or
// execution statistics information.
ExecuteSqlRequest_PLAN ExecuteSqlRequest_QueryMode = 1
// This mode returns both the query plan and the execution statistics along
// with the results.
ExecuteSqlRequest_PROFILE ExecuteSqlRequest_QueryMode = 2
)
func (ExecuteSqlRequest_QueryMode) Descriptor
func (ExecuteSqlRequest_QueryMode) Descriptor() protoreflect.EnumDescriptor
func (ExecuteSqlRequest_QueryMode) Enum
func (x ExecuteSqlRequest_QueryMode) Enum() *ExecuteSqlRequest_QueryMode
func (ExecuteSqlRequest_QueryMode) EnumDescriptor
func (ExecuteSqlRequest_QueryMode) EnumDescriptor() ([]byte, []int)
Deprecated: Use ExecuteSqlRequest_QueryMode.Descriptor instead.
func (ExecuteSqlRequest_QueryMode) Number
func (x ExecuteSqlRequest_QueryMode) Number() protoreflect.EnumNumber
func (ExecuteSqlRequest_QueryMode) String
func (x ExecuteSqlRequest_QueryMode) String() string
func (ExecuteSqlRequest_QueryMode) Type
func (ExecuteSqlRequest_QueryMode) Type() protoreflect.EnumType
ExecuteSqlRequest_QueryOptions
type ExecuteSqlRequest_QueryOptions struct {
OptimizerVersion string `protobuf:"bytes,1,opt,name=optimizer_version,json=optimizerVersion,proto3" json:"optimizer_version,omitempty"`
OptimizerStatisticsPackage string "" /* 141 byte string literal not displayed */
}
Query optimizer configuration.
func (*ExecuteSqlRequest_QueryOptions) Descriptor
func (*ExecuteSqlRequest_QueryOptions) Descriptor() ([]byte, []int)
Deprecated: Use ExecuteSqlRequest_QueryOptions.ProtoReflect.Descriptor instead.
func (*ExecuteSqlRequest_QueryOptions) GetOptimizerStatisticsPackage
func (x *ExecuteSqlRequest_QueryOptions) GetOptimizerStatisticsPackage() string
func (*ExecuteSqlRequest_QueryOptions) GetOptimizerVersion
func (x *ExecuteSqlRequest_QueryOptions) GetOptimizerVersion() string
func (*ExecuteSqlRequest_QueryOptions) ProtoMessage
func (*ExecuteSqlRequest_QueryOptions) ProtoMessage()
func (*ExecuteSqlRequest_QueryOptions) ProtoReflect
func (x *ExecuteSqlRequest_QueryOptions) ProtoReflect() protoreflect.Message
func (*ExecuteSqlRequest_QueryOptions) Reset
func (x *ExecuteSqlRequest_QueryOptions) Reset()
func (*ExecuteSqlRequest_QueryOptions) String
func (x *ExecuteSqlRequest_QueryOptions) String() string
GetSessionRequest
type GetSessionRequest struct {
// Required. The name of the session to retrieve.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// contains filtered or unexported fields
}
The request for [GetSession][google.spanner.v1.Spanner.GetSession].
func (*GetSessionRequest) Descriptor
func (*GetSessionRequest) Descriptor() ([]byte, []int)
Deprecated: Use GetSessionRequest.ProtoReflect.Descriptor instead.
func (*GetSessionRequest) GetName
func (x *GetSessionRequest) GetName() string
func (*GetSessionRequest) ProtoMessage
func (*GetSessionRequest) ProtoMessage()
func (*GetSessionRequest) ProtoReflect
func (x *GetSessionRequest) ProtoReflect() protoreflect.Message
func (*GetSessionRequest) Reset
func (x *GetSessionRequest) Reset()
func (*GetSessionRequest) String
func (x *GetSessionRequest) String() string
KeyRange
type KeyRange struct {
// The start key must be provided. It can be either closed or open.
//
// Types that are assignable to StartKeyType:
//
// *KeyRange_StartClosed
// *KeyRange_StartOpen
StartKeyType isKeyRange_StartKeyType `protobuf_oneof:"start_key_type"`
// The end key must be provided. It can be either closed or open.
//
// Types that are assignable to EndKeyType:
//
// *KeyRange_EndClosed
// *KeyRange_EndOpen
EndKeyType isKeyRange_EndKeyType `protobuf_oneof:"end_key_type"`
// contains filtered or unexported fields
}
KeyRange represents a range of rows in a table or index.
A range has a start key and an end key. These keys can be open or closed, indicating if the range includes rows with that key.
Keys are represented by lists, where the ith value in the list corresponds to the ith component of the table or index primary key. Individual values are encoded as described [here][google.spanner.v1.TypeCode].
For example, consider the following table definition:
CREATE TABLE UserEvents (
UserName STRING(MAX),
EventDate STRING(10)
) PRIMARY KEY(UserName, EventDate);
The following keys name rows in this table:
["Bob", "2014-09-23"]
["Alfred", "2015-06-12"]
Since the UserEvents table's PRIMARY KEY clause names two columns, each UserEvents key has two elements; the first is the UserName, and the second is the EventDate.
Key ranges with multiple components are interpreted lexicographically by component using the table or index key's declared sort order. For example, the following range returns all events for user "Bob" that occurred in the year 2015:
"start_closed": ["Bob", "2015-01-01"]
"end_closed": ["Bob", "2015-12-31"]
Start and end keys can omit trailing key components. This affects the inclusion and exclusion of rows that exactly match the provided key components: if the key is closed, then rows that exactly match the provided components are included; if the key is open, then rows that exactly match are not included.
For example, the following range includes all events for "Bob" that occurred during and after the year 2000:
"start_closed": ["Bob", "2000-01-01"]
"end_closed": ["Bob"]
The next example retrieves all events for "Bob":
"start_closed": ["Bob"]
"end_closed": ["Bob"]
To retrieve events before the year 2000:
"start_closed": ["Bob"]
"end_open": ["Bob", "2000-01-01"]
The following range includes all rows in the table:
"start_closed": []
"end_closed": []
This range returns all users whose UserName begins with any character from A to C:
"start_closed": ["A"]
"end_open": ["D"]
This range returns all users whose UserName begins with B:
"start_closed": ["B"]
"end_open": ["C"]
Key ranges honor column sort order. For example, suppose a table is defined as follows:
CREATE TABLE DescendingSortedTable (
Key INT64,
...
) PRIMARY KEY(Key DESC);
The following range retrieves all rows with key values between 1 and 100 inclusive:
"start_closed": ["100"]
"end_closed": ["1"]
Note that 100 is passed as the start, and 1 is passed as the end, because Key is a descending column in the schema.
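A sketch of the "events for Bob in 2015" range from earlier in this description, built with structpb.NewList for the key components (the mustList helper is a hypothetical convenience and the spannerpb alias is an assumption):

package main

import (
	"log"

	spannerpb "cloud.google.com/go/spanner/apiv1/spannerpb" // import path assumed
	"google.golang.org/protobuf/types/known/structpb"
)

// mustList is a small example helper; it panics on conversion errors.
func mustList(vals ...interface{}) *structpb.ListValue {
	l, err := structpb.NewList(vals)
	if err != nil {
		panic(err)
	}
	return l
}

func main() {
	r := &spannerpb.KeyRange{
		StartKeyType: &spannerpb.KeyRange_StartClosed{StartClosed: mustList("Bob", "2015-01-01")},
		EndKeyType:   &spannerpb.KeyRange_EndClosed{EndClosed: mustList("Bob", "2015-12-31")},
	}
	log.Println(r)
}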
func (*KeyRange) Descriptor
func (*KeyRange) Descriptor() ([]byte, []int)
Deprecated: Use KeyRange.ProtoReflect.Descriptor instead.
func (*KeyRange) GetEndClosed
func (x *KeyRange) GetEndClosed() *structpb.ListValue
func (*KeyRange) GetEndKeyType
func (m *KeyRange) GetEndKeyType() isKeyRange_EndKeyType
func (*KeyRange) GetEndOpen
func (x *KeyRange) GetEndOpen() *structpb.ListValue
func (*KeyRange) GetStartClosed
func (x *KeyRange) GetStartClosed() *structpb.ListValue
func (*KeyRange) GetStartKeyType
func (m *KeyRange) GetStartKeyType() isKeyRange_StartKeyType
func (*KeyRange) GetStartOpen
func (x *KeyRange) GetStartOpen() *structpb.ListValue
func (*KeyRange) ProtoMessage
func (*KeyRange) ProtoMessage()
func (*KeyRange) ProtoReflect
func (x *KeyRange) ProtoReflect() protoreflect.Message
func (*KeyRange) Reset
func (x *KeyRange) Reset()
func (*KeyRange) String
func (x *KeyRange) String() string
KeyRange_EndClosed
type KeyRange_EndClosed struct {
// If the end is closed, then the range includes all rows whose
// first `len(end_closed)` key columns exactly match `end_closed`.
EndClosed *structpb.ListValue `protobuf:"bytes,3,opt,name=end_closed,json=endClosed,proto3,oneof"`
}
KeyRange_EndOpen
type KeyRange_EndOpen struct {
// If the end is open, then the range excludes rows whose first
// `len(end_open)` key columns exactly match `end_open`.
EndOpen *structpb.ListValue `protobuf:"bytes,4,opt,name=end_open,json=endOpen,proto3,oneof"`
}
KeyRange_StartClosed
type KeyRange_StartClosed struct {
// If the start is closed, then the range includes all rows whose
// first `len(start_closed)` key columns exactly match `start_closed`.
StartClosed *structpb.ListValue `protobuf:"bytes,1,opt,name=start_closed,json=startClosed,proto3,oneof"`
}
KeyRange_StartOpen
type KeyRange_StartOpen struct {
// If the start is open, then the range excludes rows whose first
// `len(start_open)` key columns exactly match `start_open`.
StartOpen *structpb.ListValue `protobuf:"bytes,2,opt,name=start_open,json=startOpen,proto3,oneof"`
}
KeySet
type KeySet struct {
// A list of specific keys. Entries in `keys` should have exactly as
// many elements as there are columns in the primary or index key
// with which this `KeySet` is used. Individual key values are
// encoded as described [here][google.spanner.v1.TypeCode].
Keys []*structpb.ListValue `protobuf:"bytes,1,rep,name=keys,proto3" json:"keys,omitempty"`
// A list of key ranges. See [KeyRange][google.spanner.v1.KeyRange] for more information about
// key range specifications.
Ranges []*KeyRange `protobuf:"bytes,2,rep,name=ranges,proto3" json:"ranges,omitempty"`
// For convenience `all` can be set to `true` to indicate that this
// `KeySet` matches all keys in the table or index. Note that any keys
// specified in `keys` or `ranges` are only yielded once.
All bool `protobuf:"varint,3,opt,name=all,proto3" json:"all,omitempty"`
// contains filtered or unexported fields
}
KeySet defines a collection of Cloud Spanner keys and/or key ranges. All the keys are expected to be in the same table or index. The keys need not be sorted in any particular way.
If the same key is specified multiple times in the set (for example if two ranges, two keys, or a key and a range overlap), Cloud Spanner behaves as if the key were only specified once.
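A sketch of a KeySet naming one specific key; set All to true instead to match every key in the table or index (the key values are placeholders and the spannerpb alias is an assumption):

package main

import (
	"log"

	spannerpb "cloud.google.com/go/spanner/apiv1/spannerpb" // import path assumed
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	key, err := structpb.NewList([]interface{}{"Alfred", "2015-06-12"})
	if err != nil {
		log.Fatal(err)
	}

	// One explicit key; Ranges and All can be combined in the same set.
	ks := &spannerpb.KeySet{
		Keys: []*structpb.ListValue{key},
	}
	log.Println(ks.GetAll(), len(ks.GetKeys()))
}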
func (*KeySet) Descriptor
func (*KeySet) Descriptor() ([]byte, []int)
Deprecated: Use KeySet.ProtoReflect.Descriptor instead.
func (*KeySet) GetAll
func (x *KeySet) GetAll() bool
func (*KeySet) GetKeys
func (x *KeySet) GetKeys() []*structpb.ListValue
func (*KeySet) GetRanges
func (x *KeySet) GetRanges() []*KeyRange
func (*KeySet) ProtoMessage
func (*KeySet) ProtoMessage()
func (*KeySet) ProtoReflect
func (x *KeySet) ProtoReflect() protoreflect.Message
func (*KeySet) Reset
func (x *KeySet) Reset()
func (*KeySet) String
func (x *KeySet) String() string
ListSessionsRequest
type ListSessionsRequest struct {
// Required. The database in which to list sessions.
Database string `protobuf:"bytes,1,opt,name=database,proto3" json:"database,omitempty"`
// Number of sessions to be returned in the response. If 0 or less, defaults
// to the server's maximum allowed page size.
PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
// If non-empty, `page_token` should contain a
// [next_page_token][google.spanner.v1.ListSessionsResponse.next_page_token]
// from a previous
// [ListSessionsResponse][google.spanner.v1.ListSessionsResponse].
PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
// An expression for filtering the results of the request. Filter rules are
// case insensitive. The fields eligible for filtering are:
//
// - `labels.key` where key is the name of a label
//
// Some examples of using filters are:
//
// - `labels.env:*` --> The session has the label "env".
// - `labels.env:dev` --> The session has the label "env" and the value of
// the label contains the string "dev".
Filter string `protobuf:"bytes,4,opt,name=filter,proto3" json:"filter,omitempty"`
// contains filtered or unexported fields
}
The request for [ListSessions][google.spanner.v1.Spanner.ListSessions].
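A sketch of listing sessions filtered by label, using the filter syntax described in the struct comments above (the database path is a placeholder and the spannerpb alias is an assumption):

package main

import (
	"fmt"

	spannerpb "cloud.google.com/go/spanner/apiv1/spannerpb" // import path assumed
)

func main() {
	req := &spannerpb.ListSessionsRequest{
		Database: "projects/my-project/instances/my-instance/databases/my-db", // placeholder
		PageSize: 100,
		Filter:   `labels.env:dev`, // sessions whose "env" label contains "dev"
	}
	fmt.Println(req.GetFilter())
}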
func (*ListSessionsRequest) Descriptor
func (*ListSessionsRequest) Descriptor() ([]byte, []int)
Deprecated: Use ListSessionsRequest.ProtoReflect.Descriptor instead.
func (*ListSessionsRequest) GetDatabase
func (x *ListSessionsRequest) GetDatabase() string
func (*ListSessionsRequest) GetFilter
func (x *ListSessionsRequest) GetFilter() string
func (*ListSessionsRequest) GetPageSize
func (x *ListSessionsRequest) GetPageSize() int32
func (*ListSessionsRequest) GetPageToken
func (x *ListSessionsRequest) GetPageToken() string
func (*ListSessionsRequest) ProtoMessage
func (*ListSessionsRequest) ProtoMessage()
func (*ListSessionsRequest) ProtoReflect
func (x *ListSessionsRequest) ProtoReflect() protoreflect.Message
func (*ListSessionsRequest) Reset
func (x *ListSessionsRequest) Reset()
func (*ListSessionsRequest) String
func (x *ListSessionsRequest) String() string
ListSessionsResponse
type ListSessionsResponse struct {
// The list of requested sessions.
Sessions []*Session `protobuf:"bytes,1,rep,name=sessions,proto3" json:"sessions,omitempty"`
// `next_page_token` can be sent in a subsequent
// [ListSessions][google.spanner.v1.Spanner.ListSessions] call to fetch more
// of the matching sessions.
NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
// contains filtered or unexported fields
}
The response for [ListSessions][google.spanner.v1.Spanner.ListSessions].
func (*ListSessionsResponse) Descriptor
func (*ListSessionsResponse) Descriptor() ([]byte, []int)
Deprecated: Use ListSessionsResponse.ProtoReflect.Descriptor instead.
func (*ListSessionsResponse) GetNextPageToken
func (x *ListSessionsResponse) GetNextPageToken() string
func (*ListSessionsResponse) GetSessions
func (x *ListSessionsResponse) GetSessions() []*Session
func (*ListSessionsResponse) ProtoMessage
func (*ListSessionsResponse) ProtoMessage()
func (*ListSessionsResponse) ProtoReflect
func (x *ListSessionsResponse) ProtoReflect() protoreflect.Message
func (*ListSessionsResponse) Reset
func (x *ListSessionsResponse) Reset()
func (*ListSessionsResponse) String
func (x *ListSessionsResponse) String() string
Mutation
type Mutation struct {
// Required. The operation to perform.
//
// Types that are assignable to Operation:
//
// *Mutation_Insert
// *Mutation_Update
// *Mutation_InsertOrUpdate
// *Mutation_Replace
// *Mutation_Delete_
Operation isMutation_Operation `protobuf_oneof:"operation"`
// contains filtered or unexported fields
}
A modification to one or more Cloud Spanner rows. Mutations can be applied to a Cloud Spanner database by sending them in a [Commit][google.spanner.v1.Spanner.Commit] call.
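A sketch of a delete mutation, using the oneof wrapper and the Mutation_Delete and KeySet messages documented in this package; the table name is a placeholder and the spannerpb alias is an assumption.

package main

import (
	"fmt"

	spannerpb "cloud.google.com/go/spanner/apiv1/spannerpb" // import path assumed
)

func main() {
	m := &spannerpb.Mutation{
		Operation: &spannerpb.Mutation_Delete_{
			Delete: &spannerpb.Mutation_Delete{
				Table:  "UserEvents", // placeholder table
				KeySet: &spannerpb.KeySet{All: true},
			},
		},
	}
	// Mutations take effect when sent in a Commit (or BatchWrite) call.
	fmt.Println(m.GetDelete().GetTable())
}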
func (*Mutation) Descriptor
func (*Mutation) Descriptor() ([]byte, []int)
Deprecated: Use Mutation.ProtoReflect.Descriptor instead.
func (*Mutation) GetDelete
func (x *Mutation) GetDelete() *Mutation_Delete
func (*Mutation) GetInsert
func (x *Mutation) GetInsert() *Mutation_Write
func (*Mutation) GetInsertOrUpdate
func (x *Mutation) GetInsertOrUpdate() *Mutation_Write
func (*Mutation) GetOperation
func (m *Mutation) GetOperation() isMutation_Operation
func (*Mutation) GetReplace
func (x *Mutation) GetReplace() *Mutation_Write
func (*Mutation) GetUpdate
func (x *Mutation) GetUpdate() *Mutation_Write
func (*Mutation) ProtoMessage
func (*Mutation) ProtoMessage()
func (*Mutation) ProtoReflect
func (x *Mutation) ProtoReflect() protoreflect.Message
func (*Mutation) Reset
func (x *Mutation) Reset()
func (*Mutation) String
func (x *Mutation) String() string
Mutation_Delete
type Mutation_Delete struct {
// Required. The table whose rows will be deleted.
Table string `protobuf:"bytes,1,opt,name=table,proto3" json:"table,omitempty"`
// Required. The primary keys of the rows within [table][google.spanner.v1.Mutation.Delete.table] to delete. The
// primary keys must be specified in the order in which they appear in the
// `PRIMARY KEY()` clause of the table's equivalent DDL statement (the DDL
// statement used to create the table).
// Delete is idempotent. The transaction will succeed even if some or all
// rows do not exist.
KeySet *KeySet `protobuf:"bytes,2,opt,name=key_set,json=keySet,proto3" json:"key_set,omitempty"`
// contains filtered or unexported fields
}
Arguments to [delete][google.spanner.v1.Mutation.delete] operations.
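A hedged sketch of a delete mutation keyed by primary key; KeySet is defined elsewhere in this package, and the table and key values here are placeholders:

package example

import (
	"cloud.google.com/go/spanner/apiv1/spannerpb"
	"google.golang.org/protobuf/types/known/structpb"
)

// newDeleteMutation deletes the row whose primary key is ("1").
// Delete is idempotent: the transaction succeeds even if the row is absent.
func newDeleteMutation() (*spannerpb.Mutation, error) {
	key, err := structpb.NewList([]any{"1"}) // primary key columns, in PRIMARY KEY() order
	if err != nil {
		return nil, err
	}
	return &spannerpb.Mutation{
		Operation: &spannerpb.Mutation_Delete_{
			Delete: &spannerpb.Mutation_Delete{
				Table:  "Singers",
				KeySet: &spannerpb.KeySet{Keys: []*structpb.ListValue{key}},
			},
		},
	}, nil
}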
func (*Mutation_Delete) Descriptor
func (*Mutation_Delete) Descriptor() ([]byte, []int)
Deprecated: Use Mutation_Delete.ProtoReflect.Descriptor instead.
func (*Mutation_Delete) GetKeySet
func (x *Mutation_Delete) GetKeySet() *KeySet
func (*Mutation_Delete) GetTable
func (x *Mutation_Delete) GetTable() string
func (*Mutation_Delete) ProtoMessage
func (*Mutation_Delete) ProtoMessage()
func (*Mutation_Delete) ProtoReflect
func (x *Mutation_Delete) ProtoReflect() protoreflect.Message
func (*Mutation_Delete) Reset
func (x *Mutation_Delete) Reset()
func (*Mutation_Delete) String
func (x *Mutation_Delete) String() string
Mutation_Delete_
type Mutation_Delete_ struct {
// Delete rows from a table. Succeeds whether or not the named
// rows were present.
Delete *Mutation_Delete `protobuf:"bytes,5,opt,name=delete,proto3,oneof"`
}
Mutation_Insert
type Mutation_Insert struct {
// Insert new rows in a table. If any of the rows already exist,
// the write or transaction fails with error `ALREADY_EXISTS`.
Insert *Mutation_Write `protobuf:"bytes,1,opt,name=insert,proto3,oneof"`
}
Mutation_InsertOrUpdate
type Mutation_InsertOrUpdate struct {
// Like [insert][google.spanner.v1.Mutation.insert], except that if the row already exists, then
// its column values are overwritten with the ones provided. Any
// column values not explicitly written are preserved.
//
// When using [insert_or_update][google.spanner.v1.Mutation.insert_or_update], just as when using [insert][google.spanner.v1.Mutation.insert], all `NOT
// NULL` columns in the table must be given a value. This holds true
// even when the row already exists and will therefore actually be updated.
InsertOrUpdate *Mutation_Write `protobuf:"bytes,3,opt,name=insert_or_update,json=insertOrUpdate,proto3,oneof"`
}
Mutation_Replace
type Mutation_Replace struct {
// Like [insert][google.spanner.v1.Mutation.insert], except that if the row already exists, it is
// deleted, and the column values provided are inserted
// instead. Unlike [insert_or_update][google.spanner.v1.Mutation.insert_or_update], this means any values not
// explicitly written become `NULL`.
//
// In an interleaved table, if you create the child table with the
// `ON DELETE CASCADE` annotation, then replacing a parent row
// also deletes the child rows. Otherwise, you must delete the
// child rows before you replace the parent row.
Replace *Mutation_Write `protobuf:"bytes,4,opt,name=replace,proto3,oneof"`
}
Mutation_Update
type Mutation_Update struct {
// Update existing rows in a table. If any of the rows does not
// already exist, the transaction fails with error `NOT_FOUND`.
Update *Mutation_Write `protobuf:"bytes,2,opt,name=update,proto3,oneof"`
}
Mutation_Write
type Mutation_Write struct {
// Required. The table whose rows will be written.
Table string `protobuf:"bytes,1,opt,name=table,proto3" json:"table,omitempty"`
// The names of the columns in [table][google.spanner.v1.Mutation.Write.table] to be written.
//
// The list of columns must contain enough columns to allow
// Cloud Spanner to derive values for all primary key columns in the
// row(s) to be modified.
Columns []string `protobuf:"bytes,2,rep,name=columns,proto3" json:"columns,omitempty"`
// The values to be written. `values` can contain more than one
// list of values. If it does, then multiple rows are written, one
// for each entry in `values`. Each list in `values` must have
// exactly as many entries as there are entries in [columns][google.spanner.v1.Mutation.Write.columns]
// above. Sending multiple lists is equivalent to sending multiple
// `Mutation`s, each containing one `values` entry and repeating
// [table][google.spanner.v1.Mutation.Write.table] and [columns][google.spanner.v1.Mutation.Write.columns]. Individual values in each list are
// encoded as described [here][google.spanner.v1.TypeCode].
Values []*structpb.ListValue `protobuf:"bytes,3,rep,name=values,proto3" json:"values,omitempty"`
// contains filtered or unexported fields
}
Arguments to [insert][google.spanner.v1.Mutation.insert], [update][google.spanner.v1.Mutation.update], [insert_or_update][google.spanner.v1.Mutation.insert_or_update], and [replace][google.spanner.v1.Mutation.replace] operations.
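Because `values` can carry several lists, one `Mutation_Write` can write several rows at once. A sketch (placeholders as above):

package example

import (
	"cloud.google.com/go/spanner/apiv1/spannerpb"
	"google.golang.org/protobuf/types/known/structpb"
)

// multiRowWrite writes two rows with a single Write; each list in Values
// has exactly as many entries as Columns.
func multiRowWrite() (*spannerpb.Mutation_Write, error) {
	row1, err := structpb.NewList([]any{"1", "Alice"})
	if err != nil {
		return nil, err
	}
	row2, err := structpb.NewList([]any{"2", "Bob"})
	if err != nil {
		return nil, err
	}
	return &spannerpb.Mutation_Write{
		Table:   "Users",
		Columns: []string{"UserId", "UserName"},
		Values:  []*structpb.ListValue{row1, row2},
	}, nil
}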
func (*Mutation_Write) Descriptor
func (*Mutation_Write) Descriptor() ([]byte, []int)
Deprecated: Use Mutation_Write.ProtoReflect.Descriptor instead.
func (*Mutation_Write) GetColumns
func (x *Mutation_Write) GetColumns() []string
func (*Mutation_Write) GetTable
func (x *Mutation_Write) GetTable() string
func (*Mutation_Write) GetValues
func (x *Mutation_Write) GetValues() []*structpb.ListValue
func (*Mutation_Write) ProtoMessage
func (*Mutation_Write) ProtoMessage()
func (*Mutation_Write) ProtoReflect
func (x *Mutation_Write) ProtoReflect() protoreflect.Message
func (*Mutation_Write) Reset
func (x *Mutation_Write) Reset()
func (*Mutation_Write) String
func (x *Mutation_Write) String() string
PartialResultSet
type PartialResultSet struct {
// Metadata about the result set, such as row type information.
// Only present in the first response.
Metadata *ResultSetMetadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
// A streamed result set consists of a stream of values, which might
// be split into many `PartialResultSet` messages to accommodate
// large rows and/or large values. Every N complete values defines a
// row, where N is equal to the number of entries in
// [metadata.row_type.fields][google.spanner.v1.StructType.fields].
//
// Most values are encoded based on type as described
// [here][google.spanner.v1.TypeCode].
//
// It is possible that the last value in values is "chunked",
// meaning that the rest of the value is sent in subsequent
// `PartialResultSet`(s). This is denoted by the [chunked_value][google.spanner.v1.PartialResultSet.chunked_value]
// field. Two or more chunked values can be merged to form a
// complete value as follows:
//
// - `bool/number/null`: cannot be chunked
// - `string`: concatenate the strings
// - `list`: concatenate the lists. If the last element in a list is a
// `string`, `list`, or `object`, merge it with the first element in
// the next list by applying these rules recursively.
// - `object`: concatenate the (field name, field value) pairs. If a
// field name is duplicated, then apply these rules recursively
// to merge the field values.
//
// Some examples of merging:
//
// # Strings are concatenated.
// "foo", "bar" => "foobar"
//
// # Lists of non-strings are concatenated.
// [2, 3], [4] => [2, 3, 4]
//
// # Lists are concatenated, but the last and first elements are merged
// # because they are strings.
// ["a", "b"], ["c", "d"] => ["a", "bc", "d"]
//
// # Lists are concatenated, but the last and first elements are merged
// # because they are lists. Recursively, the last and first elements
// # of the inner lists are merged because they are strings.
// ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", "cd"], "e"]
//
// # Non-overlapping object fields are combined.
// {"a": "1"}, {"b": "2"} => {"a": "1", "b": 2"}
//
// # Overlapping object fields are merged.
// {"a": "1"}, {"a": "2"} => {"a": "12"}
//
// # Examples of merging objects containing lists of strings.
// {"a": ["1"]}, {"a": ["2"]} => {"a": ["12"]}
//
// For a more complete example, suppose a streaming SQL query is
// yielding a result set whose rows contain a single string
// field. The following `PartialResultSet`s might be yielded:
//
// {
// "metadata": { ... }
// "values": ["Hello", "W"]
// "chunked_value": true
// "resume_token": "Af65..."
// }
// {
// "values": ["orl"]
// "chunked_value": true
// "resume_token": "Bqp2..."
// }
// {
// "values": ["d"]
// "resume_token": "Zx1B..."
// }
//
// This sequence of `PartialResultSet`s encodes two rows, one
// containing the field value `"Hello"`, and a second containing the
// field value `"World" = "W" + "orl" + "d"`.
Values []*structpb.Value `protobuf:"bytes,2,rep,name=values,proto3" json:"values,omitempty"`
// If true, then the final value in [values][google.spanner.v1.PartialResultSet.values] is chunked, and must
// be combined with more values from subsequent `PartialResultSet`s
// to obtain a complete field value.
ChunkedValue bool `protobuf:"varint,3,opt,name=chunked_value,json=chunkedValue,proto3" json:"chunked_value,omitempty"`
// Streaming calls might be interrupted for a variety of reasons, such
// as TCP connection loss. If this occurs, the stream of results can
// be resumed by re-sending the original request and including
// `resume_token`. Note that executing any other transaction in the
// same session invalidates the token.
ResumeToken []byte `protobuf:"bytes,4,opt,name=resume_token,json=resumeToken,proto3" json:"resume_token,omitempty"`
// Query plan and execution statistics for the statement that produced this
// streaming result set. These can be requested by setting
// [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode] and are sent
// only once with the last response in the stream.
// This field will also be present in the last response for DML
// statements.
Stats *ResultSetStats `protobuf:"bytes,5,opt,name=stats,proto3" json:"stats,omitempty"`
// contains filtered or unexported fields
}
Partial results from a streaming read or SQL query. Streaming reads and SQL queries better tolerate large result sets, large rows, and large values, but are a little trickier to consume.
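For the common case of string values, the merge rules above amount to concatenating a trailing chunked value with the first value of the next message. A simplified sketch that only handles string chunks (real result sets also need the list and object rules):

package example

import "cloud.google.com/go/spanner/apiv1/spannerpb"

// mergeStringChunks flattens a stream of PartialResultSets into complete
// string values, concatenating chunked values across messages.
func mergeStringChunks(parts []*spannerpb.PartialResultSet) []string {
	var out []string
	pending := false // true if out's last element is awaiting more chunks
	for _, p := range parts {
		for i, v := range p.GetValues() {
			s := v.GetStringValue()
			if i == 0 && pending {
				out[len(out)-1] += s // continue the previous chunked value
			} else {
				out = append(out, s)
			}
		}
		pending = p.GetChunkedValue() // last value of this message is incomplete
	}
	return out
}

Applied to the three-message example above, this yields the two complete values "Hello" and "World".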
func (*PartialResultSet) Descriptor
func (*PartialResultSet) Descriptor() ([]byte, []int)
Deprecated: Use PartialResultSet.ProtoReflect.Descriptor instead.
func (*PartialResultSet) GetChunkedValue
func (x *PartialResultSet) GetChunkedValue() bool
func (*PartialResultSet) GetMetadata
func (x *PartialResultSet) GetMetadata() *ResultSetMetadata
func (*PartialResultSet) GetResumeToken
func (x *PartialResultSet) GetResumeToken() []byte
func (*PartialResultSet) GetStats
func (x *PartialResultSet) GetStats() *ResultSetStats
func (*PartialResultSet) GetValues
func (x *PartialResultSet) GetValues() []*structpb.Value
func (*PartialResultSet) ProtoMessage
func (*PartialResultSet) ProtoMessage()
func (*PartialResultSet) ProtoReflect
func (x *PartialResultSet) ProtoReflect() protoreflect.Message
func (*PartialResultSet) Reset
func (x *PartialResultSet) Reset()
func (*PartialResultSet) String
func (x *PartialResultSet) String() string
Partition
type Partition struct {
// This token can be passed to Read, StreamingRead, ExecuteSql, or
// ExecuteStreamingSql requests to restrict the results to those identified by
// this partition token.
PartitionToken []byte `protobuf:"bytes,1,opt,name=partition_token,json=partitionToken,proto3" json:"partition_token,omitempty"`
// contains filtered or unexported fields
}
Information returned for each partition returned in a PartitionResponse.
func (*Partition) Descriptor
func (*Partition) Descriptor() ([]byte, []int)
Deprecated: Use Partition.ProtoReflect.Descriptor instead.
func (*Partition) GetPartitionToken
func (x *Partition) GetPartitionToken() []byte
func (*Partition) ProtoMessage
func (*Partition) ProtoMessage()
func (*Partition) ProtoReflect
func (x *Partition) ProtoReflect() protoreflect.Message
func (*Partition) Reset
func (x *Partition) Reset()
func (*Partition) String
func (x *Partition) String() string
PartitionOptions
type PartitionOptions struct {
// **Note:** This hint is currently ignored by PartitionQuery and
// PartitionRead requests.
//
// The desired data size for each partition generated. The default for this
// option is currently 1 GiB. This is only a hint. The actual size of each
// partition may be smaller or larger than this size request.
PartitionSizeBytes int64 `protobuf:"varint,1,opt,name=partition_size_bytes,json=partitionSizeBytes,proto3" json:"partition_size_bytes,omitempty"`
// **Note:** This hint is currently ignored by PartitionQuery and
// PartitionRead requests.
//
// The desired maximum number of partitions to return. For example, this may
// be set to the number of workers available. The default for this option
// is currently 10,000. The maximum value is currently 200,000. This is only
// a hint. The actual number of partitions returned may be smaller or larger
// than this maximum count request.
MaxPartitions int64 `protobuf:"varint,2,opt,name=max_partitions,json=maxPartitions,proto3" json:"max_partitions,omitempty"`
// contains filtered or unexported fields
}
Options for a PartitionQueryRequest and PartitionReadRequest.
func (*PartitionOptions) Descriptor
func (*PartitionOptions) Descriptor() ([]byte, []int)
Deprecated: Use PartitionOptions.ProtoReflect.Descriptor instead.
func (*PartitionOptions) GetMaxPartitions
func (x *PartitionOptions) GetMaxPartitions() int64
func (*PartitionOptions) GetPartitionSizeBytes
func (x *PartitionOptions) GetPartitionSizeBytes() int64
func (*PartitionOptions) ProtoMessage
func (*PartitionOptions) ProtoMessage()
func (*PartitionOptions) ProtoReflect
func (x *PartitionOptions) ProtoReflect() protoreflect.Message
func (*PartitionOptions) Reset
func (x *PartitionOptions) Reset()
func (*PartitionOptions) String
func (x *PartitionOptions) String() string
PartitionQueryRequest
type PartitionQueryRequest struct {
Session string `protobuf:"bytes,1,opt,name=session,proto3" json:"session,omitempty"`
Transaction *TransactionSelector `protobuf:"bytes,2,opt,name=transaction,proto3" json:"transaction,omitempty"`
Sql string `protobuf:"bytes,3,opt,name=sql,proto3" json:"sql,omitempty"`
Params *structpb.Struct `protobuf:"bytes,4,opt,name=params,proto3" json:"params,omitempty"`
ParamTypes map[string]*Type "" /* 179 byte string literal not displayed */
PartitionOptions *PartitionOptions `protobuf:"bytes,6,opt,name=partition_options,json=partitionOptions,proto3" json:"partition_options,omitempty"`
// contains filtered or unexported fields
}
The request for [PartitionQuery][google.spanner.v1.Spanner.PartitionQuery]
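A sketch of a partitioned query request; the SQL, parameter, session name, and size hints are placeholders. The transaction must be a read-only snapshot transaction, and the partition options are only hints:

package example

import (
	"cloud.google.com/go/spanner/apiv1/spannerpb"
	"google.golang.org/protobuf/types/known/structpb"
)

// newPartitionQueryRequest asks Spanner to split a root-partitionable query
// into at most 10 partitions of roughly 512 MiB each (both values are hints).
func newPartitionQueryRequest(session string) (*spannerpb.PartitionQueryRequest, error) {
	params, err := structpb.NewStruct(map[string]any{"min_id": "100"})
	if err != nil {
		return nil, err
	}
	return &spannerpb.PartitionQueryRequest{
		Session: session,
		Sql:     "SELECT UserId, UserName FROM Users WHERE UserId > @min_id",
		Params:  params,
		PartitionOptions: &spannerpb.PartitionOptions{
			PartitionSizeBytes: 512 << 20, // desired partition size hint (512 MiB)
			MaxPartitions:      10,        // desired maximum number of partitions
		},
	}, nil
}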
func (*PartitionQueryRequest) Descriptor
func (*PartitionQueryRequest) Descriptor() ([]byte, []int)
Deprecated: Use PartitionQueryRequest.ProtoReflect.Descriptor instead.
func (*PartitionQueryRequest) GetParamTypes
func (x *PartitionQueryRequest) GetParamTypes() map[string]*Type
func (*PartitionQueryRequest) GetParams
func (x *PartitionQueryRequest) GetParams() *structpb.Struct
func (*PartitionQueryRequest) GetPartitionOptions
func (x *PartitionQueryRequest) GetPartitionOptions() *PartitionOptions
func (*PartitionQueryRequest) GetSession
func (x *PartitionQueryRequest) GetSession() string
func (*PartitionQueryRequest) GetSql
func (x *PartitionQueryRequest) GetSql() string
func (*PartitionQueryRequest) GetTransaction
func (x *PartitionQueryRequest) GetTransaction() *TransactionSelector
func (*PartitionQueryRequest) ProtoMessage
func (*PartitionQueryRequest) ProtoMessage()
func (*PartitionQueryRequest) ProtoReflect
func (x *PartitionQueryRequest) ProtoReflect() protoreflect.Message
func (*PartitionQueryRequest) Reset
func (x *PartitionQueryRequest) Reset()
func (*PartitionQueryRequest) String
func (x *PartitionQueryRequest) String() string
PartitionReadRequest
type PartitionReadRequest struct {
// Required. The session used to create the partitions.
Session string `protobuf:"bytes,1,opt,name=session,proto3" json:"session,omitempty"`
// Read only snapshot transactions are supported, read/write and single use
// transactions are not.
Transaction *TransactionSelector `protobuf:"bytes,2,opt,name=transaction,proto3" json:"transaction,omitempty"`
// Required. The name of the table in the database to be read.
Table string `protobuf:"bytes,3,opt,name=table,proto3" json:"table,omitempty"`
// If non-empty, the name of an index on
// [table][google.spanner.v1.PartitionReadRequest.table]. This index is used
// instead of the table primary key when interpreting
// [key_set][google.spanner.v1.PartitionReadRequest.key_set] and sorting
// result rows. See [key_set][google.spanner.v1.PartitionReadRequest.key_set]
// for further information.
Index string `protobuf:"bytes,4,opt,name=index,proto3" json:"index,omitempty"`
// The columns of [table][google.spanner.v1.PartitionReadRequest.table] to be
// returned for each row matching this request.
Columns []string `protobuf:"bytes,5,rep,name=columns,proto3" json:"columns,omitempty"`
// Required. `key_set` identifies the rows to be yielded. `key_set` names the
// primary keys of the rows in
// [table][google.spanner.v1.PartitionReadRequest.table] to be yielded, unless
// [index][google.spanner.v1.PartitionReadRequest.index] is present. If
// [index][google.spanner.v1.PartitionReadRequest.index] is present, then
// [key_set][google.spanner.v1.PartitionReadRequest.key_set] instead names
// index keys in [index][google.spanner.v1.PartitionReadRequest.index].
//
// It is not an error for the `key_set` to name rows that do not
// exist in the database. Read yields nothing for nonexistent rows.
KeySet *KeySet `protobuf:"bytes,6,opt,name=key_set,json=keySet,proto3" json:"key_set,omitempty"`
// Additional options that affect how many partitions are created.
PartitionOptions *PartitionOptions `protobuf:"bytes,9,opt,name=partition_options,json=partitionOptions,proto3" json:"partition_options,omitempty"`
// contains filtered or unexported fields
}
The request for [PartitionRead][google.spanner.v1.Spanner.PartitionRead]
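A parallel sketch for partitioned reads; here the whole table is read through an assumed secondary index, with placeholders for the session, table, index, and column names:

package example

import "cloud.google.com/go/spanner/apiv1/spannerpb"

// newPartitionReadRequest partitions a full-table read over a secondary index.
func newPartitionReadRequest(session string) *spannerpb.PartitionReadRequest {
	return &spannerpb.PartitionReadRequest{
		Session: session,
		Table:   "Users",
		Index:   "UsersByUserName", // interpret key_set against this index
		Columns: []string{"UserId", "UserName"},
		KeySet:  &spannerpb.KeySet{All: true}, // every row (every index key)
		PartitionOptions: &spannerpb.PartitionOptions{
			MaxPartitions: 100, // hint only
		},
	}
}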
func (*PartitionReadRequest) Descriptor
func (*PartitionReadRequest) Descriptor() ([]byte, []int)
Deprecated: Use PartitionReadRequest.ProtoReflect.Descriptor instead.
func (*PartitionReadRequest) GetColumns
func (x *PartitionReadRequest) GetColumns() []string
func (*PartitionReadRequest) GetIndex
func (x *PartitionReadRequest) GetIndex() string
func (*PartitionReadRequest) GetKeySet
func (x *PartitionReadRequest) GetKeySet() *KeySet
func (*PartitionReadRequest) GetPartitionOptions
func (x *PartitionReadRequest) GetPartitionOptions() *PartitionOptions
func (*PartitionReadRequest) GetSession
func (x *PartitionReadRequest) GetSession() string
func (*PartitionReadRequest) GetTable
func (x *PartitionReadRequest) GetTable() string
func (*PartitionReadRequest) GetTransaction
func (x *PartitionReadRequest) GetTransaction() *TransactionSelector
func (*PartitionReadRequest) ProtoMessage
func (*PartitionReadRequest) ProtoMessage()
func (*PartitionReadRequest) ProtoReflect
func (x *PartitionReadRequest) ProtoReflect() protoreflect.Message
func (*PartitionReadRequest) Reset
func (x *PartitionReadRequest) Reset()
func (*PartitionReadRequest) String
func (x *PartitionReadRequest) String() string
PartitionResponse
type PartitionResponse struct {
// Partitions created by this request.
Partitions []*Partition `protobuf:"bytes,1,rep,name=partitions,proto3" json:"partitions,omitempty"`
// Transaction created by this request.
Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction,proto3" json:"transaction,omitempty"`
// contains filtered or unexported fields
}
The response for [PartitionQuery][google.spanner.v1.Spanner.PartitionQuery] or [PartitionRead][google.spanner.v1.Spanner.PartitionRead]
func (*PartitionResponse) Descriptor
func (*PartitionResponse) Descriptor() ([]byte, []int)
Deprecated: Use PartitionResponse.ProtoReflect.Descriptor instead.
func (*PartitionResponse) GetPartitions
func (x *PartitionResponse) GetPartitions() []*Partition
func (*PartitionResponse) GetTransaction
func (x *PartitionResponse) GetTransaction() *Transaction
func (*PartitionResponse) ProtoMessage
func (*PartitionResponse) ProtoMessage()
func (*PartitionResponse) ProtoReflect
func (x *PartitionResponse) ProtoReflect() protoreflect.Message
func (*PartitionResponse) Reset
func (x *PartitionResponse) Reset()
func (*PartitionResponse) String
func (x *PartitionResponse) String() string
PlanNode
type PlanNode struct {
// The `PlanNode`'s index in [node list][google.spanner.v1.QueryPlan.plan_nodes].
Index int32 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"`
// Used to determine the type of node. May be needed for visualizing
// different kinds of nodes differently. For example, if the node is a
// [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] node, it will have a condensed representation
// which can be used to directly embed a description of the node in its
// parent.
Kind PlanNode_Kind `protobuf:"varint,2,opt,name=kind,proto3,enum=google.spanner.v1.PlanNode_Kind" json:"kind,omitempty"`
// The display name for the node.
DisplayName string `protobuf:"bytes,3,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
// List of child node `index`es and their relationship to this parent.
ChildLinks []*PlanNode_ChildLink `protobuf:"bytes,4,rep,name=child_links,json=childLinks,proto3" json:"child_links,omitempty"`
// Condensed representation for [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] nodes.
ShortRepresentation *PlanNode_ShortRepresentation `protobuf:"bytes,5,opt,name=short_representation,json=shortRepresentation,proto3" json:"short_representation,omitempty"`
// Attributes relevant to the node contained in a group of key-value pairs.
// For example, a Parameter Reference node could have the following
// information in its metadata:
//
// {
// "parameter_reference": "param1",
// "parameter_type": "array"
// }
Metadata *structpb.Struct `protobuf:"bytes,6,opt,name=metadata,proto3" json:"metadata,omitempty"`
// The execution statistics associated with the node, contained in a group of
// key-value pairs. Only present if the plan was returned as a result of a
// profile query. For example, number of executions, number of rows/time per
// execution etc.
ExecutionStats *structpb.Struct `protobuf:"bytes,7,opt,name=execution_stats,json=executionStats,proto3" json:"execution_stats,omitempty"`
// contains filtered or unexported fields
}
Node information for nodes appearing in a [QueryPlan.plan_nodes][google.spanner.v1.QueryPlan.plan_nodes].
func (*PlanNode) Descriptor
func (*PlanNode) Descriptor() ([]byte, []int)
Deprecated: Use PlanNode.ProtoReflect.Descriptor instead.
func (*PlanNode) GetChildLinks
func (x *PlanNode) GetChildLinks() []*PlanNode_ChildLink
func (*PlanNode) GetDisplayName
func (x *PlanNode) GetDisplayName() string
func (*PlanNode) GetExecutionStats
func (x *PlanNode) GetExecutionStats() *structpb.Struct
func (*PlanNode) GetIndex
func (x *PlanNode) GetIndex() int32
func (*PlanNode) GetKind
func (x *PlanNode) GetKind() PlanNode_Kind
func (*PlanNode) GetMetadata
func (x *PlanNode) GetMetadata() *structpb.Struct
func (*PlanNode) GetShortRepresentation
func (x *PlanNode) GetShortRepresentation() *PlanNode_ShortRepresentation
func (*PlanNode) ProtoMessage
func (*PlanNode) ProtoMessage()
func (*PlanNode) ProtoReflect
func (x *PlanNode) ProtoReflect() protoreflect.Message
func (*PlanNode) Reset
func (x *PlanNode) Reset()
func (*PlanNode) String
func (x *PlanNode) String() string
PlanNode_ChildLink
type PlanNode_ChildLink struct {
// The node to which the link points.
ChildIndex int32 `protobuf:"varint,1,opt,name=child_index,json=childIndex,proto3" json:"child_index,omitempty"`
// The type of the link. For example, in Hash Joins this could be used to
// distinguish between the build child and the probe child, or in the case
// of the child being an output variable, to represent the tag associated
// with the output variable.
Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
// Only present if the child node is [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] and corresponds
// to an output variable of the parent node. The field carries the name of
// the output variable.
// For example, a `TableScan` operator that reads rows from a table will
// have child links to the `SCALAR` nodes representing the output variables
// created for each column that is read by the operator. The corresponding
// `variable` fields will be set to the variable names assigned to the
// columns.
Variable string `protobuf:"bytes,3,opt,name=variable,proto3" json:"variable,omitempty"`
// contains filtered or unexported fields
}
Metadata associated with a parent-child relationship appearing in a [PlanNode][google.spanner.v1.PlanNode].
func (*PlanNode_ChildLink) Descriptor
func (*PlanNode_ChildLink) Descriptor() ([]byte, []int)
Deprecated: Use PlanNode_ChildLink.ProtoReflect.Descriptor instead.
func (*PlanNode_ChildLink) GetChildIndex
func (x *PlanNode_ChildLink) GetChildIndex() int32
func (*PlanNode_ChildLink) GetType
func (x *PlanNode_ChildLink) GetType() string
func (*PlanNode_ChildLink) GetVariable
func (x *PlanNode_ChildLink) GetVariable() string
func (*PlanNode_ChildLink) ProtoMessage
func (*PlanNode_ChildLink) ProtoMessage()
func (*PlanNode_ChildLink) ProtoReflect
func (x *PlanNode_ChildLink) ProtoReflect() protoreflect.Message
func (*PlanNode_ChildLink) Reset
func (x *PlanNode_ChildLink) Reset()
func (*PlanNode_ChildLink) String
func (x *PlanNode_ChildLink) String() string
PlanNode_Kind
type PlanNode_Kind int32
The kind of [PlanNode][google.spanner.v1.PlanNode]. Distinguishes between the two different kinds of nodes that can appear in a query plan.
PlanNode_KIND_UNSPECIFIED, PlanNode_RELATIONAL, PlanNode_SCALAR
const (
// Not specified.
PlanNode_KIND_UNSPECIFIED PlanNode_Kind = 0
// Denotes a Relational operator node in the expression tree. Relational
// operators represent iterative processing of rows during query execution.
// For example, a `TableScan` operation that reads rows from a table.
PlanNode_RELATIONAL PlanNode_Kind = 1
// Denotes a Scalar node in the expression tree. Scalar nodes represent
// non-iterable entities in the query plan. For example, constants or
// arithmetic operators appearing inside predicate expressions or references
// to column names.
PlanNode_SCALAR PlanNode_Kind = 2
)
func (PlanNode_Kind) Descriptor
func (PlanNode_Kind) Descriptor() protoreflect.EnumDescriptor
func (PlanNode_Kind) Enum
func (x PlanNode_Kind) Enum() *PlanNode_Kind
func (PlanNode_Kind) EnumDescriptor
func (PlanNode_Kind) EnumDescriptor() ([]byte, []int)
Deprecated: Use PlanNode_Kind.Descriptor instead.
func (PlanNode_Kind) Number
func (x PlanNode_Kind) Number() protoreflect.EnumNumber
func (PlanNode_Kind) String
func (x PlanNode_Kind) String() string
func (PlanNode_Kind) Type
func (PlanNode_Kind) Type() protoreflect.EnumType
PlanNode_ShortRepresentation
type PlanNode_ShortRepresentation struct {
Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
Subqueries map[string]int32 "" /* 162 byte string literal not displayed */
// contains filtered or unexported fields
}
Condensed representation of a node and its subtree. Only present for SCALAR [PlanNode(s)][google.spanner.v1.PlanNode].
func (*PlanNode_ShortRepresentation) Descriptor
func (*PlanNode_ShortRepresentation) Descriptor() ([]byte, []int)
Deprecated: Use PlanNode_ShortRepresentation.ProtoReflect.Descriptor instead.
func (*PlanNode_ShortRepresentation) GetDescription
func (x *PlanNode_ShortRepresentation) GetDescription() string
func (*PlanNode_ShortRepresentation) GetSubqueries
func (x *PlanNode_ShortRepresentation) GetSubqueries() map[string]int32
func (*PlanNode_ShortRepresentation) ProtoMessage
func (*PlanNode_ShortRepresentation) ProtoMessage()
func (*PlanNode_ShortRepresentation) ProtoReflect
func (x *PlanNode_ShortRepresentation) ProtoReflect() protoreflect.Message
func (*PlanNode_ShortRepresentation) Reset
func (x *PlanNode_ShortRepresentation) Reset()
func (*PlanNode_ShortRepresentation) String
func (x *PlanNode_ShortRepresentation) String() string
QueryPlan
type QueryPlan struct {
// The nodes in the query plan. Plan nodes are returned in pre-order starting
// with the plan root. Each [PlanNode][google.spanner.v1.PlanNode]'s `id` corresponds to its index in
// `plan_nodes`.
PlanNodes []*PlanNode `protobuf:"bytes,1,rep,name=plan_nodes,json=planNodes,proto3" json:"plan_nodes,omitempty"`
// contains filtered or unexported fields
}
Contains an ordered list of nodes appearing in the query plan.
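Since `plan_nodes` is returned in pre-order and each `ChildLink.child_index` points back into the same slice, the plan tree can be walked with a small recursion. A sketch, assuming the spannerpb import path:

package example

import (
	"fmt"

	"cloud.google.com/go/spanner/apiv1/spannerpb"
)

// printPlan prints the plan subtree rooted at node index idx, indented by depth.
func printPlan(plan *spannerpb.QueryPlan, idx int32, depth int) {
	nodes := plan.GetPlanNodes()
	if idx < 0 || int(idx) >= len(nodes) {
		return
	}
	n := nodes[idx]
	fmt.Printf("%*s%s (%s)\n", depth*2, "", n.GetDisplayName(), n.GetKind())
	for _, link := range n.GetChildLinks() {
		printPlan(plan, link.GetChildIndex(), depth+1)
	}
}

Calling printPlan(plan, 0, 0) starts at the root, because nodes are returned in pre-order with the root first.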
func (*QueryPlan) Descriptor
func (*QueryPlan) Descriptor() ([]byte, []int)
Deprecated: Use QueryPlan.ProtoReflect.Descriptor instead.
func (*QueryPlan) GetPlanNodes
func (x *QueryPlan) GetPlanNodes() []*PlanNode
func (*QueryPlan) ProtoMessage
func (*QueryPlan) ProtoMessage()
func (*QueryPlan) ProtoReflect
func (x *QueryPlan) ProtoReflect() protoreflect.Message
func (*QueryPlan) Reset
func (x *QueryPlan) Reset()
func (*QueryPlan) String
func (x *QueryPlan) String() string
ReadRequest
type ReadRequest struct {
Session string `protobuf:"bytes,1,opt,name=session,proto3" json:"session,omitempty"`
Transaction *TransactionSelector `protobuf:"bytes,2,opt,name=transaction,proto3" json:"transaction,omitempty"`
Table string `protobuf:"bytes,3,opt,name=table,proto3" json:"table,omitempty"`
Index string `protobuf:"bytes,4,opt,name=index,proto3" json:"index,omitempty"`
Columns []string `protobuf:"bytes,5,rep,name=columns,proto3" json:"columns,omitempty"`
KeySet *KeySet `protobuf:"bytes,6,opt,name=key_set,json=keySet,proto3" json:"key_set,omitempty"`
Limit int64 `protobuf:"varint,8,opt,name=limit,proto3" json:"limit,omitempty"`
ResumeToken []byte `protobuf:"bytes,9,opt,name=resume_token,json=resumeToken,proto3" json:"resume_token,omitempty"`
PartitionToken []byte `protobuf:"bytes,10,opt,name=partition_token,json=partitionToken,proto3" json:"partition_token,omitempty"`
RequestOptions *RequestOptions `protobuf:"bytes,11,opt,name=request_options,json=requestOptions,proto3" json:"request_options,omitempty"`
DirectedReadOptions *DirectedReadOptions `protobuf:"bytes,14,opt,name=directed_read_options,json=directedReadOptions,proto3" json:"directed_read_options,omitempty"`
DataBoostEnabled bool `protobuf:"varint,15,opt,name=data_boost_enabled,json=dataBoostEnabled,proto3" json:"data_boost_enabled,omitempty"`
OrderBy ReadRequest_OrderBy "" /* 127 byte string literal not displayed */
LockHint ReadRequest_LockHint "" /* 131 byte string literal not displayed */
// contains filtered or unexported fields
}
The request for [Read][google.spanner.v1.Spanner.Read] and [StreamingRead][google.spanner.v1.Spanner.StreamingRead].
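A hedged sketch of a read request combining several of these fields; the table, columns, limit, and lock hint are illustrative only, and the lock hint applies to reads done within a read-write transaction:

package example

import "cloud.google.com/go/spanner/apiv1/spannerpb"

// newReadRequest reads two columns for every row of "Users", without any
// ordering guarantee, and asks for exclusive locks up front.
func newReadRequest(session string) *spannerpb.ReadRequest {
	return &spannerpb.ReadRequest{
		Session:  session,
		Table:    "Users",
		Columns:  []string{"UserId", "UserName"},
		KeySet:   &spannerpb.KeySet{All: true},
		Limit:    1000,                                      // stop after 1000 rows
		OrderBy:  spannerpb.ReadRequest_ORDER_BY_NO_ORDER,   // any order is acceptable
		LockHint: spannerpb.ReadRequest_LOCK_HINT_EXCLUSIVE, // only useful under write contention
	}
}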
func (*ReadRequest) Descriptor
func (*ReadRequest) Descriptor() ([]byte, []int)
Deprecated: Use ReadRequest.ProtoReflect.Descriptor instead.
func (*ReadRequest) GetColumns
func (x *ReadRequest) GetColumns() []string
func (*ReadRequest) GetDataBoostEnabled
func (x *ReadRequest) GetDataBoostEnabled() bool
func (*ReadRequest) GetDirectedReadOptions
func (x *ReadRequest) GetDirectedReadOptions() *DirectedReadOptions
func (*ReadRequest) GetIndex
func (x *ReadRequest) GetIndex() string
func (*ReadRequest) GetKeySet
func (x *ReadRequest) GetKeySet() *KeySet
func (*ReadRequest) GetLimit
func (x *ReadRequest) GetLimit() int64
func (*ReadRequest) GetLockHint
func (x *ReadRequest) GetLockHint() ReadRequest_LockHint
func (*ReadRequest) GetOrderBy
func (x *ReadRequest) GetOrderBy() ReadRequest_OrderBy
func (*ReadRequest) GetPartitionToken
func (x *ReadRequest) GetPartitionToken() []byte
func (*ReadRequest) GetRequestOptions
func (x *ReadRequest) GetRequestOptions() *RequestOptions
func (*ReadRequest) GetResumeToken
func (x *ReadRequest) GetResumeToken() []byte
func (*ReadRequest) GetSession
func (x *ReadRequest) GetSession() string
func (*ReadRequest) GetTable
func (x *ReadRequest) GetTable() string
func (*ReadRequest) GetTransaction
func (x *ReadRequest) GetTransaction() *TransactionSelector
func (*ReadRequest) ProtoMessage
func (*ReadRequest) ProtoMessage()
func (*ReadRequest) ProtoReflect
func (x *ReadRequest) ProtoReflect() protoreflect.Message
func (*ReadRequest) Reset
func (x *ReadRequest) Reset()
func (*ReadRequest) String
func (x *ReadRequest) String() string
ReadRequest_LockHint
type ReadRequest_LockHint int32
A lock hint mechanism for reads done within a transaction.
ReadRequest_LOCK_HINT_UNSPECIFIED, ReadRequest_LOCK_HINT_SHARED, ReadRequest_LOCK_HINT_EXCLUSIVE
const (
// Default value.
//
// LOCK_HINT_UNSPECIFIED is equivalent to LOCK_HINT_SHARED.
ReadRequest_LOCK_HINT_UNSPECIFIED ReadRequest_LockHint = 0
// Acquire shared locks.
//
// By default when you perform a read as part of a read-write transaction,
// Spanner acquires shared read locks, which allows other reads to still
// access the data until your transaction is ready to commit. When your
// transaction is committing and writes are being applied, the transaction
// attempts to upgrade to an exclusive lock for any data you are writing.
// For more information about locks, see [Lock
// modes](https://cloud.google.com/spanner/docs/introspection/lock-statistics#explain-lock-modes).
ReadRequest_LOCK_HINT_SHARED ReadRequest_LockHint = 1
// Acquire exclusive locks.
//
// Requesting exclusive locks is beneficial if you observe high write
// contention, which means you notice that multiple transactions are
// concurrently trying to read and write to the same data, resulting in a
// large number of aborts. This problem occurs when two transactions
// initially acquire shared locks and then both try to upgrade to exclusive
// locks at the same time. In this situation both transactions are waiting
// for the other to give up their lock, resulting in a deadlocked situation.
// Spanner is able to detect this occurring and force one of the
// transactions to abort. However, this is a slow and expensive operation
// and results in lower performance. In this case it makes sense to acquire
// exclusive locks at the start of the transaction because then when
// multiple transactions try to act on the same data, they automatically get
// serialized. Each transaction waits its turn to acquire the lock and
// avoids getting into deadlock situations.
//
// Because the exclusive lock hint is just a hint, it should not be
// considered equivalent to a mutex. In other words, you should not use
// Spanner exclusive locks as a mutual exclusion mechanism for the execution
// of code outside of Spanner.
//
// **Note:** Request exclusive locks judiciously because they block others
// from reading that data for the entire transaction, rather than just when
// the writes are being performed. Unless you observe high write contention,
// you should use the default of shared read locks so you don't prematurely
// block other clients from reading the data that you're writing to.
ReadRequest_LOCK_HINT_EXCLUSIVE ReadRequest_LockHint = 2
)
func (ReadRequest_LockHint) Descriptor
func (ReadRequest_LockHint) Descriptor() protoreflect.EnumDescriptor
func (ReadRequest_LockHint) Enum
func (x ReadRequest_LockHint) Enum() *ReadRequest_LockHint
func (ReadRequest_LockHint) EnumDescriptor
func (ReadRequest_LockHint) EnumDescriptor() ([]byte, []int)
Deprecated: Use ReadRequest_LockHint.Descriptor instead.
func (ReadRequest_LockHint) Number
func (x ReadRequest_LockHint) Number() protoreflect.EnumNumber
func (ReadRequest_LockHint) String
func (x ReadRequest_LockHint) String() string
func (ReadRequest_LockHint) Type
func (ReadRequest_LockHint) Type() protoreflect.EnumType
ReadRequest_OrderBy
type ReadRequest_OrderBy int32
An option to control the order in which rows are returned from a read.
ReadRequest_ORDER_BY_UNSPECIFIED, ReadRequest_ORDER_BY_PRIMARY_KEY, ReadRequest_ORDER_BY_NO_ORDER
const (
// Default value.
//
// ORDER_BY_UNSPECIFIED is equivalent to ORDER_BY_PRIMARY_KEY.
ReadRequest_ORDER_BY_UNSPECIFIED ReadRequest_OrderBy = 0
// Read rows are returned in primary key order.
//
// In the event that this option is used in conjunction with the
// `partition_token` field, the API will return an `INVALID_ARGUMENT` error.
ReadRequest_ORDER_BY_PRIMARY_KEY ReadRequest_OrderBy = 1
// Read rows are returned in any order.
ReadRequest_ORDER_BY_NO_ORDER ReadRequest_OrderBy = 2
)
func (ReadRequest_OrderBy) Descriptor
func (ReadRequest_OrderBy) Descriptor() protoreflect.EnumDescriptor
func (ReadRequest_OrderBy) Enum
func (x ReadRequest_OrderBy) Enum() *ReadRequest_OrderBy
func (ReadRequest_OrderBy) EnumDescriptor
func (ReadRequest_OrderBy) EnumDescriptor() ([]byte, []int)
Deprecated: Use ReadRequest_OrderBy.Descriptor instead.
func (ReadRequest_OrderBy) Number
func (x ReadRequest_OrderBy) Number() protoreflect.EnumNumber
func (ReadRequest_OrderBy) String
func (x ReadRequest_OrderBy) String() string
func (ReadRequest_OrderBy) Type
func (ReadRequest_OrderBy) Type() protoreflect.EnumType
RequestOptions
type RequestOptions struct {
// Priority for the request.
Priority RequestOptions_Priority `protobuf:"varint,1,opt,name=priority,proto3,enum=google.spanner.v1.RequestOptions_Priority" json:"priority,omitempty"`
// A per-request tag which can be applied to queries or reads, used for
// statistics collection.
// Both request_tag and transaction_tag can be specified for a read or query
// that belongs to a transaction.
// This field is ignored for requests where it's not applicable (e.g.
// CommitRequest).
// Legal characters for `request_tag` values are all printable characters
// (ASCII 32 - 126) and the length of a request_tag is limited to 50
// characters. Values that exceed this limit are truncated.
// Any leading underscore (_) characters will be removed from the string.
RequestTag string `protobuf:"bytes,2,opt,name=request_tag,json=requestTag,proto3" json:"request_tag,omitempty"`
// A tag used for statistics collection about this transaction.
// Both request_tag and transaction_tag can be specified for a read or query
// that belongs to a transaction.
// The value of transaction_tag should be the same for all requests belonging
// to the same transaction.
// If this request doesn't belong to any transaction, transaction_tag will be
// ignored.
// Legal characters for `transaction_tag` values are all printable characters
// (ASCII 32 - 126) and the length of a transaction_tag is limited to 50
// characters. Values that exceed this limit are truncated.
// Any leading underscore (_) characters will be removed from the string.
TransactionTag string `protobuf:"bytes,3,opt,name=transaction_tag,json=transactionTag,proto3" json:"transaction_tag,omitempty"`
// contains filtered or unexported fields
}
Common request options for various APIs.
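A sketch of request options for a query that runs inside a tagged transaction; both tag strings are arbitrary placeholders within the documented 50-character limit:

package example

import "cloud.google.com/go/spanner/apiv1/spannerpb"

// newRequestOptions tags a read or query for statistics collection and
// lowers its scheduling priority.
func newRequestOptions() *spannerpb.RequestOptions {
	return &spannerpb.RequestOptions{
		Priority:       spannerpb.RequestOptions_PRIORITY_LOW,
		RequestTag:     "app=report,action=daily-rollup", // per-request tag
		TransactionTag: "app=report",                     // same value for the whole transaction
	}
}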
func (*RequestOptions) Descriptor
func (*RequestOptions) Descriptor() ([]byte, []int)
Deprecated: Use RequestOptions.ProtoReflect.Descriptor instead.
func (*RequestOptions) GetPriority
func (x *RequestOptions) GetPriority() RequestOptions_Priority
func (*RequestOptions) GetRequestTag
func (x *RequestOptions) GetRequestTag() string
func (*RequestOptions) GetTransactionTag
func (x *RequestOptions) GetTransactionTag() string
func (*RequestOptions) ProtoMessage
func (*RequestOptions) ProtoMessage()
func (*RequestOptions) ProtoReflect
func (x *RequestOptions) ProtoReflect() protoreflect.Message
func (*RequestOptions) Reset
func (x *RequestOptions) Reset()
func (*RequestOptions) String
func (x *RequestOptions) String() string
RequestOptions_Priority
type RequestOptions_Priority int32
The relative priority for requests. Note that priority is not applicable for [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction].
The priority acts as a hint to the Cloud Spanner scheduler and does not guarantee priority or order of execution. For example:
- Some parts of a write operation always execute at PRIORITY_HIGH, regardless of the specified priority. This may cause you to see an increase in high priority workload even when executing a low priority request. This can also potentially cause a priority inversion where a lower priority request will be fulfilled ahead of a higher priority request.
- If a transaction contains multiple operations with different priorities, Cloud Spanner does not guarantee to process the higher priority operations first. There may be other constraints to satisfy, such as order of operations.
RequestOptions_PRIORITY_UNSPECIFIED, RequestOptions_PRIORITY_LOW, RequestOptions_PRIORITY_MEDIUM, RequestOptions_PRIORITY_HIGH
const (
// `PRIORITY_UNSPECIFIED` is equivalent to `PRIORITY_HIGH`.
RequestOptions_PRIORITY_UNSPECIFIED RequestOptions_Priority = 0
// This specifies that the request is low priority.
RequestOptions_PRIORITY_LOW RequestOptions_Priority = 1
// This specifies that the request is medium priority.
RequestOptions_PRIORITY_MEDIUM RequestOptions_Priority = 2
// This specifies that the request is high priority.
RequestOptions_PRIORITY_HIGH RequestOptions_Priority = 3
)
func (RequestOptions_Priority) Descriptor
func (RequestOptions_Priority) Descriptor() protoreflect.EnumDescriptor
func (RequestOptions_Priority) Enum
func (x RequestOptions_Priority) Enum() *RequestOptions_Priority
func (RequestOptions_Priority) EnumDescriptor
func (RequestOptions_Priority) EnumDescriptor() ([]byte, []int)
Deprecated: Use RequestOptions_Priority.Descriptor instead.
func (RequestOptions_Priority) Number
func (x RequestOptions_Priority) Number() protoreflect.EnumNumber
func (RequestOptions_Priority) String
func (x RequestOptions_Priority) String() string
func (RequestOptions_Priority) Type
func (RequestOptions_Priority) Type() protoreflect.EnumType
ResultSet
type ResultSet struct {
// Metadata about the result set, such as row type information.
Metadata *ResultSetMetadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
// Each element in `rows` is a row whose format is defined by
// [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. The ith element
// in each row matches the ith field in
// [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. Elements are
// encoded based on type as described
// [here][google.spanner.v1.TypeCode].
Rows []*structpb.ListValue `protobuf:"bytes,2,rep,name=rows,proto3" json:"rows,omitempty"`
// Query plan and execution statistics for the SQL statement that
// produced this result set. These can be requested by setting
// [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
// DML statements always produce stats containing the number of rows
// modified, unless executed using the
// [ExecuteSqlRequest.QueryMode.PLAN][google.spanner.v1.ExecuteSqlRequest.QueryMode.PLAN] [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
// Other fields may or may not be populated, based on the
// [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
Stats *ResultSetStats `protobuf:"bytes,3,opt,name=stats,proto3" json:"stats,omitempty"`
// contains filtered or unexported fields
}
Results from [Read][google.spanner.v1.Spanner.Read] or [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql].
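A sketch that pairs each row's values with the column names from `metadata.row_type`; for simplicity it assumes every column is STRING-typed and that each row has one value per field:

package example

import "cloud.google.com/go/spanner/apiv1/spannerpb"

// rowsAsMaps converts a ResultSet of STRING columns into name->value maps,
// matching the ith value in each row with the ith field of row_type.
func rowsAsMaps(rs *spannerpb.ResultSet) []map[string]string {
	fields := rs.GetMetadata().GetRowType().GetFields()
	var out []map[string]string
	for _, row := range rs.GetRows() {
		m := make(map[string]string, len(fields))
		for i, v := range row.GetValues() {
			m[fields[i].GetName()] = v.GetStringValue()
		}
		out = append(out, m)
	}
	return out
}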
func (*ResultSet) Descriptor
func (*ResultSet) Descriptor() ([]byte, []int)
Deprecated: Use ResultSet.ProtoReflect.Descriptor instead.
func (*ResultSet) GetMetadata
func (x *ResultSet) GetMetadata() *ResultSetMetadata
func (*ResultSet) GetRows
func (x *ResultSet) GetRows() []*structpb.ListValue
func (*ResultSet) GetStats
func (x *ResultSet) GetStats() *ResultSetStats
func (*ResultSet) ProtoMessage
func (*ResultSet) ProtoMessage()
func (*ResultSet) ProtoReflect
func (x *ResultSet) ProtoReflect() protoreflect.Message
func (*ResultSet) Reset
func (x *ResultSet) Reset()
func (*ResultSet) String
func (x *ResultSet) String() string
ResultSetMetadata
type ResultSetMetadata struct {
// Indicates the field names and types for the rows in the result
// set. For example, a SQL query like `"SELECT UserId, UserName FROM
// Users"` could return a `row_type` value like:
//
// "fields": [
// { "name": "UserId", "type": { "code": "INT64" } },
// { "name": "UserName", "type": { "code": "STRING" } },
// ]
RowType *StructType `protobuf:"bytes,1,opt,name=row_type,json=rowType,proto3" json:"row_type,omitempty"`
// If the read or SQL query began a transaction as a side-effect, the
// information about the new transaction is yielded here.
Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction,proto3" json:"transaction,omitempty"`
// A SQL query can be parameterized. In PLAN mode, these parameters can be
// undeclared. This indicates the field names and types for those undeclared
// parameters in the SQL query. For example, a SQL query like `"SELECT * FROM
// Users where UserId = @userId and UserName = @userName "` could return a
// `undeclared_parameters` value like:
//
// "fields": [
// { "name": "UserId", "type": { "code": "INT64" } },
// { "name": "UserName", "type": { "code": "STRING" } },
// ]
UndeclaredParameters *StructType `protobuf:"bytes,3,opt,name=undeclared_parameters,json=undeclaredParameters,proto3" json:"undeclared_parameters,omitempty"`
// contains filtered or unexported fields
}
Metadata about a [ResultSet][google.spanner.v1.ResultSet] or [PartialResultSet][google.spanner.v1.PartialResultSet].
func (*ResultSetMetadata) Descriptor
func (*ResultSetMetadata) Descriptor() ([]byte, []int)
Deprecated: Use ResultSetMetadata.ProtoReflect.Descriptor instead.
func (*ResultSetMetadata) GetRowType
func (x *ResultSetMetadata) GetRowType() *StructType
func (*ResultSetMetadata) GetTransaction
func (x *ResultSetMetadata) GetTransaction() *Transaction
func (*ResultSetMetadata) GetUndeclaredParameters
func (x *ResultSetMetadata) GetUndeclaredParameters() *StructType
func (*ResultSetMetadata) ProtoMessage
func (*ResultSetMetadata) ProtoMessage()
func (*ResultSetMetadata) ProtoReflect
func (x *ResultSetMetadata) ProtoReflect() protoreflect.Message
func (*ResultSetMetadata) Reset
func (x *ResultSetMetadata) Reset()
func (*ResultSetMetadata) String
func (x *ResultSetMetadata) String() string
ResultSetStats
type ResultSetStats struct {
// [QueryPlan][google.spanner.v1.QueryPlan] for the query associated with this result.
QueryPlan *QueryPlan `protobuf:"bytes,1,opt,name=query_plan,json=queryPlan,proto3" json:"query_plan,omitempty"`
// Aggregated statistics from the execution of the query. Only present when
// the query is profiled. For example, a query could return the statistics as
// follows:
//
// {
// "rows_returned": "3",
// "elapsed_time": "1.22 secs",
// "cpu_time": "1.19 secs"
// }
QueryStats *structpb.Struct `protobuf:"bytes,2,opt,name=query_stats,json=queryStats,proto3" json:"query_stats,omitempty"`
// The number of rows modified by the DML statement.
//
// Types that are assignable to RowCount:
//
// *ResultSetStats_RowCountExact
// *ResultSetStats_RowCountLowerBound
RowCount isResultSetStats_RowCount `protobuf_oneof:"row_count"`
// contains filtered or unexported fields
}
Additional statistics about a [ResultSet][google.spanner.v1.ResultSet] or [PartialResultSet][google.spanner.v1.PartialResultSet].
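Because `row_count` is a oneof, callers type-switch on it to distinguish exact counts (standard DML) from lower bounds (Partitioned DML). A sketch:

package example

import (
	"fmt"

	"cloud.google.com/go/spanner/apiv1/spannerpb"
)

// describeRowCount reports how many rows a DML statement modified.
func describeRowCount(stats *spannerpb.ResultSetStats) string {
	switch rc := stats.GetRowCount().(type) {
	case *spannerpb.ResultSetStats_RowCountExact:
		return fmt.Sprintf("exactly %d rows modified", rc.RowCountExact)
	case *spannerpb.ResultSetStats_RowCountLowerBound:
		return fmt.Sprintf("at least %d rows modified", rc.RowCountLowerBound)
	default:
		return "no row count (not a DML statement)"
	}
}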
func (*ResultSetStats) Descriptor
func (*ResultSetStats) Descriptor() ([]byte, []int)
Deprecated: Use ResultSetStats.ProtoReflect.Descriptor instead.
func (*ResultSetStats) GetQueryPlan
func (x *ResultSetStats) GetQueryPlan() *QueryPlan
func (*ResultSetStats) GetQueryStats
func (x *ResultSetStats) GetQueryStats() *structpb.Struct
func (*ResultSetStats) GetRowCount
func (m *ResultSetStats) GetRowCount() isResultSetStats_RowCount
func (*ResultSetStats) GetRowCountExact
func (x *ResultSetStats) GetRowCountExact() int64
func (*ResultSetStats) GetRowCountLowerBound
func (x *ResultSetStats) GetRowCountLowerBound() int64
func (*ResultSetStats) ProtoMessage
func (*ResultSetStats) ProtoMessage()
func (*ResultSetStats) ProtoReflect
func (x *ResultSetStats) ProtoReflect() protoreflect.Message
func (*ResultSetStats) Reset
func (x *ResultSetStats) Reset()
func (*ResultSetStats) String
func (x *ResultSetStats) String() string
ResultSetStats_RowCountExact
type ResultSetStats_RowCountExact struct {
// Standard DML returns an exact count of rows that were modified.
RowCountExact int64 `protobuf:"varint,3,opt,name=row_count_exact,json=rowCountExact,proto3,oneof"`
}
ResultSetStats_RowCountLowerBound
type ResultSetStats_RowCountLowerBound struct {
// Partitioned DML does not offer exactly-once semantics, so it
// returns a lower bound of the rows modified.
RowCountLowerBound int64 `protobuf:"varint,4,opt,name=row_count_lower_bound,json=rowCountLowerBound,proto3,oneof"`
}
RollbackRequest
type RollbackRequest struct {
// Required. The session in which the transaction to roll back is running.
Session string `protobuf:"bytes,1,opt,name=session,proto3" json:"session,omitempty"`
// Required. The transaction to roll back.
TransactionId []byte `protobuf:"bytes,2,opt,name=transaction_id,json=transactionId,proto3" json:"transaction_id,omitempty"`
// contains filtered or unexported fields
}
The request for [Rollback][google.spanner.v1.Spanner.Rollback].
func (*RollbackRequest) Descriptor
func (*RollbackRequest) Descriptor() ([]byte, []int)
Deprecated: Use RollbackRequest.ProtoReflect.Descriptor instead.
func (*RollbackRequest) GetSession
func (x *RollbackRequest) GetSession() string
func (*RollbackRequest) GetTransactionId
func (x *RollbackRequest) GetTransactionId() []byte
func (*RollbackRequest) ProtoMessage
func (*RollbackRequest) ProtoMessage()
func (*RollbackRequest) ProtoReflect
func (x *RollbackRequest) ProtoReflect() protoreflect.Message
func (*RollbackRequest) Reset
func (x *RollbackRequest) Reset()
func (*RollbackRequest) String
func (x *RollbackRequest) String() string
Session
type Session struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Labels map[string]string "" /* 153 byte string literal not displayed */
CreateTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
ApproximateLastUseTime *timestamppb.Timestamp "" /* 131 byte string literal not displayed */
CreatorRole string `protobuf:"bytes,5,opt,name=creator_role,json=creatorRole,proto3" json:"creator_role,omitempty"`
Multiplexed bool `protobuf:"varint,6,opt,name=multiplexed,proto3" json:"multiplexed,omitempty"`
// contains filtered or unexported fields
}
A session in the Cloud Spanner API.
func (*Session) Descriptor
func (*Session) Descriptor() ([]byte, []int)
Deprecated: Use Session.ProtoReflect.Descriptor instead.
func (*Session) GetApproximateLastUseTime
func (x *Session) GetApproximateLastUseTime() *timestamppb.Timestamp
func (*Session) GetCreateTime
func (x *Session) GetCreateTime() *timestamppb.Timestamp
func (*Session) GetCreatorRole
func (x *Session) GetCreatorRole() string
func (*Session) GetLabels
func (x *Session) GetLabels() map[string]string
func (*Session) GetMultiplexed
func (x *Session) GetMultiplexed() bool
func (*Session) GetName
func (x *Session) GetName() string
func (*Session) ProtoMessage
func (*Session) ProtoMessage()
func (*Session) ProtoReflect
func (x *Session) ProtoReflect() protoreflect.Message
func (*Session) Reset
func (x *Session) Reset()
func (*Session) String
func (x *Session) String() string
SpannerClient
type SpannerClient interface {
// Creates a new session. A session can be used to perform
// transactions that read and/or modify data in a Cloud Spanner database.
// Sessions are meant to be reused for many consecutive
// transactions.
//
// Sessions can only execute one transaction at a time. To execute
// multiple concurrent read-write/write-only transactions, create
// multiple sessions. Note that standalone reads and queries use a
// transaction internally, and count toward the one transaction
// limit.
//
// Active sessions use additional server resources, so it is a good idea to
// delete idle and unneeded sessions.
// Aside from explicit deletes, Cloud Spanner may delete sessions for which no
// operations are sent for more than an hour. If a session is deleted,
// requests to it return `NOT_FOUND`.
//
// Idle sessions can be kept alive by sending a trivial SQL query
// periodically, e.g., `"SELECT 1"`.
CreateSession(ctx context.Context, in *CreateSessionRequest, opts ...grpc.CallOption) (*Session, error)
// Creates multiple new sessions.
//
// This API can be used to initialize a session cache on the clients.
// See https://goo.gl/TgSFN2 for best practices on session cache management.
BatchCreateSessions(ctx context.Context, in *BatchCreateSessionsRequest, opts ...grpc.CallOption) (*BatchCreateSessionsResponse, error)
// Gets a session. Returns `NOT_FOUND` if the session does not exist.
// This is mainly useful for determining whether a session is still
// alive.
GetSession(ctx context.Context, in *GetSessionRequest, opts ...grpc.CallOption) (*Session, error)
// Lists all sessions in a given database.
ListSessions(ctx context.Context, in *ListSessionsRequest, opts ...grpc.CallOption) (*ListSessionsResponse, error)
// Ends a session, releasing server resources associated with it. This will
// asynchronously trigger cancellation of any operations that are running with
// this session.
DeleteSession(ctx context.Context, in *DeleteSessionRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
// Executes an SQL statement, returning all results in a single reply. This
// method cannot be used to return a result set larger than 10 MiB;
// if the query yields more data than that, the query fails with
// a `FAILED_PRECONDITION` error.
//
// Operations inside read-write transactions might return `ABORTED`. If
// this occurs, the application should restart the transaction from
// the beginning. See [Transaction][google.spanner.v1.Transaction] for more
// details.
//
// Larger result sets can be fetched in streaming fashion by calling
// [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql]
// instead.
ExecuteSql(ctx context.Context, in *ExecuteSqlRequest, opts ...grpc.CallOption) (*ResultSet, error)
// Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the
// result set as a stream. Unlike
// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there is no limit on
// the size of the returned result set. However, no individual row in the
// result set can exceed 100 MiB, and no column value can exceed 10 MiB.
ExecuteStreamingSql(ctx context.Context, in *ExecuteSqlRequest, opts ...grpc.CallOption) (Spanner_ExecuteStreamingSqlClient, error)
// Executes a batch of SQL DML statements. This method allows many statements
// to be run with lower latency than submitting them sequentially with
// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql].
//
// Statements are executed in sequential order. A request can succeed even if
// a statement fails. The
// [ExecuteBatchDmlResponse.status][google.spanner.v1.ExecuteBatchDmlResponse.status]
// field in the response provides information about the statement that failed.
// Clients must inspect this field to determine whether an error occurred.
//
// Execution stops after the first failed statement; the remaining statements
// are not executed.
ExecuteBatchDml(ctx context.Context, in *ExecuteBatchDmlRequest, opts ...grpc.CallOption) (*ExecuteBatchDmlResponse, error)
// Reads rows from the database using key lookups and scans, as a
// simple key/value style alternative to
// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method cannot be
// used to return a result set larger than 10 MiB; if the read matches more
// data than that, the read fails with a `FAILED_PRECONDITION`
// error.
//
// Reads inside read-write transactions might return `ABORTED`. If
// this occurs, the application should restart the transaction from
// the beginning. See [Transaction][google.spanner.v1.Transaction] for more
// details.
//
// Larger result sets can be yielded in streaming fashion by calling
// [StreamingRead][google.spanner.v1.Spanner.StreamingRead] instead.
Read(ctx context.Context, in *ReadRequest, opts ...grpc.CallOption) (*ResultSet, error)
// Like [Read][google.spanner.v1.Spanner.Read], except returns the result set
// as a stream. Unlike [Read][google.spanner.v1.Spanner.Read], there is no
// limit on the size of the returned result set. However, no individual row in
// the result set can exceed 100 MiB, and no column value can exceed
// 10 MiB.
StreamingRead(ctx context.Context, in *ReadRequest, opts ...grpc.CallOption) (Spanner_StreamingReadClient, error)
// Begins a new transaction. This step can often be skipped:
// [Read][google.spanner.v1.Spanner.Read],
// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and
// [Commit][google.spanner.v1.Spanner.Commit] can begin a new transaction as a
// side-effect.
BeginTransaction(ctx context.Context, in *BeginTransactionRequest, opts ...grpc.CallOption) (*Transaction, error)
// Commits a transaction. The request includes the mutations to be
// applied to rows in the database.
//
// `Commit` might return an `ABORTED` error. This can occur at any time;
// commonly, the cause is conflicts with concurrent
// transactions. However, it can also happen for a variety of other
// reasons. If `Commit` returns `ABORTED`, the caller should re-attempt
// the transaction from the beginning, re-using the same session.
//
// On very rare occasions, `Commit` might return `UNKNOWN`. This can happen,
// for example, if the client job experiences a 1+ hour networking failure.
// At that point, Cloud Spanner has lost track of the transaction outcome and
// we recommend that you perform another read from the database to see the
// state of things as they are now.
Commit(ctx context.Context, in *CommitRequest, opts ...grpc.CallOption) (*CommitResponse, error)
// Rolls back a transaction, releasing any locks it holds. It is a good
// idea to call this for any transaction that includes one or more
// [Read][google.spanner.v1.Spanner.Read] or
// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and ultimately
// decides not to commit.
//
// `Rollback` returns `OK` if it successfully aborts the transaction, the
// transaction was already aborted, or the transaction is not
// found. `Rollback` never returns `ABORTED`.
Rollback(ctx context.Context, in *RollbackRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
// Creates a set of partition tokens that can be used to execute a query
// operation in parallel. Each of the returned partition tokens can be used
// by [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] to
// specify a subset of the query result to read. The same session and
// read-only transaction must be used by the PartitionQueryRequest used to
// create the partition tokens and the ExecuteSqlRequests that use the
// partition tokens.
//
// Partition tokens become invalid when the session used to create them
// is deleted, is idle for too long, begins a new transaction, or becomes too
// old. When any of these happen, it is not possible to resume the query, and
// the whole operation must be restarted from the beginning.
PartitionQuery(ctx context.Context, in *PartitionQueryRequest, opts ...grpc.CallOption) (*PartitionResponse, error)
// Creates a set of partition tokens that can be used to execute a read
// operation in parallel. Each of the returned partition tokens can be used
// by [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to specify a
// subset of the read result to read. The same session and read-only
// transaction must be used by the PartitionReadRequest used to create the
// partition tokens and the ReadRequests that use the partition tokens. There
// are no ordering guarantees on rows returned among the returned partition
// tokens, or even within each individual StreamingRead call issued with a
// partition_token.
//
// Partition tokens become invalid when the session used to create them
// is deleted, is idle for too long, begins a new transaction, or becomes too
// old. When any of these happen, it is not possible to resume the read, and
// the whole operation must be restarted from the beginning.
PartitionRead(ctx context.Context, in *PartitionReadRequest, opts ...grpc.CallOption) (*PartitionResponse, error)
// Batches the supplied mutation groups in a collection of efficient
// transactions. All mutations in a group are committed atomically. However,
// mutations across groups can be committed non-atomically in an unspecified
// order and thus, they must be independent of each other. Partial failure is
// possible, i.e., some groups may have been committed successfully, while
// some may have failed. The results of individual batches are streamed into
// the response as the batches are applied.
//
// BatchWrite requests are not replay protected, meaning that each mutation
// group may be applied more than once. Replays of non-idempotent mutations
// may have undesirable effects. For example, replays of an insert mutation
// may produce an already exists error or if you use generated or commit
// timestamp-based keys, it may result in additional rows being added to the
// mutation's table. We recommend structuring your mutation groups to be
// idempotent to avoid this issue.
BatchWrite(ctx context.Context, in *BatchWriteRequest, opts ...grpc.CallOption) (Spanner_BatchWriteClient, error)
}
SpannerClient is the client API for Spanner service.
For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
func NewSpannerClient
func NewSpannerClient(cc grpc.ClientConnInterface) SpannerClient
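As a minimal sketch of this raw RPC surface (not the recommended high-level spanner client), the following assumes an already-authenticated gRPC connection to the Spanner endpoint, a database name, and that this generated package is imported under the spannerpb alias (import path shown is an assumption): it creates a session and runs a query in a single-use, strong read-only transaction.

import (
	"context"
	"fmt"

	"google.golang.org/grpc"

	// Import path assumed; use the path under which this generated package
	// lives in your module.
	spannerpb "cloud.google.com/go/spanner/apiv1/spannerpb"
)

// queryOnce creates a session and runs one query in a single-use, strong
// read-only transaction. conn must already be authenticated against the
// Spanner endpoint and db is a database name such as
// "projects/P/instances/I/databases/D" (both assumptions of this sketch).
func queryOnce(ctx context.Context, conn grpc.ClientConnInterface, db string) error {
	client := spannerpb.NewSpannerClient(conn)

	// Sessions are meant to be reused for many consecutive transactions.
	sess, err := client.CreateSession(ctx, &spannerpb.CreateSessionRequest{Database: db})
	if err != nil {
		return err
	}
	defer client.DeleteSession(ctx, &spannerpb.DeleteSessionRequest{Name: sess.GetName()})

	res, err := client.ExecuteSql(ctx, &spannerpb.ExecuteSqlRequest{
		Session: sess.GetName(),
		Sql:     "SELECT 1",
		Transaction: &spannerpb.TransactionSelector{
			Selector: &spannerpb.TransactionSelector_SingleUse{
				SingleUse: &spannerpb.TransactionOptions{
					Mode: &spannerpb.TransactionOptions_ReadOnly_{
						ReadOnly: &spannerpb.TransactionOptions_ReadOnly{
							TimestampBound: &spannerpb.TransactionOptions_ReadOnly_Strong{Strong: true},
						},
					},
				},
			},
		},
	})
	if err != nil {
		return err
	}
	fmt.Println("rows returned:", len(res.GetRows()))
	return nil
}

Connection setup and credentials are omitted above; most applications use the hand-written cloud.google.com/go/spanner client instead, which wraps these RPCs and manages sessions and retries for you.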
SpannerServer
type SpannerServer interface {
// Creates a new session. A session can be used to perform
// transactions that read and/or modify data in a Cloud Spanner database.
// Sessions are meant to be reused for many consecutive
// transactions.
//
// Sessions can only execute one transaction at a time. To execute
// multiple concurrent read-write/write-only transactions, create
// multiple sessions. Note that standalone reads and queries use a
// transaction internally, and count toward the one transaction
// limit.
//
// Active sessions use additional server resources, so it is a good idea to
// delete idle and unneeded sessions.
// Aside from explicit deletes, Cloud Spanner may delete sessions for which no
// operations are sent for more than an hour. If a session is deleted,
// requests to it return `NOT_FOUND`.
//
// Idle sessions can be kept alive by sending a trivial SQL query
// periodically, e.g., `"SELECT 1"`.
CreateSession(context.Context, *CreateSessionRequest) (*Session, error)
// Creates multiple new sessions.
//
// This API can be used to initialize a session cache on the clients.
// See https://goo.gl/TgSFN2 for best practices on session cache management.
BatchCreateSessions(context.Context, *BatchCreateSessionsRequest) (*BatchCreateSessionsResponse, error)
// Gets a session. Returns `NOT_FOUND` if the session does not exist.
// This is mainly useful for determining whether a session is still
// alive.
GetSession(context.Context, *GetSessionRequest) (*Session, error)
// Lists all sessions in a given database.
ListSessions(context.Context, *ListSessionsRequest) (*ListSessionsResponse, error)
// Ends a session, releasing server resources associated with it. This will
// asynchronously trigger cancellation of any operations that are running with
// this session.
DeleteSession(context.Context, *DeleteSessionRequest) (*emptypb.Empty, error)
// Executes an SQL statement, returning all results in a single reply. This
// method cannot be used to return a result set larger than 10 MiB;
// if the query yields more data than that, the query fails with
// a `FAILED_PRECONDITION` error.
//
// Operations inside read-write transactions might return `ABORTED`. If
// this occurs, the application should restart the transaction from
// the beginning. See [Transaction][google.spanner.v1.Transaction] for more
// details.
//
// Larger result sets can be fetched in streaming fashion by calling
// [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql]
// instead.
ExecuteSql(context.Context, *ExecuteSqlRequest) (*ResultSet, error)
// Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the
// result set as a stream. Unlike
// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there is no limit on
// the size of the returned result set. However, no individual row in the
// result set can exceed 100 MiB, and no column value can exceed 10 MiB.
ExecuteStreamingSql(*ExecuteSqlRequest, Spanner_ExecuteStreamingSqlServer) error
// Executes a batch of SQL DML statements. This method allows many statements
// to be run with lower latency than submitting them sequentially with
// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql].
//
// Statements are executed in sequential order. A request can succeed even if
// a statement fails. The
// [ExecuteBatchDmlResponse.status][google.spanner.v1.ExecuteBatchDmlResponse.status]
// field in the response provides information about the statement that failed.
// Clients must inspect this field to determine whether an error occurred.
//
// Execution stops after the first failed statement; the remaining statements
// are not executed.
ExecuteBatchDml(context.Context, *ExecuteBatchDmlRequest) (*ExecuteBatchDmlResponse, error)
// Reads rows from the database using key lookups and scans, as a
// simple key/value style alternative to
// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method cannot be
// used to return a result set larger than 10 MiB; if the read matches more
// data than that, the read fails with a `FAILED_PRECONDITION`
// error.
//
// Reads inside read-write transactions might return `ABORTED`. If
// this occurs, the application should restart the transaction from
// the beginning. See [Transaction][google.spanner.v1.Transaction] for more
// details.
//
// Larger result sets can be yielded in streaming fashion by calling
// [StreamingRead][google.spanner.v1.Spanner.StreamingRead] instead.
Read(context.Context, *ReadRequest) (*ResultSet, error)
// Like [Read][google.spanner.v1.Spanner.Read], except returns the result set
// as a stream. Unlike [Read][google.spanner.v1.Spanner.Read], there is no
// limit on the size of the returned result set. However, no individual row in
// the result set can exceed 100 MiB, and no column value can exceed
// 10 MiB.
StreamingRead(*ReadRequest, Spanner_StreamingReadServer) error
// Begins a new transaction. This step can often be skipped:
// [Read][google.spanner.v1.Spanner.Read],
// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and
// [Commit][google.spanner.v1.Spanner.Commit] can begin a new transaction as a
// side-effect.
BeginTransaction(context.Context, *BeginTransactionRequest) (*Transaction, error)
// Commits a transaction. The request includes the mutations to be
// applied to rows in the database.
//
// `Commit` might return an `ABORTED` error. This can occur at any time;
// commonly, the cause is conflicts with concurrent
// transactions. However, it can also happen for a variety of other
// reasons. If `Commit` returns `ABORTED`, the caller should re-attempt
// the transaction from the beginning, re-using the same session.
//
// On very rare occasions, `Commit` might return `UNKNOWN`. This can happen,
// for example, if the client job experiences a 1+ hour networking failure.
// At that point, Cloud Spanner has lost track of the transaction outcome and
// we recommend that you perform another read from the database to see the
// state of things as they are now.
Commit(context.Context, *CommitRequest) (*CommitResponse, error)
// Rolls back a transaction, releasing any locks it holds. It is a good
// idea to call this for any transaction that includes one or more
// [Read][google.spanner.v1.Spanner.Read] or
// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and ultimately
// decides not to commit.
//
// `Rollback` returns `OK` if it successfully aborts the transaction, the
// transaction was already aborted, or the transaction is not
// found. `Rollback` never returns `ABORTED`.
Rollback(context.Context, *RollbackRequest) (*emptypb.Empty, error)
// Creates a set of partition tokens that can be used to execute a query
// operation in parallel. Each of the returned partition tokens can be used
// by [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] to
// specify a subset of the query result to read. The same session and
// read-only transaction must be used by the PartitionQueryRequest used to
// create the partition tokens and the ExecuteSqlRequests that use the
// partition tokens.
//
// Partition tokens become invalid when the session used to create them
// is deleted, is idle for too long, begins a new transaction, or becomes too
// old. When any of these happen, it is not possible to resume the query, and
// the whole operation must be restarted from the beginning.
PartitionQuery(context.Context, *PartitionQueryRequest) (*PartitionResponse, error)
// Creates a set of partition tokens that can be used to execute a read
// operation in parallel. Each of the returned partition tokens can be used
// by [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to specify a
// subset of the read result to read. The same session and read-only
// transaction must be used by the PartitionReadRequest used to create the
// partition tokens and the ReadRequests that use the partition tokens. There
// are no ordering guarantees on rows returned among the returned partition
// tokens, or even within each individual StreamingRead call issued with a
// partition_token.
//
// Partition tokens become invalid when the session used to create them
// is deleted, is idle for too long, begins a new transaction, or becomes too
// old. When any of these happen, it is not possible to resume the read, and
// the whole operation must be restarted from the beginning.
PartitionRead(context.Context, *PartitionReadRequest) (*PartitionResponse, error)
// Batches the supplied mutation groups in a collection of efficient
// transactions. All mutations in a group are committed atomically. However,
// mutations across groups can be committed non-atomically in an unspecified
// order and thus, they must be independent of each other. Partial failure is
// possible, i.e., some groups may have been committed successfully, while
// some may have failed. The results of individual batches are streamed into
// the response as the batches are applied.
//
// BatchWrite requests are not replay protected, meaning that each mutation
// group may be applied more than once. Replays of non-idempotent mutations
// may have undesirable effects. For example, replays of an insert mutation
// may produce an already exists error or if you use generated or commit
// timestamp-based keys, it may result in additional rows being added to the
// mutation's table. We recommend structuring your mutation groups to be
// idempotent to avoid this issue.
BatchWrite(*BatchWriteRequest, Spanner_BatchWriteServer) error
}
SpannerServer is the server API for Spanner service.
Spanner_BatchWriteClient
type Spanner_BatchWriteClient interface {
Recv() (*BatchWriteResponse, error)
grpc.ClientStream
}
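A hedged sketch of using this stream: the table, columns, and values below are illustrative assumptions, and spannerpb stands for this package's assumed import alias. Each BatchWriteResponse reports the outcome of one or more mutation groups by their index in the request.

import (
	"context"
	"fmt"
	"io"

	"google.golang.org/protobuf/types/known/structpb"

	spannerpb "cloud.google.com/go/spanner/apiv1/spannerpb" // path assumed
)

// batchWriteOnce applies one mutation group and drains the response stream.
// Table and column names are illustrative only.
func batchWriteOnce(ctx context.Context, client spannerpb.SpannerClient, session string) error {
	row, err := structpb.NewList([]interface{}{"user-1", "Alice"})
	if err != nil {
		return err
	}
	stream, err := client.BatchWrite(ctx, &spannerpb.BatchWriteRequest{
		Session: session,
		MutationGroups: []*spannerpb.BatchWriteRequest_MutationGroup{{
			Mutations: []*spannerpb.Mutation{{
				Operation: &spannerpb.Mutation_InsertOrUpdate{
					InsertOrUpdate: &spannerpb.Mutation_Write{
						Table:   "Users",
						Columns: []string{"UserId", "Name"},
						Values:  []*structpb.ListValue{row},
					},
				},
			}},
		}},
	})
	if err != nil {
		return err
	}
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return nil // results for every group have been reported
		}
		if err != nil {
			return err
		}
		// Each response carries the commit status for one or more groups,
		// identified by their index in the request.
		fmt.Printf("groups %v: status code %d\n", resp.GetIndexes(), resp.GetStatus().GetCode())
	}
}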
Spanner_BatchWriteServer
type Spanner_BatchWriteServer interface {
Send(*BatchWriteResponse) error
grpc.ServerStream
}
Spanner_ExecuteStreamingSqlClient
type Spanner_ExecuteStreamingSqlClient interface {
Recv() (*PartialResultSet, error)
grpc.ClientStream
}
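The stream yields PartialResultSet messages until io.EOF. A minimal consumption sketch follows, assuming a valid session and SQL text and spannerpb as this package's import alias; reassembly of chunked values and resume-token handling are omitted.

import (
	"context"
	"fmt"
	"io"

	spannerpb "cloud.google.com/go/spanner/apiv1/spannerpb" // path assumed
)

// streamQuery drains a streaming result set. Handling of chunked values and
// resume tokens is omitted for brevity.
func streamQuery(ctx context.Context, client spannerpb.SpannerClient, session, sql string) error {
	stream, err := client.ExecuteStreamingSql(ctx, &spannerpb.ExecuteSqlRequest{
		Session: session,
		Sql:     sql,
	})
	if err != nil {
		return err
	}
	total := 0
	for {
		prs, err := stream.Recv()
		if err == io.EOF {
			break // server closed the stream: the result set is complete
		}
		if err != nil {
			return err
		}
		// PartialResultSet carries a flat list of column values; when
		// ChunkedValue is set, the last value continues in the next message.
		total += len(prs.GetValues())
	}
	fmt.Println("values received:", total)
	return nil
}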
Spanner_ExecuteStreamingSqlServer
type Spanner_ExecuteStreamingSqlServer interface {
Send(*PartialResultSet) error
grpc.ServerStream
}
Spanner_StreamingReadClient
type Spanner_StreamingReadClient interface {
Recv() (*PartialResultSet, error)
grpc.ClientStream
}
Spanner_StreamingReadServer
type Spanner_StreamingReadServer interface {
Send(*PartialResultSet) error
grpc.ServerStream
}
StructType
type StructType struct {
// The list of fields that make up this struct. Order is
// significant, because values of this struct type are represented as
// lists, where the order of field values matches the order of
// fields in the [StructType][google.spanner.v1.StructType]. In turn, the order of fields
// matches the order of columns in a read request, or the order of
// fields in the `SELECT` clause of a query.
Fields []*StructType_Field `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty"`
// contains filtered or unexported fields
}
StructType defines the fields of a [STRUCT][google.spanner.v1.TypeCode.STRUCT] type.
func (*StructType) Descriptor
func (*StructType) Descriptor() ([]byte, []int)
Deprecated: Use StructType.ProtoReflect.Descriptor instead.
func (*StructType) GetFields
func (x *StructType) GetFields() []*StructType_Field
func (*StructType) ProtoMessage
func (*StructType) ProtoMessage()
func (*StructType) ProtoReflect
func (x *StructType) ProtoReflect() protoreflect.Message
func (*StructType) Reset
func (x *StructType) Reset()
func (*StructType) String
func (x *StructType) String() string
StructType_Field
type StructType_Field struct {
// The name of the field. For reads, this is the column name. For
// SQL queries, it is the column alias (e.g., `"Word"` in the
// query `"SELECT 'hello' AS Word"`), or the column name (e.g.,
// `"ColName"` in the query `"SELECT ColName FROM Table"`). Some
// columns might have an empty name (e.g., `"SELECT
// UPPER(ColName)"`). Note that a query result can contain
// multiple fields with the same name.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// The type of the field.
Type *Type `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
// contains filtered or unexported fields
}
Message representing a single field of a struct.
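Because field order determines how struct values are encoded, a STRUCT-typed query parameter pairs a StructType in ParamTypes with a list value whose entries follow the same field order. A sketch, with illustrative names and an assumed spannerpb import alias:

import (
	"google.golang.org/protobuf/types/known/structpb"

	spannerpb "cloud.google.com/go/spanner/apiv1/spannerpb" // path assumed
)

// structParamRequest builds a request whose parameter `p` has type
// STRUCT<Word STRING, Count INT64>. The value is a list whose entries follow
// the order of StructType.Fields; INT64 travels as a decimal string.
func structParamRequest(session string) (*spannerpb.ExecuteSqlRequest, error) {
	val, err := structpb.NewList([]interface{}{"hello", "3"})
	if err != nil {
		return nil, err
	}
	return &spannerpb.ExecuteSqlRequest{
		Session: session,
		Sql:     "SELECT @p.Word AS Word, @p.Count AS Count",
		Params: &structpb.Struct{Fields: map[string]*structpb.Value{
			"p": structpb.NewListValue(val),
		}},
		ParamTypes: map[string]*spannerpb.Type{
			"p": {
				Code: spannerpb.TypeCode_STRUCT,
				StructType: &spannerpb.StructType{
					Fields: []*spannerpb.StructType_Field{
						{Name: "Word", Type: &spannerpb.Type{Code: spannerpb.TypeCode_STRING}},
						{Name: "Count", Type: &spannerpb.Type{Code: spannerpb.TypeCode_INT64}},
					},
				},
			},
		},
	}, nil
}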
func (*StructType_Field) Descriptor
func (*StructType_Field) Descriptor() ([]byte, []int)
Deprecated: Use StructType_Field.ProtoReflect.Descriptor instead.
func (*StructType_Field) GetName
func (x *StructType_Field) GetName() string
func (*StructType_Field) GetType
func (x *StructType_Field) GetType() *Type
func (*StructType_Field) ProtoMessage
func (*StructType_Field) ProtoMessage()
func (*StructType_Field) ProtoReflect
func (x *StructType_Field) ProtoReflect() protoreflect.Message
func (*StructType_Field) Reset
func (x *StructType_Field) Reset()
func (*StructType_Field) String
func (x *StructType_Field) String() string
Transaction
type Transaction struct {
// `id` may be used to identify the transaction in subsequent
// [Read][google.spanner.v1.Spanner.Read],
// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql],
// [Commit][google.spanner.v1.Spanner.Commit], or
// [Rollback][google.spanner.v1.Spanner.Rollback] calls.
//
// Single-use read-only transactions do not have IDs, because
// single-use transactions do not support multiple requests.
Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
// For snapshot read-only transactions, the read timestamp chosen
// for the transaction. Not returned by default: see
// [TransactionOptions.ReadOnly.return_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.return_read_timestamp].
//
// A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
// Example: `"2014-10-02T15:01:23.045123456Z"`.
ReadTimestamp *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=read_timestamp,json=readTimestamp,proto3" json:"read_timestamp,omitempty"`
// contains filtered or unexported fields
}
A transaction.
func (*Transaction) Descriptor
func (*Transaction) Descriptor() ([]byte, []int)
Deprecated: Use Transaction.ProtoReflect.Descriptor instead.
func (*Transaction) GetId
func (x *Transaction) GetId() []byte
func (*Transaction) GetReadTimestamp
func (x *Transaction) GetReadTimestamp() *timestamppb.Timestamp
func (*Transaction) ProtoMessage
func (*Transaction) ProtoMessage()
func (*Transaction) ProtoReflect
func (x *Transaction) ProtoReflect() protoreflect.Message
func (*Transaction) Reset
func (x *Transaction) Reset()
func (*Transaction) String
func (x *Transaction) String() string
TransactionOptions
type TransactionOptions struct {
Mode isTransactionOptions_Mode `protobuf_oneof:"mode"`
ExcludeTxnFromChangeStreams bool "" /* 149 byte string literal not displayed */
}
Transactions:
Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and do count towards the one transaction limit). After the active transaction is completed, the session can immediately be re-used for the next transaction. It is not necessary to create a new session for each transaction.
Transaction modes:
Cloud Spanner supports three transaction modes:
Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on pessimistic locking and, if necessary, two-phase commit. Locking read-write transactions may abort, requiring the application to retry.
Snapshot read-only. Snapshot read-only transactions provide guaranteed consistency across several reads, but do not allow writes. Snapshot read-only transactions can be configured to read at timestamps in the past, or configured to perform a strong read (where Spanner will select a timestamp such that the read is guaranteed to see the effects of all transactions that have committed before the start of the read). Snapshot read-only transactions do not need to be committed.
Queries on change streams must be performed with the snapshot read-only transaction mode, specifying a strong read. Please see [TransactionOptions.ReadOnly.strong][google.spanner.v1.TransactionOptions.ReadOnly.strong] for more details.
Partitioned DML. This type of transaction is used to execute a single Partitioned DML statement. Partitioned DML partitions the key space and runs the DML statement over each partition in parallel using separate, internal transactions that commit independently. Partitioned DML transactions do not need to be committed.
For transactions that only read, snapshot read-only transactions provide simpler semantics and are almost always faster. In particular, read-only transactions do not take locks, so they do not conflict with read-write transactions. As a consequence of not taking locks, they also do not abort, so retry loops are not needed.
Transactions may only read and write data in a single database. They may, however, read and write data in different tables within that database.
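As a quick reference, the three modes correspond to the three TransactionOptions mode variants below (a sketch assuming spannerpb is this package's import alias):

// spannerpb is an assumed import alias for this generated package.
var (
	lockingReadWrite = &spannerpb.TransactionOptions{
		Mode: &spannerpb.TransactionOptions_ReadWrite_{
			ReadWrite: &spannerpb.TransactionOptions_ReadWrite{},
		},
	}
	snapshotStrongReadOnly = &spannerpb.TransactionOptions{
		Mode: &spannerpb.TransactionOptions_ReadOnly_{
			ReadOnly: &spannerpb.TransactionOptions_ReadOnly{
				TimestampBound: &spannerpb.TransactionOptions_ReadOnly_Strong{Strong: true},
			},
		},
	}
	partitionedDML = &spannerpb.TransactionOptions{
		Mode: &spannerpb.TransactionOptions_PartitionedDml_{
			PartitionedDml: &spannerpb.TransactionOptions_PartitionedDml{},
		},
	}
)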
Locking read-write transactions:
Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally consistent.
Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and cause less contention. Cloud Spanner attempts to keep read locks active as long as the transaction continues to do reads, and the transaction has not been terminated by [Commit][google.spanner.v1.Spanner.Commit] or [Rollback][google.spanner.v1.Spanner.Rollback]. Long periods of inactivity at the client may cause Cloud Spanner to release a transaction's locks and abort it.
Conceptually, a read-write transaction consists of zero or more reads or SQL statements followed by [Commit][google.spanner.v1.Spanner.Commit]. At any time before [Commit][google.spanner.v1.Spanner.Commit], the client can send a [Rollback][google.spanner.v1.Spanner.Rollback] request to abort the transaction.
Semantics:
Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire write locks for all writes. Cloud Spanner can abort the transaction for any reason. If a commit attempt returns ABORTED, Cloud Spanner guarantees that the transaction has not modified any user data in Cloud Spanner.
Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves.
Retrying aborted transactions:
When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully committing the retry, the client should execute the retry in the same session as the original attempt. The original session's lock priority increases with each consecutive abort, meaning that each attempt has a slightly better chance of success than the previous.
Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a short period before successfully committing. Thus, it is not a good idea to cap the number of retries a transaction can attempt; instead, it is better to limit the total amount of time spent retrying.
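A hedged sketch of that retry pattern against the raw RPCs: the txnBody callback and the absence of backoff are simplifications, and spannerpb is an assumed import alias.

import (
	"context"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"

	spannerpb "cloud.google.com/go/spanner/apiv1/spannerpb" // path assumed
)

// runWithRetries re-runs the whole transaction (in the same session, per the
// guidance above) whenever any statement or the commit returns ABORTED.
// txnBody is a caller-supplied function; a real implementation would also
// apply backoff and a total time limit.
func runWithRetries(ctx context.Context, client spannerpb.SpannerClient, session string,
	txnBody func(ctx context.Context, txnID []byte) ([]*spannerpb.Mutation, error)) error {
	for {
		txn, err := client.BeginTransaction(ctx, &spannerpb.BeginTransactionRequest{
			Session: session,
			Options: &spannerpb.TransactionOptions{
				Mode: &spannerpb.TransactionOptions_ReadWrite_{
					ReadWrite: &spannerpb.TransactionOptions_ReadWrite{},
				},
			},
		})
		if err != nil {
			return err
		}
		muts, err := txnBody(ctx, txn.GetId())
		if err == nil {
			_, err = client.Commit(ctx, &spannerpb.CommitRequest{
				Session:     session,
				Transaction: &spannerpb.CommitRequest_TransactionId{TransactionId: txn.GetId()},
				Mutations:   muts,
			})
		}
		if status.Code(err) == codes.Aborted {
			continue // retry the whole transaction in the same session
		}
		return err
	}
}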
Idle transactions:
A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last 10 seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold on to locks indefinitely. If an idle transaction is aborted, the commit will fail with error ABORTED.
If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, SELECT 1) prevents the transaction from becoming idle.
Snapshot read-only transactions:
Snapshot read-only transactions provide a simpler method than locking read-write transactions for doing several consistent reads. However, this type of transaction does not support writes.
Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do not acquire locks, they do not block concurrent read-write transactions.
Unlike locking read-write transactions, snapshot read-only transactions never abort. They can fail if the chosen read timestamp is garbage collected; however, the default garbage collection policy is generous enough that most applications do not need to worry about this in practice.
Snapshot read-only transactions do not need to call [Commit][google.spanner.v1.Spanner.Commit] or [Rollback][google.spanner.v1.Spanner.Rollback] (and in fact are not permitted to do so).
To execute a snapshot transaction, the client specifies a timestamp bound, which tells Cloud Spanner how to choose a read timestamp.
The types of timestamp bound are:
- Strong (the default).
- Bounded staleness.
- Exact staleness.
If the Cloud Spanner database to be read is geographically distributed, stale read-only transactions can execute more quickly than strong or read-write transactions, because they are able to execute far from the leader replica.
Each type of timestamp bound is discussed in detail below.
Strong: Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all rows yielded by a single read are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction.
Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are concurrent writes. If consistency across reads is required, the reads should be executed within a transaction or at an exact read timestamp.
Queries on change streams (see below for more details) must also specify the strong read timestamp bound.
See [TransactionOptions.ReadOnly.strong][google.spanner.v1.TransactionOptions.ReadOnly.strong].
Exact staleness:
These timestamp bounds execute reads at a user-specified timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of the global transaction history: they observe modifications done by all transactions with a commit timestamp less than or equal to the read timestamp, and observe none of the modifications done by transactions with a larger commit timestamp. They will block until all conflicting transactions that may be assigned commit timestamps <= the read timestamp have finished.
The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time.
These modes do not require a "negotiation phase" to pick a timestamp. As a result, they execute slightly faster than the equivalent boundedly stale concurrency modes. On the other hand, boundedly stale reads usually return fresher results.
See [TransactionOptions.ReadOnly.read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.read_timestamp] and [TransactionOptions.ReadOnly.exact_staleness][google.spanner.v1.TransactionOptions.ReadOnly.exact_staleness].
Bounded staleness:
Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within the staleness bound that allows execution of the reads at the closest available replica without blocking.
All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the transaction. Boundedly stale reads are not repeatable: two stale reads, even if they use the same staleness bound, can execute at different timestamps and thus return inconsistent results.
Boundedly stale reads execute in two phases: the first phase negotiates a timestamp among all replicas needed to serve the read. In the second phase, reads are executed at the negotiated timestamp.
As a result of the two phase execution, bounded staleness reads are usually a little slower than comparable exact staleness reads. However, they are typically able to return fresher results, and are more likely to execute at the closest replica.
Because the timestamp negotiation requires up-front knowledge of which rows will be read, it can only be used with single-use read-only transactions.
See [TransactionOptions.ReadOnly.max_staleness][google.spanner.v1.TransactionOptions.ReadOnly.max_staleness] and [TransactionOptions.ReadOnly.min_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.min_read_timestamp].
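The bounds described above map onto the TimestampBound variants of TransactionOptions_ReadOnly. A sketch with illustrative values (durationpb and timestamppb are the protobuf well-known-type packages; spannerpb is an assumed alias for this package):

// durationpb and timestamppb are google.golang.org/protobuf/types/known
// packages; spannerpb is an assumed alias for this package.
var (
	exactlyTenSecondsStale = &spannerpb.TransactionOptions_ReadOnly{
		TimestampBound: &spannerpb.TransactionOptions_ReadOnly_ExactStaleness{
			ExactStaleness: durationpb.New(10 * time.Second),
		},
	}
	// max_staleness (like min_read_timestamp) is only valid in single-use
	// transactions.
	atMostTenSecondsStale = &spannerpb.TransactionOptions_ReadOnly{
		TimestampBound: &spannerpb.TransactionOptions_ReadOnly_MaxStaleness{
			MaxStaleness: durationpb.New(10 * time.Second),
		},
	}
	atExactTimestamp = &spannerpb.TransactionOptions_ReadOnly{
		TimestampBound: &spannerpb.TransactionOptions_ReadOnly_ReadTimestamp{
			ReadTimestamp: timestamppb.New(time.Date(2014, 10, 2, 15, 1, 23, 0, time.UTC)),
		},
		ReturnReadTimestamp: true, // report the timestamp actually used
	}
)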
Old read timestamps and garbage collection:
Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is known as "version GC". By default, version GC reclaims versions after they are one hour old. Because of this, Cloud Spanner cannot perform reads at read timestamps more than one hour in the past. This restriction also applies to in-progress reads and/or SQL queries whose timestamps become too old while executing. Reads and SQL queries with too-old read timestamps fail with the error FAILED_PRECONDITION.
You can configure and extend the VERSION_RETENTION_PERIOD of a database up to a period as long as one week, which allows Cloud Spanner to perform reads up to one week in the past.
Querying change streams:
A Change Stream is a schema object that can be configured to watch data changes on the entire database, a set of tables, or a set of columns in a database.
When a change stream is created, Spanner automatically defines a corresponding SQL Table-Valued Function (TVF) that can be used to query the change records in the associated change stream using the ExecuteStreamingSql API. The name of the TVF for a change stream is generated from the name of the change stream: READ_<change_stream_name>.
All queries on change stream TVFs must be executed using the ExecuteStreamingSql API with a single-use read-only transaction with a strong read-only timestamp_bound. The change stream TVF allows users to specify the start_timestamp and end_timestamp for the time range of interest. All change records within the retention period are accessible using the strong read-only timestamp_bound. All other TransactionOptions are invalid for change stream queries.
In addition, if TransactionOptions.read_only.return_read_timestamp is set to true, a special value of 2^63 - 2 will be returned in the [Transaction][google.spanner.v1.Transaction] message that describes the transaction, instead of a valid read timestamp. This special value should be discarded and not used for any subsequent queries.
Please see https://cloud.google.com/spanner/docs/change-streams for more details on how to query the change stream TVFs.
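A hedged sketch of such a query against the raw API: the change stream name (READ_MyStream), the TVF argument list, and the parameter encoding follow the linked change-stream documentation and are illustrative rather than verbatim; spannerpb is an assumed import alias.

import (
	"context"
	"time"

	"google.golang.org/protobuf/types/known/structpb"
	"google.golang.org/protobuf/types/known/timestamppb"

	spannerpb "cloud.google.com/go/spanner/apiv1/spannerpb" // path assumed
)

// readChangeStream starts a streaming query over the READ_MyStream TVF for
// the given time range. Drain the returned stream with Recv as usual.
func readChangeStream(ctx context.Context, client spannerpb.SpannerClient, session string,
	start, end *timestamppb.Timestamp) (spannerpb.Spanner_ExecuteStreamingSqlClient, error) {
	return client.ExecuteStreamingSql(ctx, &spannerpb.ExecuteSqlRequest{
		Session: session,
		// NULL partition token reads the root partition; 300 is a heartbeat
		// interval in milliseconds (argument shapes per the linked docs).
		Sql: "SELECT ChangeRecord FROM READ_MyStream(@start, @end, NULL, 300)",
		Params: &structpb.Struct{Fields: map[string]*structpb.Value{
			// TIMESTAMP parameters travel as RFC 3339 strings in UTC ("Z").
			"start": structpb.NewStringValue(start.AsTime().Format(time.RFC3339Nano)),
			"end":   structpb.NewStringValue(end.AsTime().Format(time.RFC3339Nano)),
		}},
		ParamTypes: map[string]*spannerpb.Type{
			"start": {Code: spannerpb.TypeCode_TIMESTAMP},
			"end":   {Code: spannerpb.TypeCode_TIMESTAMP},
		},
		// Change stream queries require a single-use, strong read-only
		// transaction.
		Transaction: &spannerpb.TransactionSelector{
			Selector: &spannerpb.TransactionSelector_SingleUse{
				SingleUse: &spannerpb.TransactionOptions{
					Mode: &spannerpb.TransactionOptions_ReadOnly_{
						ReadOnly: &spannerpb.TransactionOptions_ReadOnly{
							TimestampBound: &spannerpb.TransactionOptions_ReadOnly_Strong{Strong: true},
						},
					},
				},
			},
		},
	})
}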
Partitioned DML transactions:
Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often better, scalability properties for large, table-wide operations than DML in a ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, should prefer using ReadWrite transactions.
Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These transactions commit automatically when complete, and run independently from one another.
To reduce lock contention, this execution strategy only acquires read locks on rows that match the WHERE clause of the statement. Additionally, the smaller per-partition transactions hold locks for less time.
That said, Partitioned DML is not a drop-in replacement for standard DML used in ReadWrite transactions.
The DML statement must be fully-partitionable. Specifically, the statement must be expressible as the union of many statements which each access only a single row of the table.
The statement is not applied atomically to all rows of the table. Rather, the statement is applied atomically to partitions of the table, in independent transactions. Secondary index rows are updated atomically with the base table rows.
Partitioned DML does not guarantee exactly-once execution semantics against a partition. The statement will be applied at least once to each partition. It is strongly recommended that the DML statement should be idempotent to avoid unexpected results. For instance, it is potentially dangerous to run a statement such as
UPDATE table SET column = column + 1
as it could be run multiple times against some rows.
The partitions are committed automatically - there is no support for Commit or Rollback. If the call returns an error, or if the client issuing the ExecuteSql call dies, it is possible that some rows had the statement executed on them successfully. It is also possible that the statement was never executed against other rows.
Partitioned DML transactions may only contain the execution of a single DML statement via ExecuteSql or ExecuteStreamingSql.
If any error is encountered during the execution of the partitioned DML operation (for instance, a UNIQUE INDEX violation, division by zero, or a value that cannot be stored due to schema constraints), then the operation is stopped at that point and an error is returned. It is possible that at this point, some partitions have been committed (or even committed multiple times), and other partitions have not been run at all.
Given the above, Partitioned DML is a good fit for large, database-wide operations that are idempotent, such as deleting old rows from a very large table.
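A hedged sketch of issuing one Partitioned DML statement through the raw RPCs; the statement text is illustrative and spannerpb is an assumed import alias. Note the transaction is begun explicitly and then referenced by ID.

import (
	"context"

	spannerpb "cloud.google.com/go/spanner/apiv1/spannerpb" // path assumed
)

// partitionedDelete runs one idempotent DML statement as Partitioned DML.
// The transaction is begun explicitly, then referenced by ID; there is no
// Commit or Rollback for this mode.
func partitionedDelete(ctx context.Context, client spannerpb.SpannerClient, session string) error {
	txn, err := client.BeginTransaction(ctx, &spannerpb.BeginTransactionRequest{
		Session: session,
		Options: &spannerpb.TransactionOptions{
			Mode: &spannerpb.TransactionOptions_PartitionedDml_{
				PartitionedDml: &spannerpb.TransactionOptions_PartitionedDml{},
			},
		},
	})
	if err != nil {
		return err
	}
	_, err = client.ExecuteSql(ctx, &spannerpb.ExecuteSqlRequest{
		Session: session,
		Sql:     "DELETE FROM Events WHERE CreatedAt < TIMESTAMP_SUB(CURRENT_TIMESTAMP(), INTERVAL 30 DAY)",
		Transaction: &spannerpb.TransactionSelector{
			Selector: &spannerpb.TransactionSelector_Id{Id: txn.GetId()},
		},
		Seqno: 1, // required for DML statements
	})
	return err
}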
func (*TransactionOptions) Descriptor
func (*TransactionOptions) Descriptor() ([]byte, []int)
Deprecated: Use TransactionOptions.ProtoReflect.Descriptor instead.
func (*TransactionOptions) GetExcludeTxnFromChangeStreams
func (x *TransactionOptions) GetExcludeTxnFromChangeStreams() bool
func (*TransactionOptions) GetMode
func (m *TransactionOptions) GetMode() isTransactionOptions_Mode
func (*TransactionOptions) GetPartitionedDml
func (x *TransactionOptions) GetPartitionedDml() *TransactionOptions_PartitionedDml
func (*TransactionOptions) GetReadOnly
func (x *TransactionOptions) GetReadOnly() *TransactionOptions_ReadOnly
func (*TransactionOptions) GetReadWrite
func (x *TransactionOptions) GetReadWrite() *TransactionOptions_ReadWrite
func (*TransactionOptions) ProtoMessage
func (*TransactionOptions) ProtoMessage()
func (*TransactionOptions) ProtoReflect
func (x *TransactionOptions) ProtoReflect() protoreflect.Message
func (*TransactionOptions) Reset
func (x *TransactionOptions) Reset()
func (*TransactionOptions) String
func (x *TransactionOptions) String() string
TransactionOptions_PartitionedDml
type TransactionOptions_PartitionedDml struct {
// contains filtered or unexported fields
}
Message type to initiate a Partitioned DML transaction.
func (*TransactionOptions_PartitionedDml) Descriptor
func (*TransactionOptions_PartitionedDml) Descriptor() ([]byte, []int)
Deprecated: Use TransactionOptions_PartitionedDml.ProtoReflect.Descriptor instead.
func (*TransactionOptions_PartitionedDml) ProtoMessage
func (*TransactionOptions_PartitionedDml) ProtoMessage()
func (*TransactionOptions_PartitionedDml) ProtoReflect
func (x *TransactionOptions_PartitionedDml) ProtoReflect() protoreflect.Message
func (*TransactionOptions_PartitionedDml) Reset
func (x *TransactionOptions_PartitionedDml) Reset()
func (*TransactionOptions_PartitionedDml) String
func (x *TransactionOptions_PartitionedDml) String() string
TransactionOptions_PartitionedDml_
type TransactionOptions_PartitionedDml_ struct {
// Partitioned DML transaction.
//
// Authorization to begin a Partitioned DML transaction requires
// `spanner.databases.beginPartitionedDmlTransaction` permission
// on the `session` resource.
PartitionedDml *TransactionOptions_PartitionedDml `protobuf:"bytes,3,opt,name=partitioned_dml,json=partitionedDml,proto3,oneof"`
}
TransactionOptions_ReadOnly
type TransactionOptions_ReadOnly struct {
// How to choose the timestamp for the read-only transaction.
//
// Types that are assignable to TimestampBound:
//
// *TransactionOptions_ReadOnly_Strong
// *TransactionOptions_ReadOnly_MinReadTimestamp
// *TransactionOptions_ReadOnly_MaxStaleness
// *TransactionOptions_ReadOnly_ReadTimestamp
// *TransactionOptions_ReadOnly_ExactStaleness
TimestampBound isTransactionOptions_ReadOnly_TimestampBound `protobuf_oneof:"timestamp_bound"`
// If true, the Cloud Spanner-selected read timestamp is included in
// the [Transaction][google.spanner.v1.Transaction] message that describes
// the transaction.
ReturnReadTimestamp bool `protobuf:"varint,6,opt,name=return_read_timestamp,json=returnReadTimestamp,proto3" json:"return_read_timestamp,omitempty"`
// contains filtered or unexported fields
}
Message type to initiate a read-only transaction.
func (*TransactionOptions_ReadOnly) Descriptor
func (*TransactionOptions_ReadOnly) Descriptor() ([]byte, []int)
Deprecated: Use TransactionOptions_ReadOnly.ProtoReflect.Descriptor instead.
func (*TransactionOptions_ReadOnly) GetExactStaleness
func (x *TransactionOptions_ReadOnly) GetExactStaleness() *durationpb.Duration
func (*TransactionOptions_ReadOnly) GetMaxStaleness
func (x *TransactionOptions_ReadOnly) GetMaxStaleness() *durationpb.Duration
func (*TransactionOptions_ReadOnly) GetMinReadTimestamp
func (x *TransactionOptions_ReadOnly) GetMinReadTimestamp() *timestamppb.Timestamp
func (*TransactionOptions_ReadOnly) GetReadTimestamp
func (x *TransactionOptions_ReadOnly) GetReadTimestamp() *timestamppb.Timestamp
func (*TransactionOptions_ReadOnly) GetReturnReadTimestamp
func (x *TransactionOptions_ReadOnly) GetReturnReadTimestamp() bool
func (*TransactionOptions_ReadOnly) GetStrong
func (x *TransactionOptions_ReadOnly) GetStrong() bool
func (*TransactionOptions_ReadOnly) GetTimestampBound
func (m *TransactionOptions_ReadOnly) GetTimestampBound() isTransactionOptions_ReadOnly_TimestampBound
func (*TransactionOptions_ReadOnly) ProtoMessage
func (*TransactionOptions_ReadOnly) ProtoMessage()
func (*TransactionOptions_ReadOnly) ProtoReflect
func (x *TransactionOptions_ReadOnly) ProtoReflect() protoreflect.Message
func (*TransactionOptions_ReadOnly) Reset
func (x *TransactionOptions_ReadOnly) Reset()
func (*TransactionOptions_ReadOnly) String
func (x *TransactionOptions_ReadOnly) String() string
TransactionOptions_ReadOnly_
type TransactionOptions_ReadOnly_ struct {
// Transaction will not write.
//
// Authorization to begin a read-only transaction requires
// `spanner.databases.beginReadOnlyTransaction` permission
// on the `session` resource.
ReadOnly *TransactionOptions_ReadOnly `protobuf:"bytes,2,opt,name=read_only,json=readOnly,proto3,oneof"`
}
TransactionOptions_ReadOnly_ExactStaleness
type TransactionOptions_ReadOnly_ExactStaleness struct {
// Executes all reads at a timestamp that is `exact_staleness`
// old. The timestamp is chosen soon after the read is started.
//
// Guarantees that all writes that have committed more than the
// specified number of seconds ago are visible. Because Cloud Spanner
// chooses the exact timestamp, this mode works even if the client's
// local clock is substantially skewed from Cloud Spanner commit
// timestamps.
//
// Useful for reading at nearby replicas without the distributed
// timestamp negotiation overhead of `max_staleness`.
ExactStaleness *durationpb.Duration `protobuf:"bytes,5,opt,name=exact_staleness,json=exactStaleness,proto3,oneof"`
}
TransactionOptions_ReadOnly_MaxStaleness
type TransactionOptions_ReadOnly_MaxStaleness struct {
// Read data at a timestamp >= `NOW - max_staleness`
// seconds. Guarantees that all writes that have committed more
// than the specified number of seconds ago are visible. Because
// Cloud Spanner chooses the exact timestamp, this mode works even if
// the client's local clock is substantially skewed from Cloud Spanner
// commit timestamps.
//
// Useful for reading the freshest data available at a nearby
// replica, while bounding the possible staleness if the local
// replica has fallen behind.
//
// Note that this option can only be used in single-use
// transactions.
MaxStaleness *durationpb.Duration `protobuf:"bytes,3,opt,name=max_staleness,json=maxStaleness,proto3,oneof"`
}
TransactionOptions_ReadOnly_MinReadTimestamp
type TransactionOptions_ReadOnly_MinReadTimestamp struct {
// Executes all reads at a timestamp >= `min_read_timestamp`.
//
// This is useful for requesting fresher data than some previous
// read, or data that is fresh enough to observe the effects of some
// previously committed transaction whose timestamp is known.
//
// Note that this option can only be used in single-use transactions.
//
// A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
// Example: `"2014-10-02T15:01:23.045123456Z"`.
MinReadTimestamp *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=min_read_timestamp,json=minReadTimestamp,proto3,oneof"`
}
TransactionOptions_ReadOnly_ReadTimestamp
type TransactionOptions_ReadOnly_ReadTimestamp struct {
// Executes all reads at the given timestamp. Unlike other modes,
// reads at a specific timestamp are repeatable; the same read at
// the same timestamp always returns the same data. If the
// timestamp is in the future, the read will block until the
// specified timestamp, modulo the read's deadline.
//
// Useful for large scale consistent reads such as mapreduces, or
// for coordinating many reads against a consistent snapshot of the
// data.
//
// A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
// Example: `"2014-10-02T15:01:23.045123456Z"`.
ReadTimestamp *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=read_timestamp,json=readTimestamp,proto3,oneof"`
}
TransactionOptions_ReadOnly_Strong
type TransactionOptions_ReadOnly_Strong struct {
// Read at a timestamp where all previously committed transactions
// are visible.
Strong bool `protobuf:"varint,1,opt,name=strong,proto3,oneof"`
}
TransactionOptions_ReadWrite
type TransactionOptions_ReadWrite struct {
ReadLockMode TransactionOptions_ReadWrite_ReadLockMode "" /* 165 byte string literal not displayed */
}
Message type to initiate a read-write transaction. Currently this transaction type has no options.
func (*TransactionOptions_ReadWrite) Descriptor
func (*TransactionOptions_ReadWrite) Descriptor() ([]byte, []int)
Deprecated: Use TransactionOptions_ReadWrite.ProtoReflect.Descriptor instead.
func (*TransactionOptions_ReadWrite) GetReadLockMode
func (x *TransactionOptions_ReadWrite) GetReadLockMode() TransactionOptions_ReadWrite_ReadLockMode
func (*TransactionOptions_ReadWrite) ProtoMessage
func (*TransactionOptions_ReadWrite) ProtoMessage()
func (*TransactionOptions_ReadWrite) ProtoReflect
func (x *TransactionOptions_ReadWrite) ProtoReflect() protoreflect.Message
func (*TransactionOptions_ReadWrite) Reset
func (x *TransactionOptions_ReadWrite) Reset()
func (*TransactionOptions_ReadWrite) String
func (x *TransactionOptions_ReadWrite) String() string
TransactionOptions_ReadWrite_
type TransactionOptions_ReadWrite_ struct {
// Transaction may write.
//
// Authorization to begin a read-write transaction requires
// `spanner.databases.beginOrRollbackReadWriteTransaction` permission
// on the `session` resource.
ReadWrite *TransactionOptions_ReadWrite `protobuf:"bytes,1,opt,name=read_write,json=readWrite,proto3,oneof"`
}
TransactionOptions_ReadWrite_ReadLockMode
type TransactionOptions_ReadWrite_ReadLockMode int32
ReadLockMode is used to set the read lock mode for read-write transactions.
TransactionOptions_ReadWrite_READ_LOCK_MODE_UNSPECIFIED, TransactionOptions_ReadWrite_PESSIMISTIC, TransactionOptions_ReadWrite_OPTIMISTIC
const (
// Default value.
//
// If the value is not specified, the pessimistic read lock is used.
TransactionOptions_ReadWrite_READ_LOCK_MODE_UNSPECIFIED TransactionOptions_ReadWrite_ReadLockMode = 0
// Pessimistic lock mode.
//
// Read locks are acquired immediately on read.
TransactionOptions_ReadWrite_PESSIMISTIC TransactionOptions_ReadWrite_ReadLockMode = 1
// Optimistic lock mode.
//
// Locks for reads within the transaction are not acquired on read.
// Instead the locks are acquired on a commit to validate that
// read/queried data has not changed since the transaction started.
TransactionOptions_ReadWrite_OPTIMISTIC TransactionOptions_ReadWrite_ReadLockMode = 2
)
func (TransactionOptions_ReadWrite_ReadLockMode) Descriptor
func (TransactionOptions_ReadWrite_ReadLockMode) Descriptor() protoreflect.EnumDescriptor
func (TransactionOptions_ReadWrite_ReadLockMode) Enum
func (x TransactionOptions_ReadWrite_ReadLockMode) Enum() *TransactionOptions_ReadWrite_ReadLockMode
func (TransactionOptions_ReadWrite_ReadLockMode) EnumDescriptor
func (TransactionOptions_ReadWrite_ReadLockMode) EnumDescriptor() ([]byte, []int)
Deprecated: Use TransactionOptions_ReadWrite_ReadLockMode.Descriptor instead.
func (TransactionOptions_ReadWrite_ReadLockMode) Number
func (x TransactionOptions_ReadWrite_ReadLockMode) Number() protoreflect.EnumNumber
func (TransactionOptions_ReadWrite_ReadLockMode) String
func (x TransactionOptions_ReadWrite_ReadLockMode) String() string
func (TransactionOptions_ReadWrite_ReadLockMode) Type
func (TransactionOptions_ReadWrite_ReadLockMode) Type() protoreflect.EnumType
TransactionSelector
type TransactionSelector struct {
// If no fields are set, the default is a single use transaction
// with strong concurrency.
//
// Types that are assignable to Selector:
//
// *TransactionSelector_SingleUse
// *TransactionSelector_Id
// *TransactionSelector_Begin
Selector isTransactionSelector_Selector `protobuf_oneof:"selector"`
// contains filtered or unexported fields
}
This message is used to select the transaction in which a [Read][google.spanner.v1.Spanner.Read] or [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] call runs.
See [TransactionOptions][google.spanner.v1.TransactionOptions] for more information about transactions.
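A sketch of the begin-then-reference pattern: the first request carries TransactionSelector_Begin, and the transaction ID returned in ResultSetMetadata.transaction is reused by later requests (here, the Commit). SQL text and the spannerpb alias are assumptions.

import (
	"context"

	spannerpb "cloud.google.com/go/spanner/apiv1/spannerpb" // path assumed
)

// beginViaQuery starts a read-write transaction as a side effect of the
// first statement, then commits it by ID. SQL text is illustrative.
func beginViaQuery(ctx context.Context, client spannerpb.SpannerClient, session string) error {
	res, err := client.ExecuteSql(ctx, &spannerpb.ExecuteSqlRequest{
		Session: session,
		Sql:     "UPDATE Counters SET Value = Value + 1 WHERE Name = 'hits'",
		Seqno:   1, // required for DML
		Transaction: &spannerpb.TransactionSelector{
			Selector: &spannerpb.TransactionSelector_Begin{
				Begin: &spannerpb.TransactionOptions{
					Mode: &spannerpb.TransactionOptions_ReadWrite_{
						ReadWrite: &spannerpb.TransactionOptions_ReadWrite{},
					},
				},
			},
		},
	})
	if err != nil {
		return err
	}
	// The new transaction's ID is returned in ResultSetMetadata.transaction.
	txnID := res.GetMetadata().GetTransaction().GetId()
	_, err = client.Commit(ctx, &spannerpb.CommitRequest{
		Session:     session,
		Transaction: &spannerpb.CommitRequest_TransactionId{TransactionId: txnID},
	})
	return err
}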
func (*TransactionSelector) Descriptor
func (*TransactionSelector) Descriptor() ([]byte, []int)
Deprecated: Use TransactionSelector.ProtoReflect.Descriptor instead.
func (*TransactionSelector) GetBegin
func (x *TransactionSelector) GetBegin() *TransactionOptions
func (*TransactionSelector) GetId
func (x *TransactionSelector) GetId() []byte
func (*TransactionSelector) GetSelector
func (m *TransactionSelector) GetSelector() isTransactionSelector_Selector
func (*TransactionSelector) GetSingleUse
func (x *TransactionSelector) GetSingleUse() *TransactionOptions
func (*TransactionSelector) ProtoMessage
func (*TransactionSelector) ProtoMessage()
func (*TransactionSelector) ProtoReflect
func (x *TransactionSelector) ProtoReflect() protoreflect.Message
func (*TransactionSelector) Reset
func (x *TransactionSelector) Reset()
func (*TransactionSelector) String
func (x *TransactionSelector) String() string
TransactionSelector_Begin
type TransactionSelector_Begin struct {
// Begin a new transaction and execute this read or SQL query in
// it. The transaction ID of the new transaction is returned in
// [ResultSetMetadata.transaction][google.spanner.v1.ResultSetMetadata.transaction],
// which is a [Transaction][google.spanner.v1.Transaction].
Begin *TransactionOptions `protobuf:"bytes,3,opt,name=begin,proto3,oneof"`
}
TransactionSelector_Id
type TransactionSelector_Id struct {
// Execute the read or SQL query in a previously-started transaction.
Id []byte `protobuf:"bytes,2,opt,name=id,proto3,oneof"`
}
TransactionSelector_SingleUse
type TransactionSelector_SingleUse struct {
// Execute the read or SQL query in a temporary transaction.
// This is the most efficient way to execute a transaction that
// consists of a single SQL query.
SingleUse *TransactionOptions `protobuf:"bytes,1,opt,name=single_use,json=singleUse,proto3,oneof"`
}
Type
type Type struct {
Code TypeCode `protobuf:"varint,1,opt,name=code,proto3,enum=google.spanner.v1.TypeCode" json:"code,omitempty"`
ArrayElementType *Type `protobuf:"bytes,2,opt,name=array_element_type,json=arrayElementType,proto3" json:"array_element_type,omitempty"`
StructType *StructType `protobuf:"bytes,3,opt,name=struct_type,json=structType,proto3" json:"struct_type,omitempty"`
TypeAnnotation TypeAnnotationCode "" /* 146 byte string literal not displayed */
ProtoTypeFqn string `protobuf:"bytes,5,opt,name=proto_type_fqn,json=protoTypeFqn,proto3" json:"proto_type_fqn,omitempty"`
}
Type indicates the type of a Cloud Spanner value, as might be stored in a table cell or returned from an SQL query.
func (*Type) Descriptor
Deprecated: Use Type.ProtoReflect.Descriptor instead.
func (*Type) GetArrayElementType
func (*Type) GetCode
func (*Type) GetProtoTypeFqn
func (*Type) GetStructType
func (x *Type) GetStructType() *StructType
func (*Type) GetTypeAnnotation
func (x *Type) GetTypeAnnotation() TypeAnnotationCode
func (*Type) ProtoMessage
func (*Type) ProtoMessage()
func (*Type) ProtoReflect
func (x *Type) ProtoReflect() protoreflect.Message
func (*Type) Reset
func (x *Type) Reset()
func (*Type) String
TypeAnnotationCode
type TypeAnnotationCode int32
TypeAnnotationCode is used as a part of [Type][google.spanner.v1.Type] to disambiguate SQL types that should be used for a given Cloud Spanner value. Disambiguation is needed because the same Cloud Spanner type can be mapped to different SQL types depending on the SQL dialect. TypeAnnotationCode doesn't affect the way a value is serialized.
TypeAnnotationCode_TYPE_ANNOTATION_CODE_UNSPECIFIED, TypeAnnotationCode_PG_NUMERIC, TypeAnnotationCode_PG_JSONB, TypeAnnotationCode_PG_OID
const (
// Not specified.
TypeAnnotationCode_TYPE_ANNOTATION_CODE_UNSPECIFIED TypeAnnotationCode = 0
// PostgreSQL compatible NUMERIC type. This annotation needs to be applied to
// [Type][google.spanner.v1.Type] instances having [NUMERIC][google.spanner.v1.TypeCode.NUMERIC]
// type code to specify that values of this type should be treated as
// PostgreSQL NUMERIC values. Currently this annotation is always needed for
// [NUMERIC][google.spanner.v1.TypeCode.NUMERIC] when a client interacts with PostgreSQL-enabled
// Spanner databases.
TypeAnnotationCode_PG_NUMERIC TypeAnnotationCode = 2
// PostgreSQL compatible JSONB type. This annotation needs to be applied to
// [Type][google.spanner.v1.Type] instances having [JSON][google.spanner.v1.TypeCode.JSON]
// type code to specify that values of this type should be treated as
// PostgreSQL JSONB values. Currently this annotation is always needed for
// [JSON][google.spanner.v1.TypeCode.JSON] when a client interacts with PostgreSQL-enabled
// Spanner databases.
TypeAnnotationCode_PG_JSONB TypeAnnotationCode = 3
// PostgreSQL compatible OID type. This annotation can be used by a client
// interacting with PostgreSQL-enabled Spanner database to specify that a
// value should be treated using the semantics of the OID type.
TypeAnnotationCode_PG_OID TypeAnnotationCode = 4
)
func (TypeAnnotationCode) Descriptor
func (TypeAnnotationCode) Descriptor() protoreflect.EnumDescriptor
func (TypeAnnotationCode) Enum
func (x TypeAnnotationCode) Enum() *TypeAnnotationCode
func (TypeAnnotationCode) EnumDescriptor
func (TypeAnnotationCode) EnumDescriptor() ([]byte, []int)
Deprecated: Use TypeAnnotationCode.Descriptor instead.
func (TypeAnnotationCode) Number
func (x TypeAnnotationCode) Number() protoreflect.EnumNumber
func (TypeAnnotationCode) String
func (x TypeAnnotationCode) String() string
func (TypeAnnotationCode) Type
func (TypeAnnotationCode) Type() protoreflect.EnumType
TypeCode
type TypeCode int32
TypeCode is used as part of [Type][google.spanner.v1.Type] to indicate the type of a Cloud Spanner value.
Each legal value of a type can be encoded to or decoded from a JSON value, using the encodings described below. All Cloud Spanner values can be null, regardless of type; nulls are always encoded as a JSON null.
TypeCode_TYPE_CODE_UNSPECIFIED, TypeCode_BOOL, TypeCode_INT64, TypeCode_FLOAT64, TypeCode_FLOAT32, TypeCode_TIMESTAMP, TypeCode_DATE, TypeCode_STRING, TypeCode_BYTES, TypeCode_ARRAY, TypeCode_STRUCT, TypeCode_NUMERIC, TypeCode_JSON, TypeCode_PROTO, TypeCode_ENUM
const (
// Not specified.
TypeCode_TYPE_CODE_UNSPECIFIED TypeCode = 0
// Encoded as JSON `true` or `false`.
TypeCode_BOOL TypeCode = 1
// Encoded as `string`, in decimal format.
TypeCode_INT64 TypeCode = 2
// Encoded as `number`, or the strings `"NaN"`, `"Infinity"`, or
// `"-Infinity"`.
TypeCode_FLOAT64 TypeCode = 3
// Encoded as `number`, or the strings `"NaN"`, `"Infinity"`, or
// `"-Infinity"`.
TypeCode_FLOAT32 TypeCode = 15
// Encoded as `string` in RFC 3339 timestamp format. The time zone
// must be present, and must be `"Z"`.
//
// If the schema has the column option
// `allow_commit_timestamp=true`, the placeholder string
// `"spanner.commit_timestamp()"` can be used to instruct the system
// to insert the commit timestamp associated with the transaction
// commit.
TypeCode_TIMESTAMP TypeCode = 4
// Encoded as `string` in RFC 3339 date format.
TypeCode_DATE TypeCode = 5
// Encoded as `string`.
TypeCode_STRING TypeCode = 6
// Encoded as a base64-encoded `string`, as described in RFC 4648,
// section 4.
TypeCode_BYTES TypeCode = 7
// Encoded as `list`, where the list elements are represented
// according to
// [array_element_type][google.spanner.v1.Type.array_element_type].
TypeCode_ARRAY TypeCode = 8
// Encoded as `list`, where list element `i` is represented according
// to [struct_type.fields[i]][google.spanner.v1.StructType.fields].
TypeCode_STRUCT TypeCode = 9
// Encoded as `string`, in decimal format or scientific notation format.
//
// Decimal format:
// `[+-]Digits[.[Digits]]` or
// `[+-][Digits].Digits`
//
// Scientific notation:
// `[+-]Digits[.[Digits]][ExponentIndicator[+-]Digits]` or
// `[+-][Digits].Digits[ExponentIndicator[+-]Digits]`
//
// (ExponentIndicator is `"e"` or `"E"`)
TypeCode_NUMERIC TypeCode = 10
// Encoded as a JSON-formatted `string` as described in RFC 7159. The
// following rules are applied when parsing JSON input:
//
// - Whitespace characters are not preserved.
// - If a JSON object has duplicate keys, only the first key is preserved.
// - Members of a JSON object are not guaranteed to have their order
// preserved.
// - JSON array elements will have their order preserved.
TypeCode_JSON TypeCode = 11
// Encoded as a base64-encoded `string`, as described in RFC 4648,
// section 4.
TypeCode_PROTO TypeCode = 13
// Encoded as `string`, in decimal format.
TypeCode_ENUM TypeCode = 14
)
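A sketch of decoding a single column value according to these encodings; only the common codes are handled, and the "NaN"/"Infinity" string forms of floats as well as ARRAY/STRUCT recursion are left out for brevity. spannerpb is an assumed import alias.

import (
	"encoding/base64"
	"fmt"
	"strconv"
	"time"

	"google.golang.org/protobuf/types/known/structpb"

	spannerpb "cloud.google.com/go/spanner/apiv1/spannerpb" // path assumed
)

// decodeValue converts one JSON-encoded column value into a Go value
// according to its TypeCode.
func decodeValue(t *spannerpb.Type, v *structpb.Value) (interface{}, error) {
	if _, isNull := v.GetKind().(*structpb.Value_NullValue); isNull {
		return nil, nil // any type may be null
	}
	switch t.GetCode() {
	case spannerpb.TypeCode_BOOL:
		return v.GetBoolValue(), nil
	case spannerpb.TypeCode_INT64, spannerpb.TypeCode_ENUM:
		return strconv.ParseInt(v.GetStringValue(), 10, 64) // decimal string
	case spannerpb.TypeCode_FLOAT64, spannerpb.TypeCode_FLOAT32:
		return v.GetNumberValue(), nil
	case spannerpb.TypeCode_STRING, spannerpb.TypeCode_NUMERIC, spannerpb.TypeCode_JSON:
		return v.GetStringValue(), nil
	case spannerpb.TypeCode_BYTES, spannerpb.TypeCode_PROTO:
		return base64.StdEncoding.DecodeString(v.GetStringValue())
	case spannerpb.TypeCode_TIMESTAMP:
		return time.Parse(time.RFC3339Nano, v.GetStringValue())
	case spannerpb.TypeCode_DATE:
		return time.Parse("2006-01-02", v.GetStringValue())
	default:
		return nil, fmt.Errorf("unhandled type code %v", t.GetCode())
	}
}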
func (TypeCode) Descriptor
func (TypeCode) Descriptor() protoreflect.EnumDescriptor
func (TypeCode) Enum
func (TypeCode) EnumDescriptor
Deprecated: Use TypeCode.Descriptor instead.
func (TypeCode) Number
func (x TypeCode) Number() protoreflect.EnumNumber
func (TypeCode) String
func (TypeCode) Type
func (TypeCode) Type() protoreflect.EnumType
UnimplementedSpannerServer
type UnimplementedSpannerServer struct {
}
UnimplementedSpannerServer can be embedded to have forward compatible implementations.
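A sketch of the intended embedding: a test fake implements only the methods it needs and inherits Unimplemented responses for the rest. RegisterSpannerServer is assumed to be the registration helper generated alongside these types; adjust if your generated code differs.

import (
	"context"
	"net"

	"google.golang.org/grpc"

	spannerpb "cloud.google.com/go/spanner/apiv1/spannerpb" // path assumed
)

// fakeSpanner implements only CreateSession; every other RPC falls through
// to UnimplementedSpannerServer and returns codes.Unimplemented.
type fakeSpanner struct {
	spannerpb.UnimplementedSpannerServer
}

func (f *fakeSpanner) CreateSession(ctx context.Context, req *spannerpb.CreateSessionRequest) (*spannerpb.Session, error) {
	return &spannerpb.Session{Name: req.GetDatabase() + "/sessions/fake"}, nil
}

func serveFake(lis net.Listener) error {
	s := grpc.NewServer()
	// Registration helper assumed to be generated alongside these types.
	spannerpb.RegisterSpannerServer(s, &fakeSpanner{})
	return s.Serve(lis)
}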
func (*UnimplementedSpannerServer) BatchCreateSessions
func (*UnimplementedSpannerServer) BatchCreateSessions(context.Context, *BatchCreateSessionsRequest) (*BatchCreateSessionsResponse, error)
func (*UnimplementedSpannerServer) BatchWrite
func (*UnimplementedSpannerServer) BatchWrite(*BatchWriteRequest, Spanner_BatchWriteServer) error
func (*UnimplementedSpannerServer) BeginTransaction
func (*UnimplementedSpannerServer) BeginTransaction(context.Context, *BeginTransactionRequest) (*Transaction, error)
func (*UnimplementedSpannerServer) Commit
func (*UnimplementedSpannerServer) Commit(context.Context, *CommitRequest) (*CommitResponse, error)
func (*UnimplementedSpannerServer) CreateSession
func (*UnimplementedSpannerServer) CreateSession(context.Context, *CreateSessionRequest) (*Session, error)
func (*UnimplementedSpannerServer) DeleteSession
func (*UnimplementedSpannerServer) DeleteSession(context.Context, *DeleteSessionRequest) (*emptypb.Empty, error)
func (*UnimplementedSpannerServer) ExecuteBatchDml
func (*UnimplementedSpannerServer) ExecuteBatchDml(context.Context, *ExecuteBatchDmlRequest) (*ExecuteBatchDmlResponse, error)
func (*UnimplementedSpannerServer) ExecuteSql
func (*UnimplementedSpannerServer) ExecuteSql(context.Context, *ExecuteSqlRequest) (*ResultSet, error)
func (*UnimplementedSpannerServer) ExecuteStreamingSql
func (*UnimplementedSpannerServer) ExecuteStreamingSql(*ExecuteSqlRequest, Spanner_ExecuteStreamingSqlServer) error
func (*UnimplementedSpannerServer) GetSession
func (*UnimplementedSpannerServer) GetSession(context.Context, *GetSessionRequest) (*Session, error)
func (*UnimplementedSpannerServer) ListSessions
func (*UnimplementedSpannerServer) ListSessions(context.Context, *ListSessionsRequest) (*ListSessionsResponse, error)
func (*UnimplementedSpannerServer) PartitionQuery
func (*UnimplementedSpannerServer) PartitionQuery(context.Context, *PartitionQueryRequest) (*PartitionResponse, error)
func (*UnimplementedSpannerServer) PartitionRead
func (*UnimplementedSpannerServer) PartitionRead(context.Context, *PartitionReadRequest) (*PartitionResponse, error)
func (*UnimplementedSpannerServer) Read
func (*UnimplementedSpannerServer) Read(context.Context, *ReadRequest) (*ResultSet, error)
func (*UnimplementedSpannerServer) Rollback
func (*UnimplementedSpannerServer) Rollback(context.Context, *RollbackRequest) (*emptypb.Empty, error)
func (*UnimplementedSpannerServer) StreamingRead
func (*UnimplementedSpannerServer) StreamingRead(*ReadRequest, Spanner_StreamingReadServer) error