Class: Google::Cloud::Bigquery::Storage::V1::ReadSession
- Inherits: Object
- Object
- Google::Cloud::Bigquery::Storage::V1::ReadSession
- Extended by:
- Protobuf::MessageExts::ClassMethods
- Includes:
- Protobuf::MessageExts
- Defined in:
- proto_docs/google/cloud/bigquery/storage/v1/stream.rb
Overview
Information about the ReadSession.
Defined Under Namespace
Classes: TableModifiers, TableReadOptions
Instance Attribute Summary collapse
-
#arrow_schema ⇒ ::Google::Cloud::Bigquery::Storage::V1::ArrowSchema
readonly
Output only.
-
#avro_schema ⇒ ::Google::Cloud::Bigquery::Storage::V1::AvroSchema
readonly
Output only.
-
#data_format ⇒ ::Google::Cloud::Bigquery::Storage::V1::DataFormat
Immutable.
-
#estimated_row_count ⇒ ::Integer
readonly
Output only.
-
#estimated_total_bytes_scanned ⇒ ::Integer
readonly
Output only.
-
#estimated_total_physical_file_size ⇒ ::Integer
readonly
Output only.
-
#expire_time ⇒ ::Google::Protobuf::Timestamp
readonly
Output only.
-
#name ⇒ ::String
readonly
Output only.
-
#read_options ⇒ ::Google::Cloud::Bigquery::Storage::V1::ReadSession::TableReadOptions
Optional.
-
#streams ⇒ ::Array<::Google::Cloud::Bigquery::Storage::V1::ReadStream>
readonly
Output only.
-
#table ⇒ ::String
Immutable.
-
#table_modifiers ⇒ ::Google::Cloud::Bigquery::Storage::V1::ReadSession::TableModifiers
Optional.
-
#trace_id ⇒ ::String
Optional.
Instance Attribute Details
#arrow_schema ⇒ ::Google::Cloud::Bigquery::Storage::V1::ArrowSchema (readonly)
Returns Output only. Arrow schema.
90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 |
# File 'proto_docs/google/cloud/bigquery/storage/v1/stream.rb', line 90

class ReadSession
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Additional attributes when reading a table.
  # @!attribute [rw] snapshot_time
  #   @return [::Google::Protobuf::Timestamp]
  #     The snapshot time of the table. If not set, interpreted as now.
  class TableModifiers
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Options dictating how we read a table.
  # @!attribute [rw] selected_fields
  #   @return [::Array<::String>]
  #     Optional. The names of the fields in the table to be returned. If no
  #     field names are specified, then all fields in the table are returned.
  #
  #     Nested fields -- the child elements of a STRUCT field -- can be selected
  #     individually using their fully-qualified names, and will be returned as
  #     record fields containing only the selected nested fields. If a STRUCT
  #     field is specified in the selected fields list, all of the child elements
  #     will be returned.
  #
  #     As an example, consider a table with the following schema:
  #
  #       {
  #           "name": "struct_field",
  #           "type": "RECORD",
  #           "mode": "NULLABLE",
  #           "fields": [
  #               {
  #                   "name": "string_field1",
  #                   "type": "STRING",
  #                   "mode": "NULLABLE"
  #               },
  #               {
  #                   "name": "string_field2",
  #                   "type": "STRING",
  #                   "mode": "NULLABLE"
  #               }
  #           ]
  #       }
  #
  #     Specifying "struct_field" in the selected fields list will result in a
  #     read session schema with the following logical structure:
  #
  #       struct_field {
  #           string_field1
  #           string_field2
  #       }
  #
  #     Specifying "struct_field.string_field1" in the selected fields list will
  #     result in a read session schema with the following logical structure:
  #
  #       struct_field {
  #           string_field1
  #       }
  #
  #     The order of the fields in the read session schema is derived from the
  #     table schema and does not correspond to the order in which the fields are
  #     specified in this list.
  # @!attribute [rw] row_restriction
  #   @return [::String]
  #     SQL text filtering statement, similar to a WHERE clause in a query.
  #     Aggregates are not supported.
  #
  #     Examples: "int_field > 5"
  #               "date_field = CAST('2014-9-27' as DATE)"
  #               "nullable_field is not NULL"
  #               "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
  #               "numeric_field BETWEEN 1.0 AND 5.0"
  #
  #     Restricted to a maximum length of 1 MB.
  # @!attribute [rw] arrow_serialization_options
  #   @return [::Google::Cloud::Bigquery::Storage::V1::ArrowSerializationOptions]
  #     Optional. Options specific to the Apache Arrow output format.
  # @!attribute [rw] avro_serialization_options
  #   @return [::Google::Cloud::Bigquery::Storage::V1::AvroSerializationOptions]
  #     Optional. Options specific to the Apache Avro output format.
  # @!attribute [rw] sample_percentage
  #   @return [::Float]
  #     Optional. Specifies a table sampling percentage. Specifically, the query
  #     planner will use TABLESAMPLE SYSTEM (sample_percentage PERCENT). The
  #     sampling percentage is applied at the data block granularity. It will
  #     randomly choose for each data block whether to read the rows in that data
  #     block. For more details, see
  #     https://cloud.google.com/bigquery/docs/table-sampling
  # @!attribute [rw] response_compression_codec
  #   @return [::Google::Cloud::Bigquery::Storage::V1::ReadSession::TableReadOptions::ResponseCompressionCodec]
  #     Optional. Set response_compression_codec when creating a read session to
  #     enable application-level compression of ReadRows responses.
  class TableReadOptions
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Specifies which compression codec to attempt on the entire serialized
    # response payload (either Arrow record batch or Avro rows). This is
    # not to be confused with the Apache Arrow native compression codecs
    # specified in ArrowSerializationOptions. For performance reasons, when
    # creating a read session requesting Arrow responses, setting both native
    # Arrow compression and application-level response compression will not be
    # allowed - choose, at most, one kind of compression.
    module ResponseCompressionCodec
      # Default is no compression.
      RESPONSE_COMPRESSION_CODEC_UNSPECIFIED = 0

      # Use raw LZ4 compression.
      RESPONSE_COMPRESSION_CODEC_LZ4 = 2
    end
  end
end
#avro_schema ⇒ ::Google::Cloud::Bigquery::Storage::V1::AvroSchema (readonly)
Returns Output only. Avro schema.
90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 |
# File 'proto_docs/google/cloud/bigquery/storage/v1/stream.rb', line 90

class ReadSession
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Additional attributes when reading a table.
  # @!attribute [rw] snapshot_time
  #   @return [::Google::Protobuf::Timestamp]
  #     The snapshot time of the table. If not set, interpreted as now.
  class TableModifiers
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Options dictating how we read a table.
  # @!attribute [rw] selected_fields
  #   @return [::Array<::String>]
  #     Optional. The names of the fields in the table to be returned. If no
  #     field names are specified, then all fields in the table are returned.
  #
  #     Nested fields -- the child elements of a STRUCT field -- can be selected
  #     individually using their fully-qualified names, and will be returned as
  #     record fields containing only the selected nested fields. If a STRUCT
  #     field is specified in the selected fields list, all of the child elements
  #     will be returned.
  #
  #     As an example, consider a table with the following schema:
  #
  #       {
  #           "name": "struct_field",
  #           "type": "RECORD",
  #           "mode": "NULLABLE",
  #           "fields": [
  #               {
  #                   "name": "string_field1",
  #                   "type": "STRING",
  #                   "mode": "NULLABLE"
  #               },
  #               {
  #                   "name": "string_field2",
  #                   "type": "STRING",
  #                   "mode": "NULLABLE"
  #               }
  #           ]
  #       }
  #
  #     Specifying "struct_field" in the selected fields list will result in a
  #     read session schema with the following logical structure:
  #
  #       struct_field {
  #           string_field1
  #           string_field2
  #       }
  #
  #     Specifying "struct_field.string_field1" in the selected fields list will
  #     result in a read session schema with the following logical structure:
  #
  #       struct_field {
  #           string_field1
  #       }
  #
  #     The order of the fields in the read session schema is derived from the
  #     table schema and does not correspond to the order in which the fields are
  #     specified in this list.
  # @!attribute [rw] row_restriction
  #   @return [::String]
  #     SQL text filtering statement, similar to a WHERE clause in a query.
  #     Aggregates are not supported.
  #
  #     Examples: "int_field > 5"
  #               "date_field = CAST('2014-9-27' as DATE)"
  #               "nullable_field is not NULL"
  #               "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
  #               "numeric_field BETWEEN 1.0 AND 5.0"
  #
  #     Restricted to a maximum length of 1 MB.
  # @!attribute [rw] arrow_serialization_options
  #   @return [::Google::Cloud::Bigquery::Storage::V1::ArrowSerializationOptions]
  #     Optional. Options specific to the Apache Arrow output format.
  # @!attribute [rw] avro_serialization_options
  #   @return [::Google::Cloud::Bigquery::Storage::V1::AvroSerializationOptions]
  #     Optional. Options specific to the Apache Avro output format.
  # @!attribute [rw] sample_percentage
  #   @return [::Float]
  #     Optional. Specifies a table sampling percentage. Specifically, the query
  #     planner will use TABLESAMPLE SYSTEM (sample_percentage PERCENT). The
  #     sampling percentage is applied at the data block granularity. It will
  #     randomly choose for each data block whether to read the rows in that data
  #     block. For more details, see
  #     https://cloud.google.com/bigquery/docs/table-sampling
  # @!attribute [rw] response_compression_codec
  #   @return [::Google::Cloud::Bigquery::Storage::V1::ReadSession::TableReadOptions::ResponseCompressionCodec]
  #     Optional. Set response_compression_codec when creating a read session to
  #     enable application-level compression of ReadRows responses.
  class TableReadOptions
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Specifies which compression codec to attempt on the entire serialized
    # response payload (either Arrow record batch or Avro rows). This is
    # not to be confused with the Apache Arrow native compression codecs
    # specified in ArrowSerializationOptions. For performance reasons, when
    # creating a read session requesting Arrow responses, setting both native
    # Arrow compression and application-level response compression will not be
    # allowed - choose, at most, one kind of compression.
    module ResponseCompressionCodec
      # Default is no compression.
      RESPONSE_COMPRESSION_CODEC_UNSPECIFIED = 0

      # Use raw LZ4 compression.
      RESPONSE_COMPRESSION_CODEC_LZ4 = 2
    end
  end
end
#data_format ⇒ ::Google::Cloud::Bigquery::Storage::V1::DataFormat
Returns Immutable. Data format of the output data. DATA_FORMAT_UNSPECIFIED not supported.
90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 |
# File 'proto_docs/google/cloud/bigquery/storage/v1/stream.rb', line 90

class ReadSession
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Additional attributes when reading a table.
  # @!attribute [rw] snapshot_time
  #   @return [::Google::Protobuf::Timestamp]
  #     The snapshot time of the table. If not set, interpreted as now.
  class TableModifiers
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Options dictating how we read a table.
  # @!attribute [rw] selected_fields
  #   @return [::Array<::String>]
  #     Optional. The names of the fields in the table to be returned. If no
  #     field names are specified, then all fields in the table are returned.
  #
  #     Nested fields -- the child elements of a STRUCT field -- can be selected
  #     individually using their fully-qualified names, and will be returned as
  #     record fields containing only the selected nested fields. If a STRUCT
  #     field is specified in the selected fields list, all of the child elements
  #     will be returned.
  #
  #     As an example, consider a table with the following schema:
  #
  #       {
  #           "name": "struct_field",
  #           "type": "RECORD",
  #           "mode": "NULLABLE",
  #           "fields": [
  #               {
  #                   "name": "string_field1",
  #                   "type": "STRING",
  #                   "mode": "NULLABLE"
  #               },
  #               {
  #                   "name": "string_field2",
  #                   "type": "STRING",
  #                   "mode": "NULLABLE"
  #               }
  #           ]
  #       }
  #
  #     Specifying "struct_field" in the selected fields list will result in a
  #     read session schema with the following logical structure:
  #
  #       struct_field {
  #           string_field1
  #           string_field2
  #       }
  #
  #     Specifying "struct_field.string_field1" in the selected fields list will
  #     result in a read session schema with the following logical structure:
  #
  #       struct_field {
  #           string_field1
  #       }
  #
  #     The order of the fields in the read session schema is derived from the
  #     table schema and does not correspond to the order in which the fields are
  #     specified in this list.
  # @!attribute [rw] row_restriction
  #   @return [::String]
  #     SQL text filtering statement, similar to a WHERE clause in a query.
  #     Aggregates are not supported.
  #
  #     Examples: "int_field > 5"
  #               "date_field = CAST('2014-9-27' as DATE)"
  #               "nullable_field is not NULL"
  #               "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
  #               "numeric_field BETWEEN 1.0 AND 5.0"
  #
  #     Restricted to a maximum length of 1 MB.
  # @!attribute [rw] arrow_serialization_options
  #   @return [::Google::Cloud::Bigquery::Storage::V1::ArrowSerializationOptions]
  #     Optional. Options specific to the Apache Arrow output format.
  # @!attribute [rw] avro_serialization_options
  #   @return [::Google::Cloud::Bigquery::Storage::V1::AvroSerializationOptions]
  #     Optional. Options specific to the Apache Avro output format.
  # @!attribute [rw] sample_percentage
  #   @return [::Float]
  #     Optional. Specifies a table sampling percentage. Specifically, the query
  #     planner will use TABLESAMPLE SYSTEM (sample_percentage PERCENT). The
  #     sampling percentage is applied at the data block granularity. It will
  #     randomly choose for each data block whether to read the rows in that data
  #     block. For more details, see
  #     https://cloud.google.com/bigquery/docs/table-sampling
  # @!attribute [rw] response_compression_codec
  #   @return [::Google::Cloud::Bigquery::Storage::V1::ReadSession::TableReadOptions::ResponseCompressionCodec]
  #     Optional. Set response_compression_codec when creating a read session to
  #     enable application-level compression of ReadRows responses.
  class TableReadOptions
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Specifies which compression codec to attempt on the entire serialized
    # response payload (either Arrow record batch or Avro rows). This is
    # not to be confused with the Apache Arrow native compression codecs
    # specified in ArrowSerializationOptions. For performance reasons, when
    # creating a read session requesting Arrow responses, setting both native
    # Arrow compression and application-level response compression will not be
    # allowed - choose, at most, one kind of compression.
    module ResponseCompressionCodec
      # Default is no compression.
      RESPONSE_COMPRESSION_CODEC_UNSPECIFIED = 0

      # Use raw LZ4 compression.
      RESPONSE_COMPRESSION_CODEC_LZ4 = 2
    end
  end
end
#estimated_row_count ⇒ ::Integer (readonly)
Returns Output only. An estimate on the number of rows present in this session's streams. This estimate is based on metadata from the table which might be incomplete or stale.
90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 |
# File 'proto_docs/google/cloud/bigquery/storage/v1/stream.rb', line 90

class ReadSession
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Additional attributes when reading a table.
  # @!attribute [rw] snapshot_time
  #   @return [::Google::Protobuf::Timestamp]
  #     The snapshot time of the table. If not set, interpreted as now.
  class TableModifiers
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Options dictating how we read a table.
  # @!attribute [rw] selected_fields
  #   @return [::Array<::String>]
  #     Optional. The names of the fields in the table to be returned. If no
  #     field names are specified, then all fields in the table are returned.
  #
  #     Nested fields -- the child elements of a STRUCT field -- can be selected
  #     individually using their fully-qualified names, and will be returned as
  #     record fields containing only the selected nested fields. If a STRUCT
  #     field is specified in the selected fields list, all of the child elements
  #     will be returned.
  #
  #     As an example, consider a table with the following schema:
  #
  #       {
  #           "name": "struct_field",
  #           "type": "RECORD",
  #           "mode": "NULLABLE",
  #           "fields": [
  #               {
  #                   "name": "string_field1",
  #                   "type": "STRING",
  #                   "mode": "NULLABLE"
  #               },
  #               {
  #                   "name": "string_field2",
  #                   "type": "STRING",
  #                   "mode": "NULLABLE"
  #               }
  #           ]
  #       }
  #
  #     Specifying "struct_field" in the selected fields list will result in a
  #     read session schema with the following logical structure:
  #
  #       struct_field {
  #           string_field1
  #           string_field2
  #       }
  #
  #     Specifying "struct_field.string_field1" in the selected fields list will
  #     result in a read session schema with the following logical structure:
  #
  #       struct_field {
  #           string_field1
  #       }
  #
  #     The order of the fields in the read session schema is derived from the
  #     table schema and does not correspond to the order in which the fields are
  #     specified in this list.
  # @!attribute [rw] row_restriction
  #   @return [::String]
  #     SQL text filtering statement, similar to a WHERE clause in a query.
  #     Aggregates are not supported.
  #
  #     Examples: "int_field > 5"
  #               "date_field = CAST('2014-9-27' as DATE)"
  #               "nullable_field is not NULL"
  #               "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
  #               "numeric_field BETWEEN 1.0 AND 5.0"
  #
  #     Restricted to a maximum length of 1 MB.
  # @!attribute [rw] arrow_serialization_options
  #   @return [::Google::Cloud::Bigquery::Storage::V1::ArrowSerializationOptions]
  #     Optional. Options specific to the Apache Arrow output format.
  # @!attribute [rw] avro_serialization_options
  #   @return [::Google::Cloud::Bigquery::Storage::V1::AvroSerializationOptions]
  #     Optional. Options specific to the Apache Avro output format.
  # @!attribute [rw] sample_percentage
  #   @return [::Float]
  #     Optional. Specifies a table sampling percentage. Specifically, the query
  #     planner will use TABLESAMPLE SYSTEM (sample_percentage PERCENT). The
  #     sampling percentage is applied at the data block granularity. It will
  #     randomly choose for each data block whether to read the rows in that data
  #     block. For more details, see
  #     https://cloud.google.com/bigquery/docs/table-sampling
  # @!attribute [rw] response_compression_codec
  #   @return [::Google::Cloud::Bigquery::Storage::V1::ReadSession::TableReadOptions::ResponseCompressionCodec]
  #     Optional. Set response_compression_codec when creating a read session to
  #     enable application-level compression of ReadRows responses.
  class TableReadOptions
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Specifies which compression codec to attempt on the entire serialized
    # response payload (either Arrow record batch or Avro rows). This is
    # not to be confused with the Apache Arrow native compression codecs
    # specified in ArrowSerializationOptions. For performance reasons, when
    # creating a read session requesting Arrow responses, setting both native
    # Arrow compression and application-level response compression will not be
    # allowed - choose, at most, one kind of compression.
    module ResponseCompressionCodec
      # Default is no compression.
      RESPONSE_COMPRESSION_CODEC_UNSPECIFIED = 0

      # Use raw LZ4 compression.
      RESPONSE_COMPRESSION_CODEC_LZ4 = 2
    end
  end
end
#estimated_total_bytes_scanned ⇒ ::Integer (readonly)
Returns Output only. An estimate on the number of bytes this session will scan when all streams are completely consumed. This estimate is based on metadata from the table which might be incomplete or stale.
90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 |
# File 'proto_docs/google/cloud/bigquery/storage/v1/stream.rb', line 90

class ReadSession
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Additional attributes when reading a table.
  # @!attribute [rw] snapshot_time
  #   @return [::Google::Protobuf::Timestamp]
  #     The snapshot time of the table. If not set, interpreted as now.
  class TableModifiers
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Options dictating how we read a table.
  # @!attribute [rw] selected_fields
  #   @return [::Array<::String>]
  #     Optional. The names of the fields in the table to be returned. If no
  #     field names are specified, then all fields in the table are returned.
  #
  #     Nested fields -- the child elements of a STRUCT field -- can be selected
  #     individually using their fully-qualified names, and will be returned as
  #     record fields containing only the selected nested fields. If a STRUCT
  #     field is specified in the selected fields list, all of the child elements
  #     will be returned.
  #
  #     As an example, consider a table with the following schema:
  #
  #       {
  #           "name": "struct_field",
  #           "type": "RECORD",
  #           "mode": "NULLABLE",
  #           "fields": [
  #               {
  #                   "name": "string_field1",
  #                   "type": "STRING",
  #                   "mode": "NULLABLE"
  #               },
  #               {
  #                   "name": "string_field2",
  #                   "type": "STRING",
  #                   "mode": "NULLABLE"
  #               }
  #           ]
  #       }
  #
  #     Specifying "struct_field" in the selected fields list will result in a
  #     read session schema with the following logical structure:
  #
  #       struct_field {
  #           string_field1
  #           string_field2
  #       }
  #
  #     Specifying "struct_field.string_field1" in the selected fields list will
  #     result in a read session schema with the following logical structure:
  #
  #       struct_field {
  #           string_field1
  #       }
  #
  #     The order of the fields in the read session schema is derived from the
  #     table schema and does not correspond to the order in which the fields are
  #     specified in this list.
  # @!attribute [rw] row_restriction
  #   @return [::String]
  #     SQL text filtering statement, similar to a WHERE clause in a query.
  #     Aggregates are not supported.
  #
  #     Examples: "int_field > 5"
  #               "date_field = CAST('2014-9-27' as DATE)"
  #               "nullable_field is not NULL"
  #               "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
  #               "numeric_field BETWEEN 1.0 AND 5.0"
  #
  #     Restricted to a maximum length of 1 MB.
  # @!attribute [rw] arrow_serialization_options
  #   @return [::Google::Cloud::Bigquery::Storage::V1::ArrowSerializationOptions]
  #     Optional. Options specific to the Apache Arrow output format.
  # @!attribute [rw] avro_serialization_options
  #   @return [::Google::Cloud::Bigquery::Storage::V1::AvroSerializationOptions]
  #     Optional. Options specific to the Apache Avro output format.
  # @!attribute [rw] sample_percentage
  #   @return [::Float]
  #     Optional. Specifies a table sampling percentage. Specifically, the query
  #     planner will use TABLESAMPLE SYSTEM (sample_percentage PERCENT). The
  #     sampling percentage is applied at the data block granularity. It will
  #     randomly choose for each data block whether to read the rows in that data
  #     block. For more details, see
  #     https://cloud.google.com/bigquery/docs/table-sampling
  # @!attribute [rw] response_compression_codec
  #   @return [::Google::Cloud::Bigquery::Storage::V1::ReadSession::TableReadOptions::ResponseCompressionCodec]
  #     Optional. Set response_compression_codec when creating a read session to
  #     enable application-level compression of ReadRows responses.
  class TableReadOptions
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Specifies which compression codec to attempt on the entire serialized
    # response payload (either Arrow record batch or Avro rows). This is
    # not to be confused with the Apache Arrow native compression codecs
    # specified in ArrowSerializationOptions. For performance reasons, when
    # creating a read session requesting Arrow responses, setting both native
    # Arrow compression and application-level response compression will not be
    # allowed - choose, at most, one kind of compression.
    module ResponseCompressionCodec
      # Default is no compression.
      RESPONSE_COMPRESSION_CODEC_UNSPECIFIED = 0

      # Use raw LZ4 compression.
      RESPONSE_COMPRESSION_CODEC_LZ4 = 2
    end
  end
end
#estimated_total_physical_file_size ⇒ ::Integer (readonly)
Returns Output only. A pre-projected estimate of the total physical size of files (in bytes) that this session will scan when all streams are consumed. This estimate is independent of the selected columns and can be based on incomplete or stale metadata from the table. This field is only set for BigLake tables.
90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 |
# File 'proto_docs/google/cloud/bigquery/storage/v1/stream.rb', line 90

class ReadSession
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Additional attributes when reading a table.
  # @!attribute [rw] snapshot_time
  #   @return [::Google::Protobuf::Timestamp]
  #     The snapshot time of the table. If not set, interpreted as now.
  class TableModifiers
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Options dictating how we read a table.
  # @!attribute [rw] selected_fields
  #   @return [::Array<::String>]
  #     Optional. The names of the fields in the table to be returned. If no
  #     field names are specified, then all fields in the table are returned.
  #
  #     Nested fields -- the child elements of a STRUCT field -- can be selected
  #     individually using their fully-qualified names, and will be returned as
  #     record fields containing only the selected nested fields. If a STRUCT
  #     field is specified in the selected fields list, all of the child elements
  #     will be returned.
  #
  #     As an example, consider a table with the following schema:
  #
  #       {
  #           "name": "struct_field",
  #           "type": "RECORD",
  #           "mode": "NULLABLE",
  #           "fields": [
  #               {
  #                   "name": "string_field1",
  #                   "type": "STRING",
  #                   "mode": "NULLABLE"
  #               },
  #               {
  #                   "name": "string_field2",
  #                   "type": "STRING",
  #                   "mode": "NULLABLE"
  #               }
  #           ]
  #       }
  #
  #     Specifying "struct_field" in the selected fields list will result in a
  #     read session schema with the following logical structure:
  #
  #       struct_field {
  #           string_field1
  #           string_field2
  #       }
  #
  #     Specifying "struct_field.string_field1" in the selected fields list will
  #     result in a read session schema with the following logical structure:
  #
  #       struct_field {
  #           string_field1
  #       }
  #
  #     The order of the fields in the read session schema is derived from the
  #     table schema and does not correspond to the order in which the fields are
  #     specified in this list.
  # @!attribute [rw] row_restriction
  #   @return [::String]
  #     SQL text filtering statement, similar to a WHERE clause in a query.
  #     Aggregates are not supported.
  #
  #     Examples: "int_field > 5"
  #               "date_field = CAST('2014-9-27' as DATE)"
  #               "nullable_field is not NULL"
  #               "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
  #               "numeric_field BETWEEN 1.0 AND 5.0"
  #
  #     Restricted to a maximum length of 1 MB.
  # @!attribute [rw] arrow_serialization_options
  #   @return [::Google::Cloud::Bigquery::Storage::V1::ArrowSerializationOptions]
  #     Optional. Options specific to the Apache Arrow output format.
  # @!attribute [rw] avro_serialization_options
  #   @return [::Google::Cloud::Bigquery::Storage::V1::AvroSerializationOptions]
  #     Optional. Options specific to the Apache Avro output format.
  # @!attribute [rw] sample_percentage
  #   @return [::Float]
  #     Optional. Specifies a table sampling percentage. Specifically, the query
  #     planner will use TABLESAMPLE SYSTEM (sample_percentage PERCENT). The
  #     sampling percentage is applied at the data block granularity. It will
  #     randomly choose for each data block whether to read the rows in that data
  #     block. For more details, see
  #     https://cloud.google.com/bigquery/docs/table-sampling
  # @!attribute [rw] response_compression_codec
  #   @return [::Google::Cloud::Bigquery::Storage::V1::ReadSession::TableReadOptions::ResponseCompressionCodec]
  #     Optional. Set response_compression_codec when creating a read session to
  #     enable application-level compression of ReadRows responses.
  class TableReadOptions
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Specifies which compression codec to attempt on the entire serialized
    # response payload (either Arrow record batch or Avro rows). This is
    # not to be confused with the Apache Arrow native compression codecs
    # specified in ArrowSerializationOptions. For performance reasons, when
    # creating a read session requesting Arrow responses, setting both native
    # Arrow compression and application-level response compression will not be
    # allowed - choose, at most, one kind of compression.
    module ResponseCompressionCodec
      # Default is no compression.
      RESPONSE_COMPRESSION_CODEC_UNSPECIFIED = 0

      # Use raw LZ4 compression.
      RESPONSE_COMPRESSION_CODEC_LZ4 = 2
    end
  end
end
#expire_time ⇒ ::Google::Protobuf::Timestamp (readonly)
Returns Output only. Time at which the session becomes invalid. After this time, subsequent requests to read this Session will return errors. The expire_time is automatically assigned and currently cannot be specified or updated.
90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 |
# File 'proto_docs/google/cloud/bigquery/storage/v1/stream.rb', line 90

class ReadSession
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Additional attributes when reading a table.
  # @!attribute [rw] snapshot_time
  #   @return [::Google::Protobuf::Timestamp]
  #     The snapshot time of the table. If not set, interpreted as now.
  class TableModifiers
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Options dictating how we read a table.
  # @!attribute [rw] selected_fields
  #   @return [::Array<::String>]
  #     Optional. The names of the fields in the table to be returned. If no
  #     field names are specified, then all fields in the table are returned.
  #
  #     Nested fields -- the child elements of a STRUCT field -- can be selected
  #     individually using their fully-qualified names, and will be returned as
  #     record fields containing only the selected nested fields. If a STRUCT
  #     field is specified in the selected fields list, all of the child elements
  #     will be returned.
  #
  #     As an example, consider a table with the following schema:
  #
  #       {
  #           "name": "struct_field",
  #           "type": "RECORD",
  #           "mode": "NULLABLE",
  #           "fields": [
  #               {
  #                   "name": "string_field1",
  #                   "type": "STRING",
  #                   "mode": "NULLABLE"
  #               },
  #               {
  #                   "name": "string_field2",
  #                   "type": "STRING",
  #                   "mode": "NULLABLE"
  #               }
  #           ]
  #       }
  #
  #     Specifying "struct_field" in the selected fields list will result in a
  #     read session schema with the following logical structure:
  #
  #       struct_field {
  #           string_field1
  #           string_field2
  #       }
  #
  #     Specifying "struct_field.string_field1" in the selected fields list will
  #     result in a read session schema with the following logical structure:
  #
  #       struct_field {
  #           string_field1
  #       }
  #
  #     The order of the fields in the read session schema is derived from the
  #     table schema and does not correspond to the order in which the fields are
  #     specified in this list.
  # @!attribute [rw] row_restriction
  #   @return [::String]
  #     SQL text filtering statement, similar to a WHERE clause in a query.
  #     Aggregates are not supported.
  #
  #     Examples: "int_field > 5"
  #               "date_field = CAST('2014-9-27' as DATE)"
  #               "nullable_field is not NULL"
  #               "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
  #               "numeric_field BETWEEN 1.0 AND 5.0"
  #
  #     Restricted to a maximum length of 1 MB.
  # @!attribute [rw] arrow_serialization_options
  #   @return [::Google::Cloud::Bigquery::Storage::V1::ArrowSerializationOptions]
  #     Optional. Options specific to the Apache Arrow output format.
  # @!attribute [rw] avro_serialization_options
  #   @return [::Google::Cloud::Bigquery::Storage::V1::AvroSerializationOptions]
  #     Optional. Options specific to the Apache Avro output format.
  # @!attribute [rw] sample_percentage
  #   @return [::Float]
  #     Optional. Specifies a table sampling percentage. Specifically, the query
  #     planner will use TABLESAMPLE SYSTEM (sample_percentage PERCENT). The
  #     sampling percentage is applied at the data block granularity. It will
  #     randomly choose for each data block whether to read the rows in that data
  #     block. For more details, see
  #     https://cloud.google.com/bigquery/docs/table-sampling
  # @!attribute [rw] response_compression_codec
  #   @return [::Google::Cloud::Bigquery::Storage::V1::ReadSession::TableReadOptions::ResponseCompressionCodec]
  #     Optional. Set response_compression_codec when creating a read session to
  #     enable application-level compression of ReadRows responses.
  class TableReadOptions
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Specifies which compression codec to attempt on the entire serialized
    # response payload (either Arrow record batch or Avro rows). This is
    # not to be confused with the Apache Arrow native compression codecs
    # specified in ArrowSerializationOptions. For performance reasons, when
    # creating a read session requesting Arrow responses, setting both native
    # Arrow compression and application-level response compression will not be
    # allowed - choose, at most, one kind of compression.
    module ResponseCompressionCodec
      # Default is no compression.
      RESPONSE_COMPRESSION_CODEC_UNSPECIFIED = 0

      # Use raw LZ4 compression.
      RESPONSE_COMPRESSION_CODEC_LZ4 = 2
    end
  end
end
#name ⇒ ::String (readonly)
Returns Output only. Unique identifier for the session, in the form
projects/{project_id}/locations/{location}/sessions/{session_id}
.
90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 |
# File 'proto_docs/google/cloud/bigquery/storage/v1/stream.rb', line 90 class ReadSession include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods # Additional attributes when reading a table. # @!attribute [rw] snapshot_time # @return [::Google::Protobuf::Timestamp] # The snapshot time of the table. If not set, interpreted as now. class TableModifiers include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods end # Options dictating how we read a table. # @!attribute [rw] selected_fields # @return [::Array<::String>] # Optional. The names of the fields in the table to be returned. If no # field names are specified, then all fields in the table are returned. # # Nested fields -- the child elements of a STRUCT field -- can be selected # individually using their fully-qualified names, and will be returned as # record fields containing only the selected nested fields. If a STRUCT # field is specified in the selected fields list, all of the child elements # will be returned. # # As an example, consider a table with the following schema: # # { # "name": "struct_field", # "type": "RECORD", # "mode": "NULLABLE", # "fields": [ # { # "name": "string_field1", # "type": "STRING", # "mode": "NULLABLE" # }, # { # "name": "string_field2", # "type": "STRING", # "mode": "NULLABLE" # } # ] # } # # Specifying "struct_field" in the selected fields list will result in a # read session schema with the following logical structure: # # struct_field { # string_field1 # string_field2 # } # # Specifying "struct_field.string_field1" in the selected fields list will # result in a read session schema with the following logical structure: # # struct_field { # string_field1 # } # # The order of the fields in the read session schema is derived from the # table schema and does not correspond to the order in which the fields are # specified in this list. 
# @!attribute [rw] row_restriction # @return [::String] # SQL text filtering statement, similar to a WHERE clause in a query. # Aggregates are not supported. # # Examples: "int_field > 5" # "date_field = CAST('2014-9-27' as DATE)" # "nullable_field is not NULL" # "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))" # "numeric_field BETWEEN 1.0 AND 5.0" # # Restricted to a maximum length for 1 MB. # @!attribute [rw] arrow_serialization_options # @return [::Google::Cloud::Bigquery::Storage::V1::ArrowSerializationOptions] # Optional. Options specific to the Apache Arrow output format. # @!attribute [rw] avro_serialization_options # @return [::Google::Cloud::Bigquery::Storage::V1::AvroSerializationOptions] # Optional. Options specific to the Apache Avro output format # @!attribute [rw] sample_percentage # @return [::Float] # Optional. Specifies a table sampling percentage. Specifically, the query # planner will use TABLESAMPLE SYSTEM (sample_percentage PERCENT). The # sampling percentage is applied at the data block granularity. It will # randomly choose for each data block whether to read the rows in that data # block. For more details, see # https://cloud.google.com/bigquery/docs/table-sampling) # @!attribute [rw] response_compression_codec # @return [::Google::Cloud::Bigquery::Storage::V1::ReadSession::TableReadOptions::ResponseCompressionCodec] # Optional. Set response_compression_codec when creating a read session to # enable application-level compression of ReadRows responses. class TableReadOptions include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods # Specifies which compression codec to attempt on the entire serialized # response payload (either Arrow record batch or Avro rows). This is # not to be confused with the Apache Arrow native compression codecs # specified in ArrowSerializationOptions. 
For performance reasons, when # creating a read session requesting Arrow responses, setting both native # Arrow compression and application-level response compression will not be # allowed - choose, at most, one kind of compression. module ResponseCompressionCodec # Default is no compression. RESPONSE_COMPRESSION_CODEC_UNSPECIFIED = 0 # Use raw LZ4 compression. RESPONSE_COMPRESSION_CODEC_LZ4 = 2 end end end |
#read_options ⇒ ::Google::Cloud::Bigquery::Storage::V1::ReadSession::TableReadOptions
Returns Optional. Read options for this session (e.g. column selection, filters).
90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 |
# File 'proto_docs/google/cloud/bigquery/storage/v1/stream.rb', line 90 class ReadSession include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods # Additional attributes when reading a table. # @!attribute [rw] snapshot_time # @return [::Google::Protobuf::Timestamp] # The snapshot time of the table. If not set, interpreted as now. class TableModifiers include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods end # Options dictating how we read a table. # @!attribute [rw] selected_fields # @return [::Array<::String>] # Optional. The names of the fields in the table to be returned. If no # field names are specified, then all fields in the table are returned. # # Nested fields -- the child elements of a STRUCT field -- can be selected # individually using their fully-qualified names, and will be returned as # record fields containing only the selected nested fields. If a STRUCT # field is specified in the selected fields list, all of the child elements # will be returned. # # As an example, consider a table with the following schema: # # { # "name": "struct_field", # "type": "RECORD", # "mode": "NULLABLE", # "fields": [ # { # "name": "string_field1", # "type": "STRING", # . "mode": "NULLABLE" # }, # { # "name": "string_field2", # "type": "STRING", # "mode": "NULLABLE" # } # ] # } # # Specifying "struct_field" in the selected fields list will result in a # read session schema with the following logical structure: # # struct_field { # string_field1 # string_field2 # } # # Specifying "struct_field.string_field1" in the selected fields list will # result in a read session schema with the following logical structure: # # struct_field { # string_field1 # } # # The order of the fields in the read session schema is derived from the # table schema and does not correspond to the order in which the fields are # specified in this list. 
# @!attribute [rw] row_restriction # @return [::String] # SQL text filtering statement, similar to a WHERE clause in a query. # Aggregates are not supported. # # Examples: "int_field > 5" # "date_field = CAST('2014-9-27' as DATE)" # "nullable_field is not NULL" # "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))" # "numeric_field BETWEEN 1.0 AND 5.0" # # Restricted to a maximum length for 1 MB. # @!attribute [rw] arrow_serialization_options # @return [::Google::Cloud::Bigquery::Storage::V1::ArrowSerializationOptions] # Optional. Options specific to the Apache Arrow output format. # @!attribute [rw] avro_serialization_options # @return [::Google::Cloud::Bigquery::Storage::V1::AvroSerializationOptions] # Optional. Options specific to the Apache Avro output format # @!attribute [rw] sample_percentage # @return [::Float] # Optional. Specifies a table sampling percentage. Specifically, the query # planner will use TABLESAMPLE SYSTEM (sample_percentage PERCENT). The # sampling percentage is applied at the data block granularity. It will # randomly choose for each data block whether to read the rows in that data # block. For more details, see # https://cloud.google.com/bigquery/docs/table-sampling) # @!attribute [rw] response_compression_codec # @return [::Google::Cloud::Bigquery::Storage::V1::ReadSession::TableReadOptions::ResponseCompressionCodec] # Optional. Set response_compression_codec when creating a read session to # enable application-level compression of ReadRows responses. class TableReadOptions include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods # Specifies which compression codec to attempt on the entire serialized # response payload (either Arrow record batch or Avro rows). This is # not to be confused with the Apache Arrow native compression codecs # specified in ArrowSerializationOptions. 
For performance reasons, when # creating a read session requesting Arrow responses, setting both native # Arrow compression and application-level response compression will not be # allowed - choose, at most, one kind of compression. module ResponseCompressionCodec # Default is no compression. RESPONSE_COMPRESSION_CODEC_UNSPECIFIED = 0 # Use raw LZ4 compression. RESPONSE_COMPRESSION_CODEC_LZ4 = 2 end end end |
#streams ⇒ ::Array<::Google::Cloud::Bigquery::Storage::V1::ReadStream> (readonly)
Returns Output only. A list of streams created with the session.
At least one stream is created with the session. In the future, larger request_stream_count values may result in this list being unpopulated; in that case, the user will need to use a List method (not yet available) to get the streams instead.
90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 |
# File 'proto_docs/google/cloud/bigquery/storage/v1/stream.rb', line 90 class ReadSession include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods # Additional attributes when reading a table. # @!attribute [rw] snapshot_time # @return [::Google::Protobuf::Timestamp] # The snapshot time of the table. If not set, interpreted as now. class TableModifiers include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods end # Options dictating how we read a table. # @!attribute [rw] selected_fields # @return [::Array<::String>] # Optional. The names of the fields in the table to be returned. If no # field names are specified, then all fields in the table are returned. # # Nested fields -- the child elements of a STRUCT field -- can be selected # individually using their fully-qualified names, and will be returned as # record fields containing only the selected nested fields. If a STRUCT # field is specified in the selected fields list, all of the child elements # will be returned. # # As an example, consider a table with the following schema: # # { # "name": "struct_field", # "type": "RECORD", # "mode": "NULLABLE", # "fields": [ # { # "name": "string_field1", # "type": "STRING", # . "mode": "NULLABLE" # }, # { # "name": "string_field2", # "type": "STRING", # "mode": "NULLABLE" # } # ] # } # # Specifying "struct_field" in the selected fields list will result in a # read session schema with the following logical structure: # # struct_field { # string_field1 # string_field2 # } # # Specifying "struct_field.string_field1" in the selected fields list will # result in a read session schema with the following logical structure: # # struct_field { # string_field1 # } # # The order of the fields in the read session schema is derived from the # table schema and does not correspond to the order in which the fields are # specified in this list. 
# @!attribute [rw] row_restriction # @return [::String] # SQL text filtering statement, similar to a WHERE clause in a query. # Aggregates are not supported. # # Examples: "int_field > 5" # "date_field = CAST('2014-9-27' as DATE)" # "nullable_field is not NULL" # "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))" # "numeric_field BETWEEN 1.0 AND 5.0" # # Restricted to a maximum length for 1 MB. # @!attribute [rw] arrow_serialization_options # @return [::Google::Cloud::Bigquery::Storage::V1::ArrowSerializationOptions] # Optional. Options specific to the Apache Arrow output format. # @!attribute [rw] avro_serialization_options # @return [::Google::Cloud::Bigquery::Storage::V1::AvroSerializationOptions] # Optional. Options specific to the Apache Avro output format # @!attribute [rw] sample_percentage # @return [::Float] # Optional. Specifies a table sampling percentage. Specifically, the query # planner will use TABLESAMPLE SYSTEM (sample_percentage PERCENT). The # sampling percentage is applied at the data block granularity. It will # randomly choose for each data block whether to read the rows in that data # block. For more details, see # https://cloud.google.com/bigquery/docs/table-sampling) # @!attribute [rw] response_compression_codec # @return [::Google::Cloud::Bigquery::Storage::V1::ReadSession::TableReadOptions::ResponseCompressionCodec] # Optional. Set response_compression_codec when creating a read session to # enable application-level compression of ReadRows responses. class TableReadOptions include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods # Specifies which compression codec to attempt on the entire serialized # response payload (either Arrow record batch or Avro rows). This is # not to be confused with the Apache Arrow native compression codecs # specified in ArrowSerializationOptions. 
For performance reasons, when # creating a read session requesting Arrow responses, setting both native # Arrow compression and application-level response compression will not be # allowed - choose, at most, one kind of compression. module ResponseCompressionCodec # Default is no compression. RESPONSE_COMPRESSION_CODEC_UNSPECIFIED = 0 # Use raw LZ4 compression. RESPONSE_COMPRESSION_CODEC_LZ4 = 2 end end end |
#table ⇒ ::String
Returns Immutable. Table that this ReadSession is reading from, in the form
projects/{project_id}/datasets/{dataset_id}/tables/{table_id}
.
90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 |
# File 'proto_docs/google/cloud/bigquery/storage/v1/stream.rb', line 90 class ReadSession include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods # Additional attributes when reading a table. # @!attribute [rw] snapshot_time # @return [::Google::Protobuf::Timestamp] # The snapshot time of the table. If not set, interpreted as now. class TableModifiers include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods end # Options dictating how we read a table. # @!attribute [rw] selected_fields # @return [::Array<::String>] # Optional. The names of the fields in the table to be returned. If no # field names are specified, then all fields in the table are returned. # # Nested fields -- the child elements of a STRUCT field -- can be selected # individually using their fully-qualified names, and will be returned as # record fields containing only the selected nested fields. If a STRUCT # field is specified in the selected fields list, all of the child elements # will be returned. # # As an example, consider a table with the following schema: # # { # "name": "struct_field", # "type": "RECORD", # "mode": "NULLABLE", # "fields": [ # { # "name": "string_field1", # "type": "STRING", # . "mode": "NULLABLE" # }, # { # "name": "string_field2", # "type": "STRING", # "mode": "NULLABLE" # } # ] # } # # Specifying "struct_field" in the selected fields list will result in a # read session schema with the following logical structure: # # struct_field { # string_field1 # string_field2 # } # # Specifying "struct_field.string_field1" in the selected fields list will # result in a read session schema with the following logical structure: # # struct_field { # string_field1 # } # # The order of the fields in the read session schema is derived from the # table schema and does not correspond to the order in which the fields are # specified in this list. 
# @!attribute [rw] row_restriction # @return [::String] # SQL text filtering statement, similar to a WHERE clause in a query. # Aggregates are not supported. # # Examples: "int_field > 5" # "date_field = CAST('2014-9-27' as DATE)" # "nullable_field is not NULL" # "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))" # "numeric_field BETWEEN 1.0 AND 5.0" # # Restricted to a maximum length for 1 MB. # @!attribute [rw] arrow_serialization_options # @return [::Google::Cloud::Bigquery::Storage::V1::ArrowSerializationOptions] # Optional. Options specific to the Apache Arrow output format. # @!attribute [rw] avro_serialization_options # @return [::Google::Cloud::Bigquery::Storage::V1::AvroSerializationOptions] # Optional. Options specific to the Apache Avro output format # @!attribute [rw] sample_percentage # @return [::Float] # Optional. Specifies a table sampling percentage. Specifically, the query # planner will use TABLESAMPLE SYSTEM (sample_percentage PERCENT). The # sampling percentage is applied at the data block granularity. It will # randomly choose for each data block whether to read the rows in that data # block. For more details, see # https://cloud.google.com/bigquery/docs/table-sampling) # @!attribute [rw] response_compression_codec # @return [::Google::Cloud::Bigquery::Storage::V1::ReadSession::TableReadOptions::ResponseCompressionCodec] # Optional. Set response_compression_codec when creating a read session to # enable application-level compression of ReadRows responses. class TableReadOptions include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods # Specifies which compression codec to attempt on the entire serialized # response payload (either Arrow record batch or Avro rows). This is # not to be confused with the Apache Arrow native compression codecs # specified in ArrowSerializationOptions. 
For performance reasons, when # creating a read session requesting Arrow responses, setting both native # Arrow compression and application-level response compression will not be # allowed - choose, at most, one kind of compression. module ResponseCompressionCodec # Default is no compression. RESPONSE_COMPRESSION_CODEC_UNSPECIFIED = 0 # Use raw LZ4 compression. RESPONSE_COMPRESSION_CODEC_LZ4 = 2 end end end |
#table_modifiers ⇒ ::Google::Cloud::Bigquery::Storage::V1::ReadSession::TableModifiers
Returns Optional. Any modifiers which are applied when reading from the specified table.
90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 |
# File 'proto_docs/google/cloud/bigquery/storage/v1/stream.rb', line 90 class ReadSession include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods # Additional attributes when reading a table. # @!attribute [rw] snapshot_time # @return [::Google::Protobuf::Timestamp] # The snapshot time of the table. If not set, interpreted as now. class TableModifiers include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods end # Options dictating how we read a table. # @!attribute [rw] selected_fields # @return [::Array<::String>] # Optional. The names of the fields in the table to be returned. If no # field names are specified, then all fields in the table are returned. # # Nested fields -- the child elements of a STRUCT field -- can be selected # individually using their fully-qualified names, and will be returned as # record fields containing only the selected nested fields. If a STRUCT # field is specified in the selected fields list, all of the child elements # will be returned. # # As an example, consider a table with the following schema: # # { # "name": "struct_field", # "type": "RECORD", # "mode": "NULLABLE", # "fields": [ # { # "name": "string_field1", # "type": "STRING", # . "mode": "NULLABLE" # }, # { # "name": "string_field2", # "type": "STRING", # "mode": "NULLABLE" # } # ] # } # # Specifying "struct_field" in the selected fields list will result in a # read session schema with the following logical structure: # # struct_field { # string_field1 # string_field2 # } # # Specifying "struct_field.string_field1" in the selected fields list will # result in a read session schema with the following logical structure: # # struct_field { # string_field1 # } # # The order of the fields in the read session schema is derived from the # table schema and does not correspond to the order in which the fields are # specified in this list. 
# @!attribute [rw] row_restriction # @return [::String] # SQL text filtering statement, similar to a WHERE clause in a query. # Aggregates are not supported. # # Examples: "int_field > 5" # "date_field = CAST('2014-9-27' as DATE)" # "nullable_field is not NULL" # "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))" # "numeric_field BETWEEN 1.0 AND 5.0" # # Restricted to a maximum length for 1 MB. # @!attribute [rw] arrow_serialization_options # @return [::Google::Cloud::Bigquery::Storage::V1::ArrowSerializationOptions] # Optional. Options specific to the Apache Arrow output format. # @!attribute [rw] avro_serialization_options # @return [::Google::Cloud::Bigquery::Storage::V1::AvroSerializationOptions] # Optional. Options specific to the Apache Avro output format # @!attribute [rw] sample_percentage # @return [::Float] # Optional. Specifies a table sampling percentage. Specifically, the query # planner will use TABLESAMPLE SYSTEM (sample_percentage PERCENT). The # sampling percentage is applied at the data block granularity. It will # randomly choose for each data block whether to read the rows in that data # block. For more details, see # https://cloud.google.com/bigquery/docs/table-sampling) # @!attribute [rw] response_compression_codec # @return [::Google::Cloud::Bigquery::Storage::V1::ReadSession::TableReadOptions::ResponseCompressionCodec] # Optional. Set response_compression_codec when creating a read session to # enable application-level compression of ReadRows responses. class TableReadOptions include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods # Specifies which compression codec to attempt on the entire serialized # response payload (either Arrow record batch or Avro rows). This is # not to be confused with the Apache Arrow native compression codecs # specified in ArrowSerializationOptions. 
For performance reasons, when # creating a read session requesting Arrow responses, setting both native # Arrow compression and application-level response compression will not be # allowed - choose, at most, one kind of compression. module ResponseCompressionCodec # Default is no compression. RESPONSE_COMPRESSION_CODEC_UNSPECIFIED = 0 # Use raw LZ4 compression. RESPONSE_COMPRESSION_CODEC_LZ4 = 2 end end end |
#trace_id ⇒ ::String
Returns Optional. ID set by client to annotate a session identity. This does not need to be strictly unique, but instead the same ID should be used to group logically connected sessions (e.g., using the same ID for all sessions needed to complete a Spark SQL query is reasonable).
Maximum length is 256 bytes.
90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 |
# File 'proto_docs/google/cloud/bigquery/storage/v1/stream.rb', line 90 class ReadSession include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods # Additional attributes when reading a table. # @!attribute [rw] snapshot_time # @return [::Google::Protobuf::Timestamp] # The snapshot time of the table. If not set, interpreted as now. class TableModifiers include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods end # Options dictating how we read a table. # @!attribute [rw] selected_fields # @return [::Array<::String>] # Optional. The names of the fields in the table to be returned. If no # field names are specified, then all fields in the table are returned. # # Nested fields -- the child elements of a STRUCT field -- can be selected # individually using their fully-qualified names, and will be returned as # record fields containing only the selected nested fields. If a STRUCT # field is specified in the selected fields list, all of the child elements # will be returned. # # As an example, consider a table with the following schema: # # { # "name": "struct_field", # "type": "RECORD", # "mode": "NULLABLE", # "fields": [ # { # "name": "string_field1", # "type": "STRING", # . "mode": "NULLABLE" # }, # { # "name": "string_field2", # "type": "STRING", # "mode": "NULLABLE" # } # ] # } # # Specifying "struct_field" in the selected fields list will result in a # read session schema with the following logical structure: # # struct_field { # string_field1 # string_field2 # } # # Specifying "struct_field.string_field1" in the selected fields list will # result in a read session schema with the following logical structure: # # struct_field { # string_field1 # } # # The order of the fields in the read session schema is derived from the # table schema and does not correspond to the order in which the fields are # specified in this list. 
# @!attribute [rw] row_restriction # @return [::String] # SQL text filtering statement, similar to a WHERE clause in a query. # Aggregates are not supported. # # Examples: "int_field > 5" # "date_field = CAST('2014-9-27' as DATE)" # "nullable_field is not NULL" # "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))" # "numeric_field BETWEEN 1.0 AND 5.0" # # Restricted to a maximum length for 1 MB. # @!attribute [rw] arrow_serialization_options # @return [::Google::Cloud::Bigquery::Storage::V1::ArrowSerializationOptions] # Optional. Options specific to the Apache Arrow output format. # @!attribute [rw] avro_serialization_options # @return [::Google::Cloud::Bigquery::Storage::V1::AvroSerializationOptions] # Optional. Options specific to the Apache Avro output format # @!attribute [rw] sample_percentage # @return [::Float] # Optional. Specifies a table sampling percentage. Specifically, the query # planner will use TABLESAMPLE SYSTEM (sample_percentage PERCENT). The # sampling percentage is applied at the data block granularity. It will # randomly choose for each data block whether to read the rows in that data # block. For more details, see # https://cloud.google.com/bigquery/docs/table-sampling) # @!attribute [rw] response_compression_codec # @return [::Google::Cloud::Bigquery::Storage::V1::ReadSession::TableReadOptions::ResponseCompressionCodec] # Optional. Set response_compression_codec when creating a read session to # enable application-level compression of ReadRows responses. class TableReadOptions include ::Google::Protobuf::MessageExts extend ::Google::Protobuf::MessageExts::ClassMethods # Specifies which compression codec to attempt on the entire serialized # response payload (either Arrow record batch or Avro rows). This is # not to be confused with the Apache Arrow native compression codecs # specified in ArrowSerializationOptions. 
For performance reasons, when # creating a read session requesting Arrow responses, setting both native # Arrow compression and application-level response compression will not be # allowed - choose, at most, one kind of compression. module ResponseCompressionCodec # Default is no compression. RESPONSE_COMPRESSION_CODEC_UNSPECIFIED = 0 # Use raw LZ4 compression. RESPONSE_COMPRESSION_CODEC_LZ4 = 2 end end end |