Class: Google::Cloud::Notebooks::V1::ExecutionTemplate

Inherits:
Object
  • Object
show all
Extended by:
Protobuf::MessageExts::ClassMethods
Includes:
Protobuf::MessageExts
Defined in:
proto_docs/google/cloud/notebooks/v1/execution.rb

Overview

The description of a notebook execution workload.

Defined Under Namespace

Modules: JobType, ScaleTier, SchedulerAcceleratorType. Classes: DataprocParameters, LabelsEntry, SchedulerAcceleratorConfig, VertexAIParameters.

Instance Attribute Summary collapse

Instance Attribute Details

#accelerator_config::Google::Cloud::Notebooks::V1::ExecutionTemplate::SchedulerAcceleratorConfig

Returns Configuration (count and accelerator type) for hardware running notebook execution.

Returns:



144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
# File 'proto_docs/google/cloud/notebooks/v1/execution.rb', line 144

# The description of a notebook execution workload.
class ExecutionTemplate
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Definition of a hardware accelerator. Note that not all combinations
  # of `type` and `core_count` are valid. Check [GPUs on
  # Compute Engine](https://cloud.google.com/compute/docs/gpus) to find a valid
  # combination. TPUs are not supported.
  # @!attribute [rw] type
  #   @return [::Google::Cloud::Notebooks::V1::ExecutionTemplate::SchedulerAcceleratorType]
  #     Type of this accelerator.
  # @!attribute [rw] core_count
  #   @return [::Integer]
  #     Count of cores of this accelerator.
  class SchedulerAcceleratorConfig
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Dataproc JobType executions.
  # @!attribute [rw] cluster
  #   @return [::String]
  #     URI for cluster used to run Dataproc execution.
  #     Format: `projects/{PROJECT_ID}/regions/{REGION}/clusters/{CLUSTER_NAME}`
  class DataprocParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Vertex AI JobType executions.
  # @!attribute [rw] network
  #   @return [::String]
  #     The full name of the Compute Engine
  #     [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks)
  #     to which the Job should be peered. For example,
  #     `projects/12345/global/networks/myVPC`.
  #     [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert)
  #     is of the form `projects/{project}/global/networks/{network}`.
  #     Where `{project}` is a project number, as in `12345`, and `{network}` is
  #     a network name.
  #
  #     Private services access must already be configured for the network. If
  #     left unspecified, the job is not peered with any network.
  # @!attribute [rw] env
  #   @return [::Google::Protobuf::Map{::String => ::String}]
  #     Environment variables.
  #     At most 100 environment variables can be specified and unique.
  #     Example: `GCP_BUCKET=gs://my-bucket/samples/`
  class VertexAIParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Map entry (key/value pair) backing the `env` map attribute above.
    # @!attribute [rw] key
    #   @return [::String]
    # @!attribute [rw] value
    #   @return [::String]
    class EnvEntry
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end
  end

  # Map entry (key/value pair); generated for the ExecutionTemplate `labels`
  # map field — the field itself is not shown in this snippet, confirm
  # against the proto definition.
  # @!attribute [rw] key
  #   @return [::String]
  # @!attribute [rw] value
  #   @return [::String]
  class LabelsEntry
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Required. Specifies the machine types, the number of replicas for workers
  # and parameter servers.
  module ScaleTier
    # Unspecified Scale Tier.
    SCALE_TIER_UNSPECIFIED = 0

    # A single worker instance. This tier is suitable for learning how to use
    # Cloud ML, and for experimenting with new models using small datasets.
    BASIC = 1

    # Many workers and a few parameter servers.
    STANDARD_1 = 2

    # A large number of workers with many parameter servers.
    PREMIUM_1 = 3

    # A single worker instance with a K80 GPU.
    BASIC_GPU = 4

    # A single worker instance with a Cloud TPU.
    BASIC_TPU = 5

    # The CUSTOM tier is not a set tier, but rather enables you to use your
    # own cluster specification. When you use this tier, set values to
    # configure your processing cluster according to these guidelines:
    #
    # *   You _must_ set `ExecutionTemplate.masterType` to specify the type
    #     of machine to use for your master node. This is the only required
    #     setting.
    CUSTOM = 6
  end

  # Hardware accelerator types for AI Platform Training jobs.
  module SchedulerAcceleratorType
    # Unspecified accelerator type. Default to no GPU.
    SCHEDULER_ACCELERATOR_TYPE_UNSPECIFIED = 0

    # Nvidia Tesla K80 GPU.
    NVIDIA_TESLA_K80 = 1

    # Nvidia Tesla P100 GPU.
    NVIDIA_TESLA_P100 = 2

    # Nvidia Tesla V100 GPU.
    NVIDIA_TESLA_V100 = 3

    # Nvidia Tesla P4 GPU.
    NVIDIA_TESLA_P4 = 4

    # Nvidia Tesla T4 GPU.
    NVIDIA_TESLA_T4 = 5

    # Nvidia Tesla A100 GPU.
    NVIDIA_TESLA_A100 = 10

    # TPU v2. (Enum values are non-contiguous: A100 = 10 precedes these.)
    TPU_V2 = 6

    # TPU v3.
    TPU_V3 = 7
  end

  # The backend used for this execution.
  module JobType
    # No type specified.
    JOB_TYPE_UNSPECIFIED = 0

    # Custom Job in `aiplatform.googleapis.com`.
    # Default value for an execution.
    VERTEX_AI = 1

    # Run execution on a cluster with Dataproc as a job.
    # https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs
    DATAPROC = 2
  end
end

#container_image_uri::String

Returns Container Image URI to a DLVM. Example: 'gcr.io/deeplearning-platform-release/base-cu100'. More examples can be found at: https://cloud.google.com/ai-platform/deep-learning-containers/docs/choosing-container.

Returns:



144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
# File 'proto_docs/google/cloud/notebooks/v1/execution.rb', line 144

# The description of a notebook execution workload.
class ExecutionTemplate
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Definition of a hardware accelerator. Note that not all combinations
  # of `type` and `core_count` are valid. Check [GPUs on
  # Compute Engine](https://cloud.google.com/compute/docs/gpus) to find a valid
  # combination. TPUs are not supported.
  # @!attribute [rw] type
  #   @return [::Google::Cloud::Notebooks::V1::ExecutionTemplate::SchedulerAcceleratorType]
  #     Type of this accelerator.
  # @!attribute [rw] core_count
  #   @return [::Integer]
  #     Count of cores of this accelerator.
  class SchedulerAcceleratorConfig
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Dataproc JobType executions.
  # @!attribute [rw] cluster
  #   @return [::String]
  #     URI for cluster used to run Dataproc execution.
  #     Format: `projects/{PROJECT_ID}/regions/{REGION}/clusters/{CLUSTER_NAME}`
  class DataprocParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Vertex AI JobType executions.
  # @!attribute [rw] network
  #   @return [::String]
  #     The full name of the Compute Engine
  #     [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks)
  #     to which the Job should be peered. For example,
  #     `projects/12345/global/networks/myVPC`.
  #     [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert)
  #     is of the form `projects/{project}/global/networks/{network}`.
  #     Where `{project}` is a project number, as in `12345`, and `{network}` is
  #     a network name.
  #
  #     Private services access must already be configured for the network. If
  #     left unspecified, the job is not peered with any network.
  # @!attribute [rw] env
  #   @return [::Google::Protobuf::Map{::String => ::String}]
  #     Environment variables.
  #     At most 100 environment variables can be specified and unique.
  #     Example: `GCP_BUCKET=gs://my-bucket/samples/`
  class VertexAIParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Map entry (key/value pair) backing the `env` map attribute above.
    # @!attribute [rw] key
    #   @return [::String]
    # @!attribute [rw] value
    #   @return [::String]
    class EnvEntry
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end
  end

  # Map entry (key/value pair); generated for the ExecutionTemplate `labels`
  # map field — the field itself is not shown in this snippet, confirm
  # against the proto definition.
  # @!attribute [rw] key
  #   @return [::String]
  # @!attribute [rw] value
  #   @return [::String]
  class LabelsEntry
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Required. Specifies the machine types, the number of replicas for workers
  # and parameter servers.
  module ScaleTier
    # Unspecified Scale Tier.
    SCALE_TIER_UNSPECIFIED = 0

    # A single worker instance. This tier is suitable for learning how to use
    # Cloud ML, and for experimenting with new models using small datasets.
    BASIC = 1

    # Many workers and a few parameter servers.
    STANDARD_1 = 2

    # A large number of workers with many parameter servers.
    PREMIUM_1 = 3

    # A single worker instance with a K80 GPU.
    BASIC_GPU = 4

    # A single worker instance with a Cloud TPU.
    BASIC_TPU = 5

    # The CUSTOM tier is not a set tier, but rather enables you to use your
    # own cluster specification. When you use this tier, set values to
    # configure your processing cluster according to these guidelines:
    #
    # *   You _must_ set `ExecutionTemplate.masterType` to specify the type
    #     of machine to use for your master node. This is the only required
    #     setting.
    CUSTOM = 6
  end

  # Hardware accelerator types for AI Platform Training jobs.
  module SchedulerAcceleratorType
    # Unspecified accelerator type. Default to no GPU.
    SCHEDULER_ACCELERATOR_TYPE_UNSPECIFIED = 0

    # Nvidia Tesla K80 GPU.
    NVIDIA_TESLA_K80 = 1

    # Nvidia Tesla P100 GPU.
    NVIDIA_TESLA_P100 = 2

    # Nvidia Tesla V100 GPU.
    NVIDIA_TESLA_V100 = 3

    # Nvidia Tesla P4 GPU.
    NVIDIA_TESLA_P4 = 4

    # Nvidia Tesla T4 GPU.
    NVIDIA_TESLA_T4 = 5

    # Nvidia Tesla A100 GPU.
    NVIDIA_TESLA_A100 = 10

    # TPU v2. (Enum values are non-contiguous: A100 = 10 precedes these.)
    TPU_V2 = 6

    # TPU v3.
    TPU_V3 = 7
  end

  # The backend used for this execution.
  module JobType
    # No type specified.
    JOB_TYPE_UNSPECIFIED = 0

    # Custom Job in `aiplatform.googleapis.com`.
    # Default value for an execution.
    VERTEX_AI = 1

    # Run execution on a cluster with Dataproc as a job.
    # https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs
    DATAPROC = 2
  end
end

#dataproc_parameters::Google::Cloud::Notebooks::V1::ExecutionTemplate::DataprocParameters

Returns Parameters used in Dataproc JobType executions.

Returns:



144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
# File 'proto_docs/google/cloud/notebooks/v1/execution.rb', line 144

# The description of a notebook execution workload.
class ExecutionTemplate
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Definition of a hardware accelerator. Note that not all combinations
  # of `type` and `core_count` are valid. Check [GPUs on
  # Compute Engine](https://cloud.google.com/compute/docs/gpus) to find a valid
  # combination. TPUs are not supported.
  # @!attribute [rw] type
  #   @return [::Google::Cloud::Notebooks::V1::ExecutionTemplate::SchedulerAcceleratorType]
  #     Type of this accelerator.
  # @!attribute [rw] core_count
  #   @return [::Integer]
  #     Count of cores of this accelerator.
  class SchedulerAcceleratorConfig
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Dataproc JobType executions.
  # @!attribute [rw] cluster
  #   @return [::String]
  #     URI for cluster used to run Dataproc execution.
  #     Format: `projects/{PROJECT_ID}/regions/{REGION}/clusters/{CLUSTER_NAME}`
  class DataprocParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Vertex AI JobType executions.
  # @!attribute [rw] network
  #   @return [::String]
  #     The full name of the Compute Engine
  #     [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks)
  #     to which the Job should be peered. For example,
  #     `projects/12345/global/networks/myVPC`.
  #     [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert)
  #     is of the form `projects/{project}/global/networks/{network}`.
  #     Where `{project}` is a project number, as in `12345`, and `{network}` is
  #     a network name.
  #
  #     Private services access must already be configured for the network. If
  #     left unspecified, the job is not peered with any network.
  # @!attribute [rw] env
  #   @return [::Google::Protobuf::Map{::String => ::String}]
  #     Environment variables.
  #     At most 100 environment variables can be specified and unique.
  #     Example: `GCP_BUCKET=gs://my-bucket/samples/`
  class VertexAIParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Map entry (key/value pair) backing the `env` map attribute above.
    # @!attribute [rw] key
    #   @return [::String]
    # @!attribute [rw] value
    #   @return [::String]
    class EnvEntry
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end
  end

  # Map entry (key/value pair); generated for the ExecutionTemplate `labels`
  # map field — the field itself is not shown in this snippet, confirm
  # against the proto definition.
  # @!attribute [rw] key
  #   @return [::String]
  # @!attribute [rw] value
  #   @return [::String]
  class LabelsEntry
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Required. Specifies the machine types, the number of replicas for workers
  # and parameter servers.
  module ScaleTier
    # Unspecified Scale Tier.
    SCALE_TIER_UNSPECIFIED = 0

    # A single worker instance. This tier is suitable for learning how to use
    # Cloud ML, and for experimenting with new models using small datasets.
    BASIC = 1

    # Many workers and a few parameter servers.
    STANDARD_1 = 2

    # A large number of workers with many parameter servers.
    PREMIUM_1 = 3

    # A single worker instance with a K80 GPU.
    BASIC_GPU = 4

    # A single worker instance with a Cloud TPU.
    BASIC_TPU = 5

    # The CUSTOM tier is not a set tier, but rather enables you to use your
    # own cluster specification. When you use this tier, set values to
    # configure your processing cluster according to these guidelines:
    #
    # *   You _must_ set `ExecutionTemplate.masterType` to specify the type
    #     of machine to use for your master node. This is the only required
    #     setting.
    CUSTOM = 6
  end

  # Hardware accelerator types for AI Platform Training jobs.
  module SchedulerAcceleratorType
    # Unspecified accelerator type. Default to no GPU.
    SCHEDULER_ACCELERATOR_TYPE_UNSPECIFIED = 0

    # Nvidia Tesla K80 GPU.
    NVIDIA_TESLA_K80 = 1

    # Nvidia Tesla P100 GPU.
    NVIDIA_TESLA_P100 = 2

    # Nvidia Tesla V100 GPU.
    NVIDIA_TESLA_V100 = 3

    # Nvidia Tesla P4 GPU.
    NVIDIA_TESLA_P4 = 4

    # Nvidia Tesla T4 GPU.
    NVIDIA_TESLA_T4 = 5

    # Nvidia Tesla A100 GPU.
    NVIDIA_TESLA_A100 = 10

    # TPU v2. (Enum values are non-contiguous: A100 = 10 precedes these.)
    TPU_V2 = 6

    # TPU v3.
    TPU_V3 = 7
  end

  # The backend used for this execution.
  module JobType
    # No type specified.
    JOB_TYPE_UNSPECIFIED = 0

    # Custom Job in `aiplatform.googleapis.com`.
    # Default value for an execution.
    VERTEX_AI = 1

    # Run execution on a cluster with Dataproc as a job.
    # https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs
    DATAPROC = 2
  end
end

#input_notebook_file::String

Returns Path to the notebook file to execute. Must be in a Google Cloud Storage bucket. Format: gs://{bucket_name}/{folder}/{notebook_file_name} Ex: gs://notebook_user/scheduled_notebooks/sentiment_notebook.ipynb.

Returns:

  • (::String)

    Path to the notebook file to execute. Must be in a Google Cloud Storage bucket. Format: gs://{bucket_name}/{folder}/{notebook_file_name} Ex: gs://notebook_user/scheduled_notebooks/sentiment_notebook.ipynb



144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
# File 'proto_docs/google/cloud/notebooks/v1/execution.rb', line 144

# The description of a notebook execution workload.
class ExecutionTemplate
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Definition of a hardware accelerator. Note that not all combinations
  # of `type` and `core_count` are valid. Check [GPUs on
  # Compute Engine](https://cloud.google.com/compute/docs/gpus) to find a valid
  # combination. TPUs are not supported.
  # @!attribute [rw] type
  #   @return [::Google::Cloud::Notebooks::V1::ExecutionTemplate::SchedulerAcceleratorType]
  #     Type of this accelerator.
  # @!attribute [rw] core_count
  #   @return [::Integer]
  #     Count of cores of this accelerator.
  class SchedulerAcceleratorConfig
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Dataproc JobType executions.
  # @!attribute [rw] cluster
  #   @return [::String]
  #     URI for cluster used to run Dataproc execution.
  #     Format: `projects/{PROJECT_ID}/regions/{REGION}/clusters/{CLUSTER_NAME}`
  class DataprocParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Vertex AI JobType executions.
  # @!attribute [rw] network
  #   @return [::String]
  #     The full name of the Compute Engine
  #     [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks)
  #     to which the Job should be peered. For example,
  #     `projects/12345/global/networks/myVPC`.
  #     [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert)
  #     is of the form `projects/{project}/global/networks/{network}`.
  #     Where `{project}` is a project number, as in `12345`, and `{network}` is
  #     a network name.
  #
  #     Private services access must already be configured for the network. If
  #     left unspecified, the job is not peered with any network.
  # @!attribute [rw] env
  #   @return [::Google::Protobuf::Map{::String => ::String}]
  #     Environment variables.
  #     At most 100 environment variables can be specified and unique.
  #     Example: `GCP_BUCKET=gs://my-bucket/samples/`
  class VertexAIParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Map entry (key/value pair) backing the `env` map attribute above.
    # @!attribute [rw] key
    #   @return [::String]
    # @!attribute [rw] value
    #   @return [::String]
    class EnvEntry
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end
  end

  # Map entry (key/value pair); generated for the ExecutionTemplate `labels`
  # map field — the field itself is not shown in this snippet, confirm
  # against the proto definition.
  # @!attribute [rw] key
  #   @return [::String]
  # @!attribute [rw] value
  #   @return [::String]
  class LabelsEntry
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Required. Specifies the machine types, the number of replicas for workers
  # and parameter servers.
  module ScaleTier
    # Unspecified Scale Tier.
    SCALE_TIER_UNSPECIFIED = 0

    # A single worker instance. This tier is suitable for learning how to use
    # Cloud ML, and for experimenting with new models using small datasets.
    BASIC = 1

    # Many workers and a few parameter servers.
    STANDARD_1 = 2

    # A large number of workers with many parameter servers.
    PREMIUM_1 = 3

    # A single worker instance with a K80 GPU.
    BASIC_GPU = 4

    # A single worker instance with a Cloud TPU.
    BASIC_TPU = 5

    # The CUSTOM tier is not a set tier, but rather enables you to use your
    # own cluster specification. When you use this tier, set values to
    # configure your processing cluster according to these guidelines:
    #
    # *   You _must_ set `ExecutionTemplate.masterType` to specify the type
    #     of machine to use for your master node. This is the only required
    #     setting.
    CUSTOM = 6
  end

  # Hardware accelerator types for AI Platform Training jobs.
  module SchedulerAcceleratorType
    # Unspecified accelerator type. Default to no GPU.
    SCHEDULER_ACCELERATOR_TYPE_UNSPECIFIED = 0

    # Nvidia Tesla K80 GPU.
    NVIDIA_TESLA_K80 = 1

    # Nvidia Tesla P100 GPU.
    NVIDIA_TESLA_P100 = 2

    # Nvidia Tesla V100 GPU.
    NVIDIA_TESLA_V100 = 3

    # Nvidia Tesla P4 GPU.
    NVIDIA_TESLA_P4 = 4

    # Nvidia Tesla T4 GPU.
    NVIDIA_TESLA_T4 = 5

    # Nvidia Tesla A100 GPU.
    NVIDIA_TESLA_A100 = 10

    # TPU v2. (Enum values are non-contiguous: A100 = 10 precedes these.)
    TPU_V2 = 6

    # TPU v3.
    TPU_V3 = 7
  end

  # The backend used for this execution.
  module JobType
    # No type specified.
    JOB_TYPE_UNSPECIFIED = 0

    # Custom Job in `aiplatform.googleapis.com`.
    # Default value for an execution.
    VERTEX_AI = 1

    # Run execution on a cluster with Dataproc as a job.
    # https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs
    DATAPROC = 2
  end
end

#job_type::Google::Cloud::Notebooks::V1::ExecutionTemplate::JobType

Returns The type of Job to be used on this execution.

Returns:



144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
# File 'proto_docs/google/cloud/notebooks/v1/execution.rb', line 144

# The description of a notebook execution workload.
class ExecutionTemplate
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Definition of a hardware accelerator. Note that not all combinations
  # of `type` and `core_count` are valid. Check [GPUs on
  # Compute Engine](https://cloud.google.com/compute/docs/gpus) to find a valid
  # combination. TPUs are not supported.
  # @!attribute [rw] type
  #   @return [::Google::Cloud::Notebooks::V1::ExecutionTemplate::SchedulerAcceleratorType]
  #     Type of this accelerator.
  # @!attribute [rw] core_count
  #   @return [::Integer]
  #     Count of cores of this accelerator.
  class SchedulerAcceleratorConfig
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Dataproc JobType executions.
  # @!attribute [rw] cluster
  #   @return [::String]
  #     URI for cluster used to run Dataproc execution.
  #     Format: `projects/{PROJECT_ID}/regions/{REGION}/clusters/{CLUSTER_NAME}`
  class DataprocParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Vertex AI JobType executions.
  # @!attribute [rw] network
  #   @return [::String]
  #     The full name of the Compute Engine
  #     [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks)
  #     to which the Job should be peered. For example,
  #     `projects/12345/global/networks/myVPC`.
  #     [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert)
  #     is of the form `projects/{project}/global/networks/{network}`.
  #     Where `{project}` is a project number, as in `12345`, and `{network}` is
  #     a network name.
  #
  #     Private services access must already be configured for the network. If
  #     left unspecified, the job is not peered with any network.
  # @!attribute [rw] env
  #   @return [::Google::Protobuf::Map{::String => ::String}]
  #     Environment variables.
  #     At most 100 environment variables can be specified and unique.
  #     Example: `GCP_BUCKET=gs://my-bucket/samples/`
  class VertexAIParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Map entry (key/value pair) backing the `env` map attribute above.
    # @!attribute [rw] key
    #   @return [::String]
    # @!attribute [rw] value
    #   @return [::String]
    class EnvEntry
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end
  end

  # Map entry (key/value pair); generated for the ExecutionTemplate `labels`
  # map field — the field itself is not shown in this snippet, confirm
  # against the proto definition.
  # @!attribute [rw] key
  #   @return [::String]
  # @!attribute [rw] value
  #   @return [::String]
  class LabelsEntry
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Required. Specifies the machine types, the number of replicas for workers
  # and parameter servers.
  module ScaleTier
    # Unspecified Scale Tier.
    SCALE_TIER_UNSPECIFIED = 0

    # A single worker instance. This tier is suitable for learning how to use
    # Cloud ML, and for experimenting with new models using small datasets.
    BASIC = 1

    # Many workers and a few parameter servers.
    STANDARD_1 = 2

    # A large number of workers with many parameter servers.
    PREMIUM_1 = 3

    # A single worker instance with a K80 GPU.
    BASIC_GPU = 4

    # A single worker instance with a Cloud TPU.
    BASIC_TPU = 5

    # The CUSTOM tier is not a set tier, but rather enables you to use your
    # own cluster specification. When you use this tier, set values to
    # configure your processing cluster according to these guidelines:
    #
    # *   You _must_ set `ExecutionTemplate.masterType` to specify the type
    #     of machine to use for your master node. This is the only required
    #     setting.
    CUSTOM = 6
  end

  # Hardware accelerator types for AI Platform Training jobs.
  module SchedulerAcceleratorType
    # Unspecified accelerator type. Default to no GPU.
    SCHEDULER_ACCELERATOR_TYPE_UNSPECIFIED = 0

    # Nvidia Tesla K80 GPU.
    NVIDIA_TESLA_K80 = 1

    # Nvidia Tesla P100 GPU.
    NVIDIA_TESLA_P100 = 2

    # Nvidia Tesla V100 GPU.
    NVIDIA_TESLA_V100 = 3

    # Nvidia Tesla P4 GPU.
    NVIDIA_TESLA_P4 = 4

    # Nvidia Tesla T4 GPU.
    NVIDIA_TESLA_T4 = 5

    # Nvidia Tesla A100 GPU.
    NVIDIA_TESLA_A100 = 10

    # TPU v2. (Enum values are non-contiguous: A100 = 10 precedes these.)
    TPU_V2 = 6

    # TPU v3.
    TPU_V3 = 7
  end

  # The backend used for this execution.
  module JobType
    # No type specified.
    JOB_TYPE_UNSPECIFIED = 0

    # Custom Job in `aiplatform.googleapis.com`.
    # Default value for an execution.
    VERTEX_AI = 1

    # Run execution on a cluster with Dataproc as a job.
    # https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs
    DATAPROC = 2
  end
end

#kernel_spec::String

Returns Name of the kernel spec to use. This must be specified if the kernel spec name on the execution target does not match the name in the input notebook file.

Returns:

  • (::String)

    Name of the kernel spec to use. This must be specified if the kernel spec name on the execution target does not match the name in the input notebook file.



144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
# File 'proto_docs/google/cloud/notebooks/v1/execution.rb', line 144

# The description of a notebook execution workload.
# Generated protobuf message wrapper; fields are declared via @!attribute docs.
class ExecutionTemplate
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Definition of a hardware accelerator. Note that not all combinations
  # of `type` and `core_count` are valid. Check [GPUs on
  # Compute Engine](https://cloud.google.com/compute/docs/gpus) to find a valid
  # combination. TPUs are not supported.
  # @!attribute [rw] type
  #   @return [::Google::Cloud::Notebooks::V1::ExecutionTemplate::SchedulerAcceleratorType]
  #     Type of this accelerator.
  # @!attribute [rw] core_count
  #   @return [::Integer]
  #     Count of cores of this accelerator.
  class SchedulerAcceleratorConfig
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Dataproc JobType executions.
  # @!attribute [rw] cluster
  #   @return [::String]
  #     URI for cluster used to run Dataproc execution.
  #     Format: `projects/{PROJECT_ID}/regions/{REGION}/clusters/{CLUSTER_NAME}`
  class DataprocParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Vertex AI JobType executions.
  # @!attribute [rw] network
  #   @return [::String]
  #     The full name of the Compute Engine
  #     [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks)
  #     to which the Job should be peered. For example,
  #     `projects/12345/global/networks/myVPC`.
  #     [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert)
  #     is of the form `projects/{project}/global/networks/{network}`.
  #     Where `{project}` is a project number, as in `12345`, and `{network}` is
  #     a network name.
  #
  #     Private services access must already be configured for the network. If
  #     left unspecified, the job is not peered with any network.
  # @!attribute [rw] env
  #   @return [::Google::Protobuf::Map{::String => ::String}]
  #     Environment variables.
  #     At most 100 environment variables can be specified and unique.
  #     Example: `GCP_BUCKET=gs://my-bucket/samples/`
  class VertexAIParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Protobuf map entry type backing the `env` field.
    # @!attribute [rw] key
    #   @return [::String]
    # @!attribute [rw] value
    #   @return [::String]
    class EnvEntry
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end
  end

  # Protobuf map entry type backing the `labels` field.
  # @!attribute [rw] key
  #   @return [::String]
  # @!attribute [rw] value
  #   @return [::String]
  class LabelsEntry
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Required. Specifies the machine types, the number of replicas for workers
  # and parameter servers.
  module ScaleTier
    # Unspecified Scale Tier.
    SCALE_TIER_UNSPECIFIED = 0

    # A single worker instance. This tier is suitable for learning how to use
    # Cloud ML, and for experimenting with new models using small datasets.
    BASIC = 1

    # Many workers and a few parameter servers.
    STANDARD_1 = 2

    # A large number of workers with many parameter servers.
    PREMIUM_1 = 3

    # A single worker instance with a K80 GPU.
    BASIC_GPU = 4

    # A single worker instance with a Cloud TPU.
    BASIC_TPU = 5

    # The CUSTOM tier is not a set tier, but rather enables you to use your
    # own cluster specification. When you use this tier, set values to
    # configure your processing cluster according to these guidelines:
    #
    # *   You _must_ set `ExecutionTemplate.masterType` to specify the type
    #     of machine to use for your master node. This is the only required
    #     setting.
    CUSTOM = 6
  end

  # Hardware accelerator types for AI Platform Training jobs.
  module SchedulerAcceleratorType
    # Unspecified accelerator type. Default to no GPU.
    SCHEDULER_ACCELERATOR_TYPE_UNSPECIFIED = 0

    # Nvidia Tesla K80 GPU.
    NVIDIA_TESLA_K80 = 1

    # Nvidia Tesla P100 GPU.
    NVIDIA_TESLA_P100 = 2

    # Nvidia Tesla V100 GPU.
    NVIDIA_TESLA_V100 = 3

    # Nvidia Tesla P4 GPU.
    NVIDIA_TESLA_P4 = 4

    # Nvidia Tesla T4 GPU.
    NVIDIA_TESLA_T4 = 5

    # Nvidia Tesla A100 GPU.
    NVIDIA_TESLA_A100 = 10

    # TPU v2.
    # NOTE(review): TPU values are defined although SchedulerAcceleratorConfig
    # docs state "TPUs are not supported" — confirm against the upstream proto.
    TPU_V2 = 6

    # TPU v3.
    TPU_V3 = 7
  end

  # The backend used for this execution.
  module JobType
    # No type specified.
    JOB_TYPE_UNSPECIFIED = 0

    # Custom Job in `aiplatform.googleapis.com`.
    # Default value for an execution.
    VERTEX_AI = 1

    # Run execution on a cluster with Dataproc as a job.
    # https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs
    DATAPROC = 2
  end
end

#labels::Google::Protobuf::Map{::String => ::String}

Returns Labels for execution. If execution is scheduled, a field included will be 'nbs-scheduled'. Otherwise, it is an immediate execution, and an included field will be 'nbs-immediate'. Use fields to efficiently index between various types of executions.

Returns:

  • (::Google::Protobuf::Map{::String => ::String})

    Labels for execution. If execution is scheduled, a field included will be 'nbs-scheduled'. Otherwise, it is an immediate execution, and an included field will be 'nbs-immediate'. Use fields to efficiently index between various types of executions.



144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
# File 'proto_docs/google/cloud/notebooks/v1/execution.rb', line 144

# The description of a notebook execution workload.
# Generated protobuf message wrapper; fields are declared via @!attribute docs.
class ExecutionTemplate
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Definition of a hardware accelerator. Note that not all combinations
  # of `type` and `core_count` are valid. Check [GPUs on
  # Compute Engine](https://cloud.google.com/compute/docs/gpus) to find a valid
  # combination. TPUs are not supported.
  # @!attribute [rw] type
  #   @return [::Google::Cloud::Notebooks::V1::ExecutionTemplate::SchedulerAcceleratorType]
  #     Type of this accelerator.
  # @!attribute [rw] core_count
  #   @return [::Integer]
  #     Count of cores of this accelerator.
  class SchedulerAcceleratorConfig
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Dataproc JobType executions.
  # @!attribute [rw] cluster
  #   @return [::String]
  #     URI for cluster used to run Dataproc execution.
  #     Format: `projects/{PROJECT_ID}/regions/{REGION}/clusters/{CLUSTER_NAME}`
  class DataprocParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Vertex AI JobType executions.
  # @!attribute [rw] network
  #   @return [::String]
  #     The full name of the Compute Engine
  #     [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks)
  #     to which the Job should be peered. For example,
  #     `projects/12345/global/networks/myVPC`.
  #     [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert)
  #     is of the form `projects/{project}/global/networks/{network}`.
  #     Where `{project}` is a project number, as in `12345`, and `{network}` is
  #     a network name.
  #
  #     Private services access must already be configured for the network. If
  #     left unspecified, the job is not peered with any network.
  # @!attribute [rw] env
  #   @return [::Google::Protobuf::Map{::String => ::String}]
  #     Environment variables.
  #     At most 100 environment variables can be specified and unique.
  #     Example: `GCP_BUCKET=gs://my-bucket/samples/`
  class VertexAIParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Protobuf map entry type backing the `env` field.
    # @!attribute [rw] key
    #   @return [::String]
    # @!attribute [rw] value
    #   @return [::String]
    class EnvEntry
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end
  end

  # Protobuf map entry type backing the `labels` field.
  # @!attribute [rw] key
  #   @return [::String]
  # @!attribute [rw] value
  #   @return [::String]
  class LabelsEntry
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Required. Specifies the machine types, the number of replicas for workers
  # and parameter servers.
  module ScaleTier
    # Unspecified Scale Tier.
    SCALE_TIER_UNSPECIFIED = 0

    # A single worker instance. This tier is suitable for learning how to use
    # Cloud ML, and for experimenting with new models using small datasets.
    BASIC = 1

    # Many workers and a few parameter servers.
    STANDARD_1 = 2

    # A large number of workers with many parameter servers.
    PREMIUM_1 = 3

    # A single worker instance with a K80 GPU.
    BASIC_GPU = 4

    # A single worker instance with a Cloud TPU.
    BASIC_TPU = 5

    # The CUSTOM tier is not a set tier, but rather enables you to use your
    # own cluster specification. When you use this tier, set values to
    # configure your processing cluster according to these guidelines:
    #
    # *   You _must_ set `ExecutionTemplate.masterType` to specify the type
    #     of machine to use for your master node. This is the only required
    #     setting.
    CUSTOM = 6
  end

  # Hardware accelerator types for AI Platform Training jobs.
  module SchedulerAcceleratorType
    # Unspecified accelerator type. Default to no GPU.
    SCHEDULER_ACCELERATOR_TYPE_UNSPECIFIED = 0

    # Nvidia Tesla K80 GPU.
    NVIDIA_TESLA_K80 = 1

    # Nvidia Tesla P100 GPU.
    NVIDIA_TESLA_P100 = 2

    # Nvidia Tesla V100 GPU.
    NVIDIA_TESLA_V100 = 3

    # Nvidia Tesla P4 GPU.
    NVIDIA_TESLA_P4 = 4

    # Nvidia Tesla T4 GPU.
    NVIDIA_TESLA_T4 = 5

    # Nvidia Tesla A100 GPU.
    NVIDIA_TESLA_A100 = 10

    # TPU v2.
    # NOTE(review): TPU values are defined although SchedulerAcceleratorConfig
    # docs state "TPUs are not supported" — confirm against the upstream proto.
    TPU_V2 = 6

    # TPU v3.
    TPU_V3 = 7
  end

  # The backend used for this execution.
  module JobType
    # No type specified.
    JOB_TYPE_UNSPECIFIED = 0

    # Custom Job in `aiplatform.googleapis.com`.
    # Default value for an execution.
    VERTEX_AI = 1

    # Run execution on a cluster with Dataproc as a job.
    # https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs
    DATAPROC = 2
  end
end

#master_type::String

Returns Specifies the type of virtual machine to use for your training job's master worker. You must specify this field when scaleTier is set to CUSTOM.

You can use certain Compute Engine machine types directly in this field. The following types are supported:

  • n1-standard-4
  • n1-standard-8
  • n1-standard-16
  • n1-standard-32
  • n1-standard-64
  • n1-standard-96
  • n1-highmem-2
  • n1-highmem-4
  • n1-highmem-8
  • n1-highmem-16
  • n1-highmem-32
  • n1-highmem-64
  • n1-highmem-96
  • n1-highcpu-16
  • n1-highcpu-32
  • n1-highcpu-64
  • n1-highcpu-96

Alternatively, you can use the following legacy machine types:

  • standard
  • large_model
  • complex_model_s
  • complex_model_m
  • complex_model_l
  • standard_gpu
  • complex_model_m_gpu
  • complex_model_l_gpu
  • standard_p100
  • complex_model_m_p100
  • standard_v100
  • large_model_v100
  • complex_model_m_v100
  • complex_model_l_v100

Finally, if you want to use a TPU for training, specify cloud_tpu in this field. Learn more about the special configuration options for training with TPU.

Returns:

  • (::String)

    Specifies the type of virtual machine to use for your training job's master worker. You must specify this field when scaleTier is set to CUSTOM.

    You can use certain Compute Engine machine types directly in this field. The following types are supported:

    • n1-standard-4
    • n1-standard-8
    • n1-standard-16
    • n1-standard-32
    • n1-standard-64
    • n1-standard-96
    • n1-highmem-2
    • n1-highmem-4
    • n1-highmem-8
    • n1-highmem-16
    • n1-highmem-32
    • n1-highmem-64
    • n1-highmem-96
    • n1-highcpu-16
    • n1-highcpu-32
    • n1-highcpu-64
    • n1-highcpu-96

    Alternatively, you can use the following legacy machine types:

    • standard
    • large_model
    • complex_model_s
    • complex_model_m
    • complex_model_l
    • standard_gpu
    • complex_model_m_gpu
    • complex_model_l_gpu
    • standard_p100
    • complex_model_m_p100
    • standard_v100
    • large_model_v100
    • complex_model_m_v100
    • complex_model_l_v100

    Finally, if you want to use a TPU for training, specify cloud_tpu in this field. Learn more about the special configuration options for training with TPU.



144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
# File 'proto_docs/google/cloud/notebooks/v1/execution.rb', line 144

# The description of a notebook execution workload.
# Generated protobuf message wrapper; fields are declared via @!attribute docs.
class ExecutionTemplate
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Definition of a hardware accelerator. Note that not all combinations
  # of `type` and `core_count` are valid. Check [GPUs on
  # Compute Engine](https://cloud.google.com/compute/docs/gpus) to find a valid
  # combination. TPUs are not supported.
  # @!attribute [rw] type
  #   @return [::Google::Cloud::Notebooks::V1::ExecutionTemplate::SchedulerAcceleratorType]
  #     Type of this accelerator.
  # @!attribute [rw] core_count
  #   @return [::Integer]
  #     Count of cores of this accelerator.
  class SchedulerAcceleratorConfig
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Dataproc JobType executions.
  # @!attribute [rw] cluster
  #   @return [::String]
  #     URI for cluster used to run Dataproc execution.
  #     Format: `projects/{PROJECT_ID}/regions/{REGION}/clusters/{CLUSTER_NAME}`
  class DataprocParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Vertex AI JobType executions.
  # @!attribute [rw] network
  #   @return [::String]
  #     The full name of the Compute Engine
  #     [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks)
  #     to which the Job should be peered. For example,
  #     `projects/12345/global/networks/myVPC`.
  #     [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert)
  #     is of the form `projects/{project}/global/networks/{network}`.
  #     Where `{project}` is a project number, as in `12345`, and `{network}` is
  #     a network name.
  #
  #     Private services access must already be configured for the network. If
  #     left unspecified, the job is not peered with any network.
  # @!attribute [rw] env
  #   @return [::Google::Protobuf::Map{::String => ::String}]
  #     Environment variables.
  #     At most 100 environment variables can be specified and unique.
  #     Example: `GCP_BUCKET=gs://my-bucket/samples/`
  class VertexAIParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Protobuf map entry type backing the `env` field.
    # @!attribute [rw] key
    #   @return [::String]
    # @!attribute [rw] value
    #   @return [::String]
    class EnvEntry
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end
  end

  # Protobuf map entry type backing the `labels` field.
  # @!attribute [rw] key
  #   @return [::String]
  # @!attribute [rw] value
  #   @return [::String]
  class LabelsEntry
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Required. Specifies the machine types, the number of replicas for workers
  # and parameter servers.
  module ScaleTier
    # Unspecified Scale Tier.
    SCALE_TIER_UNSPECIFIED = 0

    # A single worker instance. This tier is suitable for learning how to use
    # Cloud ML, and for experimenting with new models using small datasets.
    BASIC = 1

    # Many workers and a few parameter servers.
    STANDARD_1 = 2

    # A large number of workers with many parameter servers.
    PREMIUM_1 = 3

    # A single worker instance with a K80 GPU.
    BASIC_GPU = 4

    # A single worker instance with a Cloud TPU.
    BASIC_TPU = 5

    # The CUSTOM tier is not a set tier, but rather enables you to use your
    # own cluster specification. When you use this tier, set values to
    # configure your processing cluster according to these guidelines:
    #
    # *   You _must_ set `ExecutionTemplate.masterType` to specify the type
    #     of machine to use for your master node. This is the only required
    #     setting.
    CUSTOM = 6
  end

  # Hardware accelerator types for AI Platform Training jobs.
  module SchedulerAcceleratorType
    # Unspecified accelerator type. Default to no GPU.
    SCHEDULER_ACCELERATOR_TYPE_UNSPECIFIED = 0

    # Nvidia Tesla K80 GPU.
    NVIDIA_TESLA_K80 = 1

    # Nvidia Tesla P100 GPU.
    NVIDIA_TESLA_P100 = 2

    # Nvidia Tesla V100 GPU.
    NVIDIA_TESLA_V100 = 3

    # Nvidia Tesla P4 GPU.
    NVIDIA_TESLA_P4 = 4

    # Nvidia Tesla T4 GPU.
    NVIDIA_TESLA_T4 = 5

    # Nvidia Tesla A100 GPU.
    NVIDIA_TESLA_A100 = 10

    # TPU v2.
    # NOTE(review): TPU values are defined although SchedulerAcceleratorConfig
    # docs state "TPUs are not supported" — confirm against the upstream proto.
    TPU_V2 = 6

    # TPU v3.
    TPU_V3 = 7
  end

  # The backend used for this execution.
  module JobType
    # No type specified.
    JOB_TYPE_UNSPECIFIED = 0

    # Custom Job in `aiplatform.googleapis.com`.
    # Default value for an execution.
    VERTEX_AI = 1

    # Run execution on a cluster with Dataproc as a job.
    # https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs
    DATAPROC = 2
  end
end

#output_notebook_folder::String

Returns Path to the notebook folder to write to. Must be in a Google Cloud Storage bucket path. Format: gs://{bucket_name}/{folder} Ex: gs://notebook_user/scheduled_notebooks.

Returns:

  • (::String)

    Path to the notebook folder to write to. Must be in a Google Cloud Storage bucket path. Format: gs://{bucket_name}/{folder} Ex: gs://notebook_user/scheduled_notebooks.



144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
# File 'proto_docs/google/cloud/notebooks/v1/execution.rb', line 144

# The description of a notebook execution workload.
# Generated protobuf message wrapper; fields are declared via @!attribute docs.
class ExecutionTemplate
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Definition of a hardware accelerator. Note that not all combinations
  # of `type` and `core_count` are valid. Check [GPUs on
  # Compute Engine](https://cloud.google.com/compute/docs/gpus) to find a valid
  # combination. TPUs are not supported.
  # @!attribute [rw] type
  #   @return [::Google::Cloud::Notebooks::V1::ExecutionTemplate::SchedulerAcceleratorType]
  #     Type of this accelerator.
  # @!attribute [rw] core_count
  #   @return [::Integer]
  #     Count of cores of this accelerator.
  class SchedulerAcceleratorConfig
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Dataproc JobType executions.
  # @!attribute [rw] cluster
  #   @return [::String]
  #     URI for cluster used to run Dataproc execution.
  #     Format: `projects/{PROJECT_ID}/regions/{REGION}/clusters/{CLUSTER_NAME}`
  class DataprocParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Vertex AI JobType executions.
  # @!attribute [rw] network
  #   @return [::String]
  #     The full name of the Compute Engine
  #     [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks)
  #     to which the Job should be peered. For example,
  #     `projects/12345/global/networks/myVPC`.
  #     [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert)
  #     is of the form `projects/{project}/global/networks/{network}`.
  #     Where `{project}` is a project number, as in `12345`, and `{network}` is
  #     a network name.
  #
  #     Private services access must already be configured for the network. If
  #     left unspecified, the job is not peered with any network.
  # @!attribute [rw] env
  #   @return [::Google::Protobuf::Map{::String => ::String}]
  #     Environment variables.
  #     At most 100 environment variables can be specified and unique.
  #     Example: `GCP_BUCKET=gs://my-bucket/samples/`
  class VertexAIParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Protobuf map entry type backing the `env` field.
    # @!attribute [rw] key
    #   @return [::String]
    # @!attribute [rw] value
    #   @return [::String]
    class EnvEntry
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end
  end

  # Protobuf map entry type backing the `labels` field.
  # @!attribute [rw] key
  #   @return [::String]
  # @!attribute [rw] value
  #   @return [::String]
  class LabelsEntry
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Required. Specifies the machine types, the number of replicas for workers
  # and parameter servers.
  module ScaleTier
    # Unspecified Scale Tier.
    SCALE_TIER_UNSPECIFIED = 0

    # A single worker instance. This tier is suitable for learning how to use
    # Cloud ML, and for experimenting with new models using small datasets.
    BASIC = 1

    # Many workers and a few parameter servers.
    STANDARD_1 = 2

    # A large number of workers with many parameter servers.
    PREMIUM_1 = 3

    # A single worker instance with a K80 GPU.
    BASIC_GPU = 4

    # A single worker instance with a Cloud TPU.
    BASIC_TPU = 5

    # The CUSTOM tier is not a set tier, but rather enables you to use your
    # own cluster specification. When you use this tier, set values to
    # configure your processing cluster according to these guidelines:
    #
    # *   You _must_ set `ExecutionTemplate.masterType` to specify the type
    #     of machine to use for your master node. This is the only required
    #     setting.
    CUSTOM = 6
  end

  # Hardware accelerator types for AI Platform Training jobs.
  module SchedulerAcceleratorType
    # Unspecified accelerator type. Default to no GPU.
    SCHEDULER_ACCELERATOR_TYPE_UNSPECIFIED = 0

    # Nvidia Tesla K80 GPU.
    NVIDIA_TESLA_K80 = 1

    # Nvidia Tesla P100 GPU.
    NVIDIA_TESLA_P100 = 2

    # Nvidia Tesla V100 GPU.
    NVIDIA_TESLA_V100 = 3

    # Nvidia Tesla P4 GPU.
    NVIDIA_TESLA_P4 = 4

    # Nvidia Tesla T4 GPU.
    NVIDIA_TESLA_T4 = 5

    # Nvidia Tesla A100 GPU.
    NVIDIA_TESLA_A100 = 10

    # TPU v2.
    # NOTE(review): TPU values are defined although SchedulerAcceleratorConfig
    # docs state "TPUs are not supported" — confirm against the upstream proto.
    TPU_V2 = 6

    # TPU v3.
    TPU_V3 = 7
  end

  # The backend used for this execution.
  module JobType
    # No type specified.
    JOB_TYPE_UNSPECIFIED = 0

    # Custom Job in `aiplatform.googleapis.com`.
    # Default value for an execution.
    VERTEX_AI = 1

    # Run execution on a cluster with Dataproc as a job.
    # https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs
    DATAPROC = 2
  end
end

#parameters::String

Returns Parameters used within the 'input_notebook_file' notebook.

Returns:

  • (::String)

    Parameters used within the 'input_notebook_file' notebook.



144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
# File 'proto_docs/google/cloud/notebooks/v1/execution.rb', line 144

# The description of a notebook execution workload.
# Generated protobuf message wrapper; fields are declared via @!attribute docs.
class ExecutionTemplate
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Definition of a hardware accelerator. Note that not all combinations
  # of `type` and `core_count` are valid. Check [GPUs on
  # Compute Engine](https://cloud.google.com/compute/docs/gpus) to find a valid
  # combination. TPUs are not supported.
  # @!attribute [rw] type
  #   @return [::Google::Cloud::Notebooks::V1::ExecutionTemplate::SchedulerAcceleratorType]
  #     Type of this accelerator.
  # @!attribute [rw] core_count
  #   @return [::Integer]
  #     Count of cores of this accelerator.
  class SchedulerAcceleratorConfig
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Dataproc JobType executions.
  # @!attribute [rw] cluster
  #   @return [::String]
  #     URI for cluster used to run Dataproc execution.
  #     Format: `projects/{PROJECT_ID}/regions/{REGION}/clusters/{CLUSTER_NAME}`
  class DataprocParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Vertex AI JobType executions.
  # @!attribute [rw] network
  #   @return [::String]
  #     The full name of the Compute Engine
  #     [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks)
  #     to which the Job should be peered. For example,
  #     `projects/12345/global/networks/myVPC`.
  #     [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert)
  #     is of the form `projects/{project}/global/networks/{network}`.
  #     Where `{project}` is a project number, as in `12345`, and `{network}` is
  #     a network name.
  #
  #     Private services access must already be configured for the network. If
  #     left unspecified, the job is not peered with any network.
  # @!attribute [rw] env
  #   @return [::Google::Protobuf::Map{::String => ::String}]
  #     Environment variables.
  #     At most 100 environment variables can be specified and unique.
  #     Example: `GCP_BUCKET=gs://my-bucket/samples/`
  class VertexAIParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Protobuf map entry type backing the `env` field.
    # @!attribute [rw] key
    #   @return [::String]
    # @!attribute [rw] value
    #   @return [::String]
    class EnvEntry
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end
  end

  # Protobuf map entry type backing the `labels` field.
  # @!attribute [rw] key
  #   @return [::String]
  # @!attribute [rw] value
  #   @return [::String]
  class LabelsEntry
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Required. Specifies the machine types, the number of replicas for workers
  # and parameter servers.
  module ScaleTier
    # Unspecified Scale Tier.
    SCALE_TIER_UNSPECIFIED = 0

    # A single worker instance. This tier is suitable for learning how to use
    # Cloud ML, and for experimenting with new models using small datasets.
    BASIC = 1

    # Many workers and a few parameter servers.
    STANDARD_1 = 2

    # A large number of workers with many parameter servers.
    PREMIUM_1 = 3

    # A single worker instance with a K80 GPU.
    BASIC_GPU = 4

    # A single worker instance with a Cloud TPU.
    BASIC_TPU = 5

    # The CUSTOM tier is not a set tier, but rather enables you to use your
    # own cluster specification. When you use this tier, set values to
    # configure your processing cluster according to these guidelines:
    #
    # *   You _must_ set `ExecutionTemplate.masterType` to specify the type
    #     of machine to use for your master node. This is the only required
    #     setting.
    CUSTOM = 6
  end

  # Hardware accelerator types for AI Platform Training jobs.
  module SchedulerAcceleratorType
    # Unspecified accelerator type. Default to no GPU.
    SCHEDULER_ACCELERATOR_TYPE_UNSPECIFIED = 0

    # Nvidia Tesla K80 GPU.
    NVIDIA_TESLA_K80 = 1

    # Nvidia Tesla P100 GPU.
    NVIDIA_TESLA_P100 = 2

    # Nvidia Tesla V100 GPU.
    NVIDIA_TESLA_V100 = 3

    # Nvidia Tesla P4 GPU.
    NVIDIA_TESLA_P4 = 4

    # Nvidia Tesla T4 GPU.
    NVIDIA_TESLA_T4 = 5

    # Nvidia Tesla A100 GPU.
    NVIDIA_TESLA_A100 = 10

    # TPU v2.
    # NOTE(review): TPU values are defined although SchedulerAcceleratorConfig
    # docs state "TPUs are not supported" — confirm against the upstream proto.
    TPU_V2 = 6

    # TPU v3.
    TPU_V3 = 7
  end

  # The backend used for this execution.
  module JobType
    # No type specified.
    JOB_TYPE_UNSPECIFIED = 0

    # Custom Job in `aiplatform.googleapis.com`.
    # Default value for an execution.
    VERTEX_AI = 1

    # Run execution on a cluster with Dataproc as a job.
    # https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs
    DATAPROC = 2
  end
end

#params_yaml_file::String

Returns Parameters to be overridden in the notebook during execution. Refer to https://papermill.readthedocs.io/en/latest/usage-parameterize.html for how to specify parameters in the input notebook and pass them here in a YAML file. Ex: gs://notebook_user/scheduled_notebooks/sentiment_notebook_params.yaml.

Returns:



144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
# File 'proto_docs/google/cloud/notebooks/v1/execution.rb', line 144

# The description of a notebook execution workload.
# Protobuf message class: the message fields themselves come from the proto
# descriptor; this class mixes in protobuf message behavior and documents
# the nested message/enum types.
class ExecutionTemplate
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Definition of a hardware accelerator. Note that not all combinations
  # of `type` and `core_count` are valid. Check [GPUs on
  # Compute Engine](https://cloud.google.com/compute/docs/gpus) to find a valid
  # combination. TPUs are not supported.
  # @!attribute [rw] type
  #   @return [::Google::Cloud::Notebooks::V1::ExecutionTemplate::SchedulerAcceleratorType]
  #     Type of this accelerator.
  # @!attribute [rw] core_count
  #   @return [::Integer]
  #     Count of cores of this accelerator.
  class SchedulerAcceleratorConfig
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Dataproc JobType executions.
  # @!attribute [rw] cluster
  #   @return [::String]
  #     URI for cluster used to run Dataproc execution.
  #     Format: `projects/{PROJECT_ID}/regions/{REGION}/clusters/{CLUSTER_NAME}`
  class DataprocParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Vertex AI JobType executions.
  # @!attribute [rw] network
  #   @return [::String]
  #     The full name of the Compute Engine
  #     [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks)
  #     to which the Job should be peered. For example,
  #     `projects/12345/global/networks/myVPC`.
  #     [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert)
  #     is of the form `projects/{project}/global/networks/{network}`.
  #     Where `{project}` is a project number, as in `12345`, and `{network}` is
  #     a network name.
  #
  #     Private services access must already be configured for the network. If
  #     left unspecified, the job is not peered with any network.
  # @!attribute [rw] env
  #   @return [::Google::Protobuf::Map{::String => ::String}]
  #     Environment variables.
  #     At most 100 environment variables can be specified and unique.
  #     Example: `GCP_BUCKET=gs://my-bucket/samples/`
  class VertexAIParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Map entry type for the `env` map field (see VertexAIParameters#env).
    # @!attribute [rw] key
    #   @return [::String]
    # @!attribute [rw] value
    #   @return [::String]
    class EnvEntry
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end
  end

  # Map entry type for the `labels` map field on ExecutionTemplate.
  # @!attribute [rw] key
  #   @return [::String]
  # @!attribute [rw] value
  #   @return [::String]
  class LabelsEntry
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Required. Specifies the machine types, the number of replicas for workers
  # and parameter servers.
  module ScaleTier
    # Unspecified Scale Tier.
    SCALE_TIER_UNSPECIFIED = 0

    # A single worker instance. This tier is suitable for learning how to use
    # Cloud ML, and for experimenting with new models using small datasets.
    BASIC = 1

    # Many workers and a few parameter servers.
    STANDARD_1 = 2

    # A large number of workers with many parameter servers.
    PREMIUM_1 = 3

    # A single worker instance with a K80 GPU.
    BASIC_GPU = 4

    # A single worker instance with a Cloud TPU.
    BASIC_TPU = 5

    # The CUSTOM tier is not a set tier, but rather enables you to use your
    # own cluster specification. When you use this tier, set values to
    # configure your processing cluster according to these guidelines:
    #
    # *   You _must_ set `ExecutionTemplate.masterType` to specify the type
    #     of machine to use for your master node. This is the only required
    #     setting.
    CUSTOM = 6
  end

  # Hardware accelerator types for AI Platform Training jobs.
  module SchedulerAcceleratorType
    # Unspecified accelerator type. Default to no GPU.
    SCHEDULER_ACCELERATOR_TYPE_UNSPECIFIED = 0

    # Nvidia Tesla K80 GPU.
    NVIDIA_TESLA_K80 = 1

    # Nvidia Tesla P100 GPU.
    NVIDIA_TESLA_P100 = 2

    # Nvidia Tesla V100 GPU.
    NVIDIA_TESLA_V100 = 3

    # Nvidia Tesla P4 GPU.
    NVIDIA_TESLA_P4 = 4

    # Nvidia Tesla T4 GPU.
    NVIDIA_TESLA_T4 = 5

    # Nvidia Tesla A100 GPU.
    # Note: declared out of numeric order; 10 is the proto wire value.
    NVIDIA_TESLA_A100 = 10

    # TPU v2.
    # NOTE(review): TPU values are declared here even though the
    # SchedulerAcceleratorConfig documentation states "TPUs are not
    # supported" — confirm against the Notebooks v1 API reference.
    TPU_V2 = 6

    # TPU v3.
    TPU_V3 = 7
  end

  # The backend used for this execution.
  module JobType
    # No type specified.
    JOB_TYPE_UNSPECIFIED = 0

    # Custom Job in `aiplatform.googleapis.com`.
    # Default value for an execution.
    VERTEX_AI = 1

    # Run execution on a cluster with Dataproc as a job.
    # https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs
    DATAPROC = 2
  end
end

#scale_tier::Google::Cloud::Notebooks::V1::ExecutionTemplate::ScaleTier

Deprecated.

This field is deprecated and may be removed in the next major version update.

Returns Required. Scale tier of the hardware used for notebook execution. DEPRECATED: this field will be discontinued; currently only CUSTOM is supported.

Returns:



144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
# File 'proto_docs/google/cloud/notebooks/v1/execution.rb', line 144

# The description of a notebook execution workload.
# Protobuf message class: the message fields themselves come from the proto
# descriptor; this class mixes in protobuf message behavior and documents
# the nested message/enum types.
class ExecutionTemplate
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Definition of a hardware accelerator. Note that not all combinations
  # of `type` and `core_count` are valid. Check [GPUs on
  # Compute Engine](https://cloud.google.com/compute/docs/gpus) to find a valid
  # combination. TPUs are not supported.
  # @!attribute [rw] type
  #   @return [::Google::Cloud::Notebooks::V1::ExecutionTemplate::SchedulerAcceleratorType]
  #     Type of this accelerator.
  # @!attribute [rw] core_count
  #   @return [::Integer]
  #     Count of cores of this accelerator.
  class SchedulerAcceleratorConfig
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Dataproc JobType executions.
  # @!attribute [rw] cluster
  #   @return [::String]
  #     URI for cluster used to run Dataproc execution.
  #     Format: `projects/{PROJECT_ID}/regions/{REGION}/clusters/{CLUSTER_NAME}`
  class DataprocParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Vertex AI JobType executions.
  # @!attribute [rw] network
  #   @return [::String]
  #     The full name of the Compute Engine
  #     [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks)
  #     to which the Job should be peered. For example,
  #     `projects/12345/global/networks/myVPC`.
  #     [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert)
  #     is of the form `projects/{project}/global/networks/{network}`.
  #     Where `{project}` is a project number, as in `12345`, and `{network}` is
  #     a network name.
  #
  #     Private services access must already be configured for the network. If
  #     left unspecified, the job is not peered with any network.
  # @!attribute [rw] env
  #   @return [::Google::Protobuf::Map{::String => ::String}]
  #     Environment variables.
  #     At most 100 environment variables can be specified and unique.
  #     Example: `GCP_BUCKET=gs://my-bucket/samples/`
  class VertexAIParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Map entry type for the `env` map field (see VertexAIParameters#env).
    # @!attribute [rw] key
    #   @return [::String]
    # @!attribute [rw] value
    #   @return [::String]
    class EnvEntry
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end
  end

  # Map entry type for the `labels` map field on ExecutionTemplate.
  # @!attribute [rw] key
  #   @return [::String]
  # @!attribute [rw] value
  #   @return [::String]
  class LabelsEntry
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Required. Specifies the machine types, the number of replicas for workers
  # and parameter servers.
  module ScaleTier
    # Unspecified Scale Tier.
    SCALE_TIER_UNSPECIFIED = 0

    # A single worker instance. This tier is suitable for learning how to use
    # Cloud ML, and for experimenting with new models using small datasets.
    BASIC = 1

    # Many workers and a few parameter servers.
    STANDARD_1 = 2

    # A large number of workers with many parameter servers.
    PREMIUM_1 = 3

    # A single worker instance with a K80 GPU.
    BASIC_GPU = 4

    # A single worker instance with a Cloud TPU.
    BASIC_TPU = 5

    # The CUSTOM tier is not a set tier, but rather enables you to use your
    # own cluster specification. When you use this tier, set values to
    # configure your processing cluster according to these guidelines:
    #
    # *   You _must_ set `ExecutionTemplate.masterType` to specify the type
    #     of machine to use for your master node. This is the only required
    #     setting.
    CUSTOM = 6
  end

  # Hardware accelerator types for AI Platform Training jobs.
  module SchedulerAcceleratorType
    # Unspecified accelerator type. Default to no GPU.
    SCHEDULER_ACCELERATOR_TYPE_UNSPECIFIED = 0

    # Nvidia Tesla K80 GPU.
    NVIDIA_TESLA_K80 = 1

    # Nvidia Tesla P100 GPU.
    NVIDIA_TESLA_P100 = 2

    # Nvidia Tesla V100 GPU.
    NVIDIA_TESLA_V100 = 3

    # Nvidia Tesla P4 GPU.
    NVIDIA_TESLA_P4 = 4

    # Nvidia Tesla T4 GPU.
    NVIDIA_TESLA_T4 = 5

    # Nvidia Tesla A100 GPU.
    # Note: declared out of numeric order; 10 is the proto wire value.
    NVIDIA_TESLA_A100 = 10

    # TPU v2.
    # NOTE(review): TPU values are declared here even though the
    # SchedulerAcceleratorConfig documentation states "TPUs are not
    # supported" — confirm against the Notebooks v1 API reference.
    TPU_V2 = 6

    # TPU v3.
    TPU_V3 = 7
  end

  # The backend used for this execution.
  module JobType
    # No type specified.
    JOB_TYPE_UNSPECIFIED = 0

    # Custom Job in `aiplatform.googleapis.com`.
    # Default value for an execution.
    VERTEX_AI = 1

    # Run execution on a cluster with Dataproc as a job.
    # https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs
    DATAPROC = 2
  end
end

#service_account::String

Returns The email address of a service account to use when running the execution. You must have the iam.serviceAccounts.actAs permission for the specified service account.

Returns:

  • (::String)

    The email address of a service account to use when running the execution. You must have the iam.serviceAccounts.actAs permission for the specified service account.



144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
# File 'proto_docs/google/cloud/notebooks/v1/execution.rb', line 144

# The description of a notebook execution workload.
# Protobuf message class: the message fields themselves come from the proto
# descriptor; this class mixes in protobuf message behavior and documents
# the nested message/enum types.
class ExecutionTemplate
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Definition of a hardware accelerator. Note that not all combinations
  # of `type` and `core_count` are valid. Check [GPUs on
  # Compute Engine](https://cloud.google.com/compute/docs/gpus) to find a valid
  # combination. TPUs are not supported.
  # @!attribute [rw] type
  #   @return [::Google::Cloud::Notebooks::V1::ExecutionTemplate::SchedulerAcceleratorType]
  #     Type of this accelerator.
  # @!attribute [rw] core_count
  #   @return [::Integer]
  #     Count of cores of this accelerator.
  class SchedulerAcceleratorConfig
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Dataproc JobType executions.
  # @!attribute [rw] cluster
  #   @return [::String]
  #     URI for cluster used to run Dataproc execution.
  #     Format: `projects/{PROJECT_ID}/regions/{REGION}/clusters/{CLUSTER_NAME}`
  class DataprocParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Vertex AI JobType executions.
  # @!attribute [rw] network
  #   @return [::String]
  #     The full name of the Compute Engine
  #     [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks)
  #     to which the Job should be peered. For example,
  #     `projects/12345/global/networks/myVPC`.
  #     [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert)
  #     is of the form `projects/{project}/global/networks/{network}`.
  #     Where `{project}` is a project number, as in `12345`, and `{network}` is
  #     a network name.
  #
  #     Private services access must already be configured for the network. If
  #     left unspecified, the job is not peered with any network.
  # @!attribute [rw] env
  #   @return [::Google::Protobuf::Map{::String => ::String}]
  #     Environment variables.
  #     At most 100 environment variables can be specified and unique.
  #     Example: `GCP_BUCKET=gs://my-bucket/samples/`
  class VertexAIParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Map entry type for the `env` map field (see VertexAIParameters#env).
    # @!attribute [rw] key
    #   @return [::String]
    # @!attribute [rw] value
    #   @return [::String]
    class EnvEntry
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end
  end

  # Map entry type for the `labels` map field on ExecutionTemplate.
  # @!attribute [rw] key
  #   @return [::String]
  # @!attribute [rw] value
  #   @return [::String]
  class LabelsEntry
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Required. Specifies the machine types, the number of replicas for workers
  # and parameter servers.
  module ScaleTier
    # Unspecified Scale Tier.
    SCALE_TIER_UNSPECIFIED = 0

    # A single worker instance. This tier is suitable for learning how to use
    # Cloud ML, and for experimenting with new models using small datasets.
    BASIC = 1

    # Many workers and a few parameter servers.
    STANDARD_1 = 2

    # A large number of workers with many parameter servers.
    PREMIUM_1 = 3

    # A single worker instance with a K80 GPU.
    BASIC_GPU = 4

    # A single worker instance with a Cloud TPU.
    BASIC_TPU = 5

    # The CUSTOM tier is not a set tier, but rather enables you to use your
    # own cluster specification. When you use this tier, set values to
    # configure your processing cluster according to these guidelines:
    #
    # *   You _must_ set `ExecutionTemplate.masterType` to specify the type
    #     of machine to use for your master node. This is the only required
    #     setting.
    CUSTOM = 6
  end

  # Hardware accelerator types for AI Platform Training jobs.
  module SchedulerAcceleratorType
    # Unspecified accelerator type. Default to no GPU.
    SCHEDULER_ACCELERATOR_TYPE_UNSPECIFIED = 0

    # Nvidia Tesla K80 GPU.
    NVIDIA_TESLA_K80 = 1

    # Nvidia Tesla P100 GPU.
    NVIDIA_TESLA_P100 = 2

    # Nvidia Tesla V100 GPU.
    NVIDIA_TESLA_V100 = 3

    # Nvidia Tesla P4 GPU.
    NVIDIA_TESLA_P4 = 4

    # Nvidia Tesla T4 GPU.
    NVIDIA_TESLA_T4 = 5

    # Nvidia Tesla A100 GPU.
    # Note: declared out of numeric order; 10 is the proto wire value.
    NVIDIA_TESLA_A100 = 10

    # TPU v2.
    # NOTE(review): TPU values are declared here even though the
    # SchedulerAcceleratorConfig documentation states "TPUs are not
    # supported" — confirm against the Notebooks v1 API reference.
    TPU_V2 = 6

    # TPU v3.
    TPU_V3 = 7
  end

  # The backend used for this execution.
  module JobType
    # No type specified.
    JOB_TYPE_UNSPECIFIED = 0

    # Custom Job in `aiplatform.googleapis.com`.
    # Default value for an execution.
    VERTEX_AI = 1

    # Run execution on a cluster with Dataproc as a job.
    # https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs
    DATAPROC = 2
  end
end

#tensorboard::String

Returns The name of a Vertex AI [Tensorboard] resource to which this execution will upload Tensorboard logs. Format: projects/{project}/locations/{location}/tensorboards/{tensorboard}.

Returns:

  • (::String)

    The name of a Vertex AI [Tensorboard] resource to which this execution will upload Tensorboard logs. Format: projects/{project}/locations/{location}/tensorboards/{tensorboard}



144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
# File 'proto_docs/google/cloud/notebooks/v1/execution.rb', line 144

# The description of a notebook execution workload.
# Protobuf message class: the message fields themselves come from the proto
# descriptor; this class mixes in protobuf message behavior and documents
# the nested message/enum types.
class ExecutionTemplate
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Definition of a hardware accelerator. Note that not all combinations
  # of `type` and `core_count` are valid. Check [GPUs on
  # Compute Engine](https://cloud.google.com/compute/docs/gpus) to find a valid
  # combination. TPUs are not supported.
  # @!attribute [rw] type
  #   @return [::Google::Cloud::Notebooks::V1::ExecutionTemplate::SchedulerAcceleratorType]
  #     Type of this accelerator.
  # @!attribute [rw] core_count
  #   @return [::Integer]
  #     Count of cores of this accelerator.
  class SchedulerAcceleratorConfig
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Dataproc JobType executions.
  # @!attribute [rw] cluster
  #   @return [::String]
  #     URI for cluster used to run Dataproc execution.
  #     Format: `projects/{PROJECT_ID}/regions/{REGION}/clusters/{CLUSTER_NAME}`
  class DataprocParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Vertex AI JobType executions.
  # @!attribute [rw] network
  #   @return [::String]
  #     The full name of the Compute Engine
  #     [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks)
  #     to which the Job should be peered. For example,
  #     `projects/12345/global/networks/myVPC`.
  #     [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert)
  #     is of the form `projects/{project}/global/networks/{network}`.
  #     Where `{project}` is a project number, as in `12345`, and `{network}` is
  #     a network name.
  #
  #     Private services access must already be configured for the network. If
  #     left unspecified, the job is not peered with any network.
  # @!attribute [rw] env
  #   @return [::Google::Protobuf::Map{::String => ::String}]
  #     Environment variables.
  #     At most 100 environment variables can be specified and unique.
  #     Example: `GCP_BUCKET=gs://my-bucket/samples/`
  class VertexAIParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Map entry type for the `env` map field (see VertexAIParameters#env).
    # @!attribute [rw] key
    #   @return [::String]
    # @!attribute [rw] value
    #   @return [::String]
    class EnvEntry
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end
  end

  # Map entry type for the `labels` map field on ExecutionTemplate.
  # @!attribute [rw] key
  #   @return [::String]
  # @!attribute [rw] value
  #   @return [::String]
  class LabelsEntry
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Required. Specifies the machine types, the number of replicas for workers
  # and parameter servers.
  module ScaleTier
    # Unspecified Scale Tier.
    SCALE_TIER_UNSPECIFIED = 0

    # A single worker instance. This tier is suitable for learning how to use
    # Cloud ML, and for experimenting with new models using small datasets.
    BASIC = 1

    # Many workers and a few parameter servers.
    STANDARD_1 = 2

    # A large number of workers with many parameter servers.
    PREMIUM_1 = 3

    # A single worker instance with a K80 GPU.
    BASIC_GPU = 4

    # A single worker instance with a Cloud TPU.
    BASIC_TPU = 5

    # The CUSTOM tier is not a set tier, but rather enables you to use your
    # own cluster specification. When you use this tier, set values to
    # configure your processing cluster according to these guidelines:
    #
    # *   You _must_ set `ExecutionTemplate.masterType` to specify the type
    #     of machine to use for your master node. This is the only required
    #     setting.
    CUSTOM = 6
  end

  # Hardware accelerator types for AI Platform Training jobs.
  module SchedulerAcceleratorType
    # Unspecified accelerator type. Default to no GPU.
    SCHEDULER_ACCELERATOR_TYPE_UNSPECIFIED = 0

    # Nvidia Tesla K80 GPU.
    NVIDIA_TESLA_K80 = 1

    # Nvidia Tesla P100 GPU.
    NVIDIA_TESLA_P100 = 2

    # Nvidia Tesla V100 GPU.
    NVIDIA_TESLA_V100 = 3

    # Nvidia Tesla P4 GPU.
    NVIDIA_TESLA_P4 = 4

    # Nvidia Tesla T4 GPU.
    NVIDIA_TESLA_T4 = 5

    # Nvidia Tesla A100 GPU.
    # Note: declared out of numeric order; 10 is the proto wire value.
    NVIDIA_TESLA_A100 = 10

    # TPU v2.
    # NOTE(review): TPU values are declared here even though the
    # SchedulerAcceleratorConfig documentation states "TPUs are not
    # supported" — confirm against the Notebooks v1 API reference.
    TPU_V2 = 6

    # TPU v3.
    TPU_V3 = 7
  end

  # The backend used for this execution.
  module JobType
    # No type specified.
    JOB_TYPE_UNSPECIFIED = 0

    # Custom Job in `aiplatform.googleapis.com`.
    # Default value for an execution.
    VERTEX_AI = 1

    # Run execution on a cluster with Dataproc as a job.
    # https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs
    DATAPROC = 2
  end
end

#vertex_ai_parameters::Google::Cloud::Notebooks::V1::ExecutionTemplate::VertexAIParameters

Returns Parameters used in Vertex AI JobType executions.

Returns:



144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
# File 'proto_docs/google/cloud/notebooks/v1/execution.rb', line 144

# The description of a notebook execution workload.
# Protobuf message class: the message fields themselves come from the proto
# descriptor; this class mixes in protobuf message behavior and documents
# the nested message/enum types.
class ExecutionTemplate
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # Definition of a hardware accelerator. Note that not all combinations
  # of `type` and `core_count` are valid. Check [GPUs on
  # Compute Engine](https://cloud.google.com/compute/docs/gpus) to find a valid
  # combination. TPUs are not supported.
  # @!attribute [rw] type
  #   @return [::Google::Cloud::Notebooks::V1::ExecutionTemplate::SchedulerAcceleratorType]
  #     Type of this accelerator.
  # @!attribute [rw] core_count
  #   @return [::Integer]
  #     Count of cores of this accelerator.
  class SchedulerAcceleratorConfig
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Dataproc JobType executions.
  # @!attribute [rw] cluster
  #   @return [::String]
  #     URI for cluster used to run Dataproc execution.
  #     Format: `projects/{PROJECT_ID}/regions/{REGION}/clusters/{CLUSTER_NAME}`
  class DataprocParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Parameters used in Vertex AI JobType executions.
  # @!attribute [rw] network
  #   @return [::String]
  #     The full name of the Compute Engine
  #     [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks)
  #     to which the Job should be peered. For example,
  #     `projects/12345/global/networks/myVPC`.
  #     [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert)
  #     is of the form `projects/{project}/global/networks/{network}`.
  #     Where `{project}` is a project number, as in `12345`, and `{network}` is
  #     a network name.
  #
  #     Private services access must already be configured for the network. If
  #     left unspecified, the job is not peered with any network.
  # @!attribute [rw] env
  #   @return [::Google::Protobuf::Map{::String => ::String}]
  #     Environment variables.
  #     At most 100 environment variables can be specified and unique.
  #     Example: `GCP_BUCKET=gs://my-bucket/samples/`
  class VertexAIParameters
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods

    # Map entry type for the `env` map field (see VertexAIParameters#env).
    # @!attribute [rw] key
    #   @return [::String]
    # @!attribute [rw] value
    #   @return [::String]
    class EnvEntry
      include ::Google::Protobuf::MessageExts
      extend ::Google::Protobuf::MessageExts::ClassMethods
    end
  end

  # Map entry type for the `labels` map field on ExecutionTemplate.
  # @!attribute [rw] key
  #   @return [::String]
  # @!attribute [rw] value
  #   @return [::String]
  class LabelsEntry
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Required. Specifies the machine types, the number of replicas for workers
  # and parameter servers.
  module ScaleTier
    # Unspecified Scale Tier.
    SCALE_TIER_UNSPECIFIED = 0

    # A single worker instance. This tier is suitable for learning how to use
    # Cloud ML, and for experimenting with new models using small datasets.
    BASIC = 1

    # Many workers and a few parameter servers.
    STANDARD_1 = 2

    # A large number of workers with many parameter servers.
    PREMIUM_1 = 3

    # A single worker instance with a K80 GPU.
    BASIC_GPU = 4

    # A single worker instance with a Cloud TPU.
    BASIC_TPU = 5

    # The CUSTOM tier is not a set tier, but rather enables you to use your
    # own cluster specification. When you use this tier, set values to
    # configure your processing cluster according to these guidelines:
    #
    # *   You _must_ set `ExecutionTemplate.masterType` to specify the type
    #     of machine to use for your master node. This is the only required
    #     setting.
    CUSTOM = 6
  end

  # Hardware accelerator types for AI Platform Training jobs.
  module SchedulerAcceleratorType
    # Unspecified accelerator type. Default to no GPU.
    SCHEDULER_ACCELERATOR_TYPE_UNSPECIFIED = 0

    # Nvidia Tesla K80 GPU.
    NVIDIA_TESLA_K80 = 1

    # Nvidia Tesla P100 GPU.
    NVIDIA_TESLA_P100 = 2

    # Nvidia Tesla V100 GPU.
    NVIDIA_TESLA_V100 = 3

    # Nvidia Tesla P4 GPU.
    NVIDIA_TESLA_P4 = 4

    # Nvidia Tesla T4 GPU.
    NVIDIA_TESLA_T4 = 5

    # Nvidia Tesla A100 GPU.
    # Note: declared out of numeric order; 10 is the proto wire value.
    NVIDIA_TESLA_A100 = 10

    # TPU v2.
    # NOTE(review): TPU values are declared here even though the
    # SchedulerAcceleratorConfig documentation states "TPUs are not
    # supported" — confirm against the Notebooks v1 API reference.
    TPU_V2 = 6

    # TPU v3.
    TPU_V3 = 7
  end

  # The backend used for this execution.
  module JobType
    # No type specified.
    JOB_TYPE_UNSPECIFIED = 0

    # Custom Job in `aiplatform.googleapis.com`.
    # Default value for an execution.
    VERTEX_AI = 1

    # Run execution on a cluster with Dataproc as a job.
    # https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs
    DATAPROC = 2
  end
end