Class: Google::Apis::AiplatformV1::LearningGenaiRootHarm

Inherits:
Object
Includes:
Core::Hashable, Core::JsonObjectSupport
Defined in:
lib/google/apis/aiplatform_v1/classes.rb,
lib/google/apis/aiplatform_v1/representations.rb

Instance Attribute Summary

Instance Method Summary

Constructor Details

#initialize(**args) ⇒ LearningGenaiRootHarm

Returns a new instance of LearningGenaiRootHarm.



# File 'lib/google/apis/aiplatform_v1/classes.rb', line 30329

def initialize(**args)
  update!(**args)
end
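
As a usage sketch (values chosen purely for illustration), an instance can be constructed by passing the snake_case attribute names as keyword arguments:

require 'google/apis/aiplatform_v1'

# Illustrative values only; any subset of the attributes documented below may be passed.
harm = Google::Apis::AiplatformV1::LearningGenaiRootHarm.new(
  csam: false,
  image_porn: false,
  threshold: 0.5
)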

Instance Attribute Details

#contextual_dangerous ⇒ Boolean Also known as: contextual_dangerous?

Please do not use; this is still under development. Corresponds to the JSON property contextualDangerous

Returns:

  • (Boolean)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 30233

def contextual_dangerous
  @contextual_dangerous
end
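
Each Boolean attribute also has a query-style alias (noted above as "Also known as"); a minimal sketch:

harm = Google::Apis::AiplatformV1::LearningGenaiRootHarm.new(contextual_dangerous: true)
harm.contextual_dangerous   # => true
harm.contextual_dangerous?  # => true (alias of the plain reader)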

#csam ⇒ Boolean Also known as: csam?

Corresponds to the JSON property csam

Returns:

  • (Boolean)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 30239

def csam
  @csam
end

#fringe ⇒ Boolean Also known as: fringe?

Corresponds to the JSON property fringe

Returns:

  • (Boolean)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 30245

def fringe
  @fringe
end

#grail_image_harm_type ⇒ Google::Apis::AiplatformV1::LearningGenaiRootHarmGrailImageHarmType

Harm type for images. Corresponds to the JSON property grailImageHarmType



# File 'lib/google/apis/aiplatform_v1/classes.rb', line 30251

def grail_image_harm_type
  @grail_image_harm_type
end

#grail_text_harm_type ⇒ Google::Apis::AiplatformV1::LearningGenaiRootHarmGrailTextHarmType

Harm type for text. Corresponds to the JSON property grailTextHarmType



# File 'lib/google/apis/aiplatform_v1/classes.rb', line 30256

def grail_text_harm_type
  @grail_text_harm_type
end

#image_csam ⇒ Boolean Also known as: image_csam?

Corresponds to the JSON property imageCsam

Returns:

  • (Boolean)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 30261

def image_csam
  @image_csam
end

#image_pedo ⇒ Boolean Also known as: image_pedo?

Corresponds to the JSON property imagePedo

Returns:

  • (Boolean)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 30267

def image_pedo
  @image_pedo
end

#image_porn ⇒ Boolean Also known as: image_porn?

Image signals. Corresponds to the JSON property imagePorn

Returns:

  • (Boolean)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 30273

def image_porn
  @image_porn
end

#image_violence ⇒ Boolean Also known as: image_violence?

Corresponds to the JSON property imageViolence

Returns:

  • (Boolean)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 30279

def image_violence
  @image_violence
end

#pqc ⇒ Boolean Also known as: pqc?

Corresponds to the JSON property pqc

Returns:

  • (Boolean)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 30285

def pqc
  @pqc
end

#safetycat ⇒ Google::Apis::AiplatformV1::LearningGenaiRootHarmSafetyCatCategories

Corresponds to the JSON property safetycat



# File 'lib/google/apis/aiplatform_v1/classes.rb', line 30291

def safetycat
  @safetycat
end

#spii ⇒ Google::Apis::AiplatformV1::LearningGenaiRootHarmSpiiFilter

Spii Filter uses buckets http://google3/google/privacy/dlp/v2/storage.proto;l=77;rcl=584719820 to classify the input. LMRoot converts the bucket into a double score; for example, the score for "POSSIBLE" is 3 / 5 = 0.6. Corresponds to the JSON property spii



# File 'lib/google/apis/aiplatform_v1/classes.rb', line 30298

def spii
  @spii
end
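
A hypothetical sketch of the bucket-to-score conversion described above (the bucket names and the divisor of 5 are assumptions drawn from the DLP likelihood buckets the comment references; this is not the library's own code):

# Assumed DLP likelihood buckets, ordered from least to most likely.
DLP_BUCKETS = %w[VERY_UNLIKELY UNLIKELY POSSIBLE LIKELY VERY_LIKELY].freeze

# Convert a bucket name to a double score by dividing its 1-based rank by 5.
def bucket_to_score(bucket)
  (DLP_BUCKETS.index(bucket) + 1) / 5.0
end

bucket_to_score('POSSIBLE')  # => 0.6, matching the 3 / 5 example above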

#threshold ⇒ Float

Corresponds to the JSON property threshold

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 30303

def threshold
  @threshold
end

#video_frame_csam ⇒ Boolean Also known as: video_frame_csam?

Corresponds to the JSON property videoFrameCsam

Returns:

  • (Boolean)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 30308

def video_frame_csam
  @video_frame_csam
end

#video_frame_pedo ⇒ Boolean Also known as: video_frame_pedo?

Corresponds to the JSON property videoFramePedo

Returns:

  • (Boolean)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 30314

def video_frame_pedo
  @video_frame_pedo
end

#video_frame_porn ⇒ Boolean Also known as: video_frame_porn?

Video frame signals. Corresponds to the JSON property videoFramePorn

Returns:

  • (Boolean)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 30320

def video_frame_porn
  @video_frame_porn
end

#video_frame_violence ⇒ Boolean Also known as: video_frame_violence?

Corresponds to the JSON property videoFrameViolence

Returns:

  • (Boolean)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 30326

def video_frame_violence
  @video_frame_violence
end

Instance Method Details

#update!(**args) ⇒ Object

Update properties of this object



# File 'lib/google/apis/aiplatform_v1/classes.rb', line 30334

def update!(**args)
  @contextual_dangerous = args[:contextual_dangerous] if args.key?(:contextual_dangerous)
  @csam = args[:csam] if args.key?(:csam)
  @fringe = args[:fringe] if args.key?(:fringe)
  @grail_image_harm_type = args[:grail_image_harm_type] if args.key?(:grail_image_harm_type)
  @grail_text_harm_type = args[:grail_text_harm_type] if args.key?(:grail_text_harm_type)
  @image_csam = args[:image_csam] if args.key?(:image_csam)
  @image_pedo = args[:image_pedo] if args.key?(:image_pedo)
  @image_porn = args[:image_porn] if args.key?(:image_porn)
  @image_violence = args[:image_violence] if args.key?(:image_violence)
  @pqc = args[:pqc] if args.key?(:pqc)
  @safetycat = args[:safetycat] if args.key?(:safetycat)
  @spii = args[:spii] if args.key?(:spii)
  @threshold = args[:threshold] if args.key?(:threshold)
  @video_frame_csam = args[:video_frame_csam] if args.key?(:video_frame_csam)
  @video_frame_pedo = args[:video_frame_pedo] if args.key?(:video_frame_pedo)
  @video_frame_porn = args[:video_frame_porn] if args.key?(:video_frame_porn)
  @video_frame_violence = args[:video_frame_violence] if args.key?(:video_frame_violence)
end
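
As a brief usage sketch (illustrative values), update! can be used to change a subset of properties after construction:

harm = Google::Apis::AiplatformV1::LearningGenaiRootHarm.new(threshold: 0.5)
harm.update!(threshold: 0.8, fringe: false)
harm.threshold  # => 0.8
harm.fringe?    # => false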