Class: Google::Apis::AiplatformV1::LearningGenaiRootHarm

Inherits: Object
Includes: Core::Hashable, Core::JsonObjectSupport
Defined in:
lib/google/apis/aiplatform_v1/classes.rb,
lib/google/apis/aiplatform_v1/representations.rb

Instance Attribute Summary

Instance Method Summary

Constructor Details

#initialize(**args) ⇒ LearningGenaiRootHarm

Returns a new instance of LearningGenaiRootHarm.



# File 'lib/google/apis/aiplatform_v1/classes.rb', line 30224

def initialize(**args)
   update!(**args)
end
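
A minimal usage sketch (the attribute values here are illustrative assumptions, not defaults): the constructor forwards its keyword arguments to #update!, so any of the snake_case attributes documented below can be set at construction time.

require 'google/apis/aiplatform_v1'

# Illustrative values only; each keyword matches a snake_case attribute below.
harm = Google::Apis::AiplatformV1::LearningGenaiRootHarm.new(
  csam: false,
  image_porn: false,
  threshold: 0.8
)

harm.csam?      # => false (the `?` aliases wrap the Boolean readers)
harm.threshold  # => 0.8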

Instance Attribute Details

#contextual_dangerous ⇒ Boolean
Also known as: contextual_dangerous?

Please do not use; this is still under development. Corresponds to the JSON property contextualDangerous

Returns:

  • (Boolean)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 30128

def contextual_dangerous
  @contextual_dangerous
end

#csam ⇒ Boolean
Also known as: csam?

Corresponds to the JSON property csam

Returns:

  • (Boolean)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 30134

def csam
  @csam
end

#fringe ⇒ Boolean
Also known as: fringe?

Corresponds to the JSON property fringe

Returns:

  • (Boolean)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 30140

def fringe
  @fringe
end

#grail_image_harm_type ⇒ Google::Apis::AiplatformV1::LearningGenaiRootHarmGrailImageHarmType

Harm type for images. Corresponds to the JSON property grailImageHarmType



# File 'lib/google/apis/aiplatform_v1/classes.rb', line 30146

def grail_image_harm_type
  @grail_image_harm_type
end

#grail_text_harm_type ⇒ Google::Apis::AiplatformV1::LearningGenaiRootHarmGrailTextHarmType

Harm type for text. Corresponds to the JSON property grailTextHarmType



# File 'lib/google/apis/aiplatform_v1/classes.rb', line 30151

def grail_text_harm_type
  @grail_text_harm_type
end

#image_csam ⇒ Boolean
Also known as: image_csam?

Corresponds to the JSON property imageCsam

Returns:

  • (Boolean)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 30156

def image_csam
  @image_csam
end

#image_pedo ⇒ Boolean
Also known as: image_pedo?

Corresponds to the JSON property imagePedo

Returns:

  • (Boolean)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 30162

def image_pedo
  @image_pedo
end

#image_porn ⇒ Boolean
Also known as: image_porn?

Image signals. Corresponds to the JSON property imagePorn

Returns:

  • (Boolean)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 30168

def image_porn
  @image_porn
end

#image_violence ⇒ Boolean
Also known as: image_violence?

Corresponds to the JSON property imageViolence

Returns:

  • (Boolean)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 30174

def image_violence
  @image_violence
end

#pqc ⇒ Boolean
Also known as: pqc?

Corresponds to the JSON property pqc

Returns:

  • (Boolean)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 30180

def pqc
  @pqc
end

#safetycat ⇒ Google::Apis::AiplatformV1::LearningGenaiRootHarmSafetyCatCategories

Corresponds to the JSON property safetycat



# File 'lib/google/apis/aiplatform_v1/classes.rb', line 30186

def safetycat
  @safetycat
end

#spii ⇒ Google::Apis::AiplatformV1::LearningGenaiRootHarmSpiiFilter

The Spii filter uses the buckets defined in http://google3/google/privacy/dlp/v2/storage.proto;l=77;rcl=584719820 to classify the input. LMRoot converts the bucket into a double score; for example, the score for "POSSIBLE" is 3 / 5 = 0.6. Corresponds to the JSON property spii



# File 'lib/google/apis/aiplatform_v1/classes.rb', line 30193

def spii
  @spii
end
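
The bucket-to-score conversion described above can be illustrated with a short sketch. The bucket names and indices are an assumption taken from the Likelihood enum in the referenced storage.proto, and the helper itself is hypothetical, not part of this class:

# Hypothetical helper, not part of the client library: maps a DLP
# Likelihood bucket to the double score described above (bucket / 5).
DLP_LIKELIHOOD_BUCKETS = {
  'VERY_UNLIKELY' => 1,
  'UNLIKELY'      => 2,
  'POSSIBLE'      => 3,
  'LIKELY'        => 4,
  'VERY_LIKELY'   => 5
}.freeze

def likelihood_to_score(bucket)
  DLP_LIKELIHOOD_BUCKETS.fetch(bucket) / 5.0
end

likelihood_to_score('POSSIBLE')  # => 0.6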

#threshold ⇒ Float

Corresponds to the JSON property threshold

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 30198

def threshold
  @threshold
end

#video_frame_csam ⇒ Boolean
Also known as: video_frame_csam?

Corresponds to the JSON property videoFrameCsam

Returns:

  • (Boolean)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 30203

def video_frame_csam
  @video_frame_csam
end

#video_frame_pedo ⇒ Boolean
Also known as: video_frame_pedo?

Corresponds to the JSON property videoFramePedo

Returns:

  • (Boolean)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 30209

def video_frame_pedo
  @video_frame_pedo
end

#video_frame_porn ⇒ Boolean
Also known as: video_frame_porn?

Video frame signals. Corresponds to the JSON property videoFramePorn

Returns:

  • (Boolean)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 30215

def video_frame_porn
  @video_frame_porn
end

#video_frame_violence ⇒ Boolean
Also known as: video_frame_violence?

Corresponds to the JSON property videoFrameViolence

Returns:

  • (Boolean)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 30221

def video_frame_violence
  @video_frame_violence
end

Instance Method Details

#update!(**args) ⇒ Object

Update properties of this object



# File 'lib/google/apis/aiplatform_v1/classes.rb', line 30229

def update!(**args)
  @contextual_dangerous = args[:contextual_dangerous] if args.key?(:contextual_dangerous)
  @csam = args[:csam] if args.key?(:csam)
  @fringe = args[:fringe] if args.key?(:fringe)
  @grail_image_harm_type = args[:grail_image_harm_type] if args.key?(:grail_image_harm_type)
  @grail_text_harm_type = args[:grail_text_harm_type] if args.key?(:grail_text_harm_type)
  @image_csam = args[:image_csam] if args.key?(:image_csam)
  @image_pedo = args[:image_pedo] if args.key?(:image_pedo)
  @image_porn = args[:image_porn] if args.key?(:image_porn)
  @image_violence = args[:image_violence] if args.key?(:image_violence)
  @pqc = args[:pqc] if args.key?(:pqc)
  @safetycat = args[:safetycat] if args.key?(:safetycat)
  @spii = args[:spii] if args.key?(:spii)
  @threshold = args[:threshold] if args.key?(:threshold)
  @video_frame_csam = args[:video_frame_csam] if args.key?(:video_frame_csam)
  @video_frame_pedo = args[:video_frame_pedo] if args.key?(:video_frame_pedo)
  @video_frame_porn = args[:video_frame_porn] if args.key?(:video_frame_porn)
  @video_frame_violence = args[:video_frame_violence] if args.key?(:video_frame_violence)
end
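
A short sketch of #update! in practice, continuing the hypothetical object from the constructor example above: only the keys present in args are assigned, so attributes that are not mentioned keep their current values.

harm.update!(threshold: 0.9, fringe: true)
harm.threshold  # => 0.9
harm.csam?      # => false (unchanged, since :csam was not passed)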