Class: Google::Apis::AiplatformV1::GoogleCloudAiplatformV1SchemaModelevaluationMetricsClassificationEvaluationMetricsConfidenceMetrics

Inherits:
Object
Includes:
Core::Hashable, Core::JsonObjectSupport
Defined in:
lib/google/apis/aiplatform_v1/classes.rb,
lib/google/apis/aiplatform_v1/representations.rb

Instance Attribute Summary

Instance Method Summary

Constructor Details

#initialize(**args) ⇒ GoogleCloudAiplatformV1SchemaModelevaluationMetricsClassificationEvaluationMetricsConfidenceMetrics

Returns a new instance of GoogleCloudAiplatformV1SchemaModelevaluationMetricsClassificationEvaluationMetricsConfidenceMetrics.



# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22259

def initialize(**args)
   update!(**args)
end
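
A minimal construction sketch, assuming the google-apis-aiplatform_v1 gem is available; the attribute values are hypothetical and only illustrate that the constructor accepts keyword arguments named after the attributes below:

require 'google/apis/aiplatform_v1'

# Build a metrics object by hand (in practice these objects are usually
# returned by the API rather than constructed locally).
metrics = Google::Apis::AiplatformV1::GoogleCloudAiplatformV1SchemaModelevaluationMetricsClassificationEvaluationMetricsConfidenceMetrics.new(
  confidence_threshold: 0.5,   # hypothetical values throughout
  precision: 0.91,
  recall: 0.87,
  true_positive_count: 870,
  false_positive_count: 86
)

metrics.precision # => 0.91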

Instance Attribute Details

#confidence_threshold ⇒ Float

Metrics are computed with the assumption that the Model never returns predictions with a score lower than this value. Corresponds to the JSON property confidenceThreshold

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22168

def confidence_threshold
  @confidence_threshold
end

#confusion_matrix ⇒ Google::Apis::AiplatformV1::GoogleCloudAiplatformV1SchemaModelevaluationMetricsConfusionMatrix

Confusion matrix of the evaluation for this confidence_threshold. Corresponds to the JSON property confusionMatrix



# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22173

def confusion_matrix
  @confusion_matrix
end

#f1_score ⇒ Float

The harmonic mean of recall and precision. For summary metrics, it computes the micro-averaged F1 score. Corresponds to the JSON property f1Score

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22179

def f1_score
  @f1_score
end
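
As a consistency sketch (not part of the gem's API), the F1 value at a given threshold should match the standard harmonic-mean identity applied to the precision and recall fields of the same object; metrics here is the hypothetical instance from the constructor example above:

# F1 = 2PR / (P + R); nil values coerce to 0.0 via to_f.
p = metrics.precision.to_f
r = metrics.recall.to_f
f1 = (p + r).zero? ? 0.0 : 2.0 * p * r / (p + r)
# f1 should be close to metrics.f1_score for the same confidence threshold.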

#f1_score_at1 ⇒ Float

The harmonic mean of recallAt1 and precisionAt1. Corresponds to the JSON property f1ScoreAt1

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22184

def f1_score_at1
  @f1_score_at1
end

#f1_score_macro ⇒ Float

Macro-averaged F1 Score. Corresponds to the JSON property f1ScoreMacro

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22189

def f1_score_macro
  @f1_score_macro
end

#f1_score_micro ⇒ Float

Micro-averaged F1 Score. Corresponds to the JSON property f1ScoreMicro

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22194

def f1_score_micro
  @f1_score_micro
end
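
Macro averaging takes the mean of per-class F1 scores, while micro averaging pools the per-class counts before computing a single F1. A small sketch of the standard definitions, using hypothetical per-class counts (this is not how the service itself reports per-class data):

# Hypothetical per-class true positive / false positive / false negative counts.
per_class = {
  'cat' => { tp: 40, fp: 10, fn: 5 },
  'dog' => { tp: 20, fp: 5,  fn: 20 }
}

f1 = lambda do |tp, fp, fn|
  p = tp.to_f / (tp + fp)
  r = tp.to_f / (tp + fn)
  (p + r).zero? ? 0.0 : 2 * p * r / (p + r)
end

# Macro F1: average of the per-class F1 scores.
f1_macro = per_class.values.sum { |c| f1.call(c[:tp], c[:fp], c[:fn]) } / per_class.size

# Micro F1: one F1 computed from the pooled counts.
pooled = per_class.values.each_with_object(Hash.new(0)) { |c, acc| c.each { |k, v| acc[k] += v } }
f1_micro = f1.call(pooled[:tp], pooled[:fp], pooled[:fn])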

#false_negative_count ⇒ Fixnum

The number of ground truth labels that are not matched by a Model-created label. Corresponds to the JSON property falseNegativeCount

Returns:

  • (Fixnum)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22200

def false_negative_count
  @false_negative_count
end

#false_positive_count ⇒ Fixnum

The number of Model-created labels that do not match a ground truth label. Corresponds to the JSON property falsePositiveCount

Returns:

  • (Fixnum)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22205

def false_positive_count
  @false_positive_count
end

#false_positive_rate ⇒ Float

False Positive Rate for the given confidence threshold. Corresponds to the JSON property falsePositiveRate

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22210

def false_positive_rate
  @false_positive_rate
end

#false_positive_rate_at1 ⇒ Float

The False Positive Rate when only considering the label that has the highest prediction score and is not below the confidence threshold for each DataItem. Corresponds to the JSON property falsePositiveRateAt1

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22216

def false_positive_rate_at1
  @false_positive_rate_at1
end

#max_predictions ⇒ Fixnum

Metrics are computed with the assumption that the Model always returns at most this many predictions (ordered by score, in descending order), but they all still need to meet the confidenceThreshold. Corresponds to the JSON property maxPredictions

Returns:

  • (Fixnum)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22223

def max_predictions
  @max_predictions
end

#precision ⇒ Float

Precision for the given confidence threshold. Corresponds to the JSON property precision

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22228

def precision
  @precision
end

#precision_at1 ⇒ Float

The precision when only considering the label that has the highest prediction score and is not below the confidence threshold for each DataItem. Corresponds to the JSON property precisionAt1

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22234

def precision_at1
  @precision_at1
end

#recall ⇒ Float

Recall (True Positive Rate) for the given confidence threshold. Corresponds to the JSON property recall

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22239

def recall
  @recall
end

#recall_at1 ⇒ Float

The Recall (True Positive Rate) when only considering the label that has the highest prediction score and is not below the confidence threshold for each DataItem. Corresponds to the JSON property recallAt1

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22246

def recall_at1
  @recall_at1
end

#true_negative_count ⇒ Fixnum

The number of labels that were not created by the Model but that, had they been created, would not have matched a ground truth label. Corresponds to the JSON property trueNegativeCount

Returns:

  • (Fixnum)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22252

def true_negative_count
  @true_negative_count
end

#true_positive_count ⇒ Fixnum

The number of Model-created labels that match a ground truth label. Corresponds to the JSON property truePositiveCount

Returns:

  • (Fixnum)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22257

def true_positive_count
  @true_positive_count
end
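
The four count fields describe the confusion table at this confidence threshold, so precision, recall, and the false positive rate should follow the standard identities. A consistency sketch, reusing the hypothetical metrics instance from the constructor example (nil counts coerce to 0.0):

tp = metrics.true_positive_count.to_f
fp = metrics.false_positive_count.to_f
fn = metrics.false_negative_count.to_f
tn = metrics.true_negative_count.to_f

precision = (tp + fp).zero? ? 0.0 : tp / (tp + fp)  # compare with metrics.precision
recall    = (tp + fn).zero? ? 0.0 : tp / (tp + fn)  # compare with metrics.recall
fpr       = (fp + tn).zero? ? 0.0 : fp / (fp + tn)  # compare with metrics.false_positive_rate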

Instance Method Details

#update!(**args) ⇒ Object

Update properties of this object



# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22264

def update!(**args)
  @confidence_threshold = args[:confidence_threshold] if args.key?(:confidence_threshold)
  @confusion_matrix = args[:confusion_matrix] if args.key?(:confusion_matrix)
  @f1_score = args[:f1_score] if args.key?(:f1_score)
  @f1_score_at1 = args[:f1_score_at1] if args.key?(:f1_score_at1)
  @f1_score_macro = args[:f1_score_macro] if args.key?(:f1_score_macro)
  @f1_score_micro = args[:f1_score_micro] if args.key?(:f1_score_micro)
  @false_negative_count = args[:false_negative_count] if args.key?(:false_negative_count)
  @false_positive_count = args[:false_positive_count] if args.key?(:false_positive_count)
  @false_positive_rate = args[:false_positive_rate] if args.key?(:false_positive_rate)
  @false_positive_rate_at1 = args[:false_positive_rate_at1] if args.key?(:false_positive_rate_at1)
  @max_predictions = args[:max_predictions] if args.key?(:max_predictions)
  @precision = args[:precision] if args.key?(:precision)
  @precision_at1 = args[:precision_at1] if args.key?(:precision_at1)
  @recall = args[:recall] if args.key?(:recall)
  @recall_at1 = args[:recall_at1] if args.key?(:recall_at1)
  @true_negative_count = args[:true_negative_count] if args.key?(:true_negative_count)
  @true_positive_count = args[:true_positive_count] if args.key?(:true_positive_count)
end
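
Since update! only assigns the keys that are present in args, it can also be called directly to overwrite selected properties while leaving the rest untouched; the values below are hypothetical:

metrics.update!(precision: 0.93, recall: 0.88)
metrics.precision # => 0.93
metrics.f1_score  # unchanged (still nil in this sketch)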