Class: Google::Apis::AiplatformV1::GoogleCloudAiplatformV1SchemaModelevaluationMetricsClassificationEvaluationMetricsConfidenceMetrics

Inherits:
Object
Includes:
Core::Hashable, Core::JsonObjectSupport
Defined in:
lib/google/apis/aiplatform_v1/classes.rb,
lib/google/apis/aiplatform_v1/representations.rb

Instance Attribute Summary

Instance Method Summary

Constructor Details

#initialize(**args) ⇒ GoogleCloudAiplatformV1SchemaModelevaluationMetricsClassificationEvaluationMetricsConfidenceMetrics

Returns a new instance of GoogleCloudAiplatformV1SchemaModelevaluationMetricsClassificationEvaluationMetricsConfidenceMetrics.



# File 'lib/google/apis/aiplatform_v1/classes.rb', line 19591

def initialize(**args)
   update!(**args)
end
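
A minimal construction sketch (the values are illustrative only, not taken from a real evaluation): the constructor accepts the attributes' snake_case names as keyword arguments and simply delegates to #update!.

require "google/apis/aiplatform_v1"

metrics = Google::Apis::AiplatformV1::GoogleCloudAiplatformV1SchemaModelevaluationMetricsClassificationEvaluationMetricsConfidenceMetrics.new(
  confidence_threshold: 0.5,   # hypothetical threshold
  precision: 0.9,
  recall: 0.8,
  true_positive_count: 80,
  false_positive_count: 9,
  false_negative_count: 20
)

metrics.recall # => 0.8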

Instance Attribute Details

#confidence_threshold ⇒ Float

Metrics are computed with an assumption that the Model never returns predictions with score lower than this value. Corresponds to the JSON property confidenceThreshold

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 19500

def confidence_threshold
  @confidence_threshold
end

#confusion_matrix ⇒ Google::Apis::AiplatformV1::GoogleCloudAiplatformV1SchemaModelevaluationMetricsConfusionMatrix

Confusion matrix of the evaluation for this confidence_threshold. Corresponds to the JSON property confusionMatrix



# File 'lib/google/apis/aiplatform_v1/classes.rb', line 19505

def confusion_matrix
  @confusion_matrix
end

#f1_score ⇒ Float

The harmonic mean of recall and precision. For summary metrics, it computes the micro-averaged F1 score. Corresponds to the JSON property f1Score

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 19511

def f1_score
  @f1_score
end
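
For reference, the harmonic-mean relationship stated above can be written out directly (a generic sketch, not code from this library):

# F1 is the harmonic mean of precision and recall.
def f1(precision, recall)
  return 0.0 if (precision + recall).zero?
  2.0 * precision * recall / (precision + recall)
end

f1(0.9, 0.8) # => ~0.847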

#f1_score_at1 ⇒ Float

The harmonic mean of recallAt1 and precisionAt1. Corresponds to the JSON property f1ScoreAt1

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 19516

def f1_score_at1
  @f1_score_at1
end

#f1_score_macro ⇒ Float

Macro-averaged F1 Score. Corresponds to the JSON property f1ScoreMacro

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 19521

def f1_score_macro
  @f1_score_macro
end

#f1_score_micro ⇒ Float

Micro-averaged F1 Score. Corresponds to the JSON property f1ScoreMicro

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 19526

def f1_score_micro
  @f1_score_micro
end
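
For intuition on how the micro and macro averages can differ (standard definitions sketched with hypothetical per-class counts; this is not library code): the micro average pools true positive, false positive, and false negative counts across classes before computing a single F1, while the macro average computes F1 per class and takes the unweighted mean, so a rare class weighs more heavily in the macro score.

f1 = ->(p, r) { (p + r).zero? ? 0.0 : 2.0 * p * r / (p + r) }

# Hypothetical per-class counts.
classes = [
  { tp: 80, fp: 10, fn: 20 },  # frequent class
  { tp: 5,  fp: 1,  fn: 15 }   # rare class
]

# Micro: aggregate the counts, then compute one F1.
tp = classes.sum { |c| c[:tp] }
fp = classes.sum { |c| c[:fp] }
fn = classes.sum { |c| c[:fn] }
micro_f1 = f1.call(tp.to_f / (tp + fp), tp.to_f / (tp + fn))  # ~0.79

# Macro: compute F1 per class, then take the unweighted mean.
macro_f1 = classes.sum { |c|
  f1.call(c[:tp].to_f / (c[:tp] + c[:fp]), c[:tp].to_f / (c[:tp] + c[:fn]))
} / classes.size  # ~0.61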

#false_negative_count ⇒ Fixnum

The number of ground truth labels that are not matched by a Model created label. Corresponds to the JSON property falseNegativeCount

Returns:

  • (Fixnum)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 19532

def false_negative_count
  @false_negative_count
end

#false_positive_count ⇒ Fixnum

The number of Model created labels that do not match a ground truth label. Corresponds to the JSON property falsePositiveCount

Returns:

  • (Fixnum)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 19537

def false_positive_count
  @false_positive_count
end

#false_positive_rate ⇒ Float

False Positive Rate for the given confidence threshold. Corresponds to the JSON property falsePositiveRate

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 19542

def false_positive_rate
  @false_positive_rate
end
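
The relationship to the count attributes below is the standard one, FPR = FP / (FP + TN); a generic sketch, not library code:

def false_positive_rate(false_positive_count, true_negative_count)
  denominator = false_positive_count + true_negative_count
  return 0.0 if denominator.zero?
  false_positive_count.to_f / denominator
end

false_positive_rate(9, 91) # => 0.09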

#false_positive_rate_at1 ⇒ Float

The False Positive Rate when considering only the label that has the highest prediction score and is not below the confidence threshold for each DataItem. Corresponds to the JSON property falsePositiveRateAt1

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 19548

def false_positive_rate_at1
  @false_positive_rate_at1
end

#max_predictions ⇒ Fixnum

Metrics are computed with an assumption that the Model always returns at most this many predictions (ordered by their score, in descending order), but they all still need to meet the confidenceThreshold. Corresponds to the JSON property maxPredictions

Returns:

  • (Fixnum)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 19555

def max_predictions
  @max_predictions
end

#precision ⇒ Float

Precision for the given confidence threshold. Corresponds to the JSON property precision

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 19560

def precision
  @precision
end

#precision_at1 ⇒ Float

The precision when considering only the label that has the highest prediction score and is not below the confidence threshold for each DataItem. Corresponds to the JSON property precisionAt1

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 19566

def precision_at1
  @precision_at1
end

#recall ⇒ Float

Recall (True Positive Rate) for the given confidence threshold. Corresponds to the JSON property recall

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 19571

def recall
  @recall
end
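
Precision and recall at a given threshold derive from the count attributes below in the usual way (standard definitions; the example values are hypothetical):

# precision = TP / (TP + FP); recall (true positive rate) = TP / (TP + FN)
def precision(tp, fp)
  (tp + fp).zero? ? 0.0 : tp.to_f / (tp + fp)
end

def recall(tp, fn)
  (tp + fn).zero? ? 0.0 : tp.to_f / (tp + fn)
end

precision(80, 9)  # => ~0.899
recall(80, 20)    # => 0.8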

#recall_at1 ⇒ Float

The Recall (True Positive Rate) when considering only the label that has the highest prediction score and is not below the confidence threshold for each DataItem. Corresponds to the JSON property recallAt1

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 19578

def recall_at1
  @recall_at1
end

#true_negative_count ⇒ Fixnum

The number of labels that the Model did not create and that, had they been created, would not have matched a ground truth label. Corresponds to the JSON property trueNegativeCount

Returns:

  • (Fixnum)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 19584

def true_negative_count
  @true_negative_count
end

#true_positive_count ⇒ Fixnum

The number of Model created labels that match a ground truth label. Corresponds to the JSON property truePositiveCount

Returns:

  • (Fixnum)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 19589

def true_positive_count
  @true_positive_count
end

Instance Method Details

#update!(**args) ⇒ Object

Update properties of this object



# File 'lib/google/apis/aiplatform_v1/classes.rb', line 19596

def update!(**args)
  @confidence_threshold = args[:confidence_threshold] if args.key?(:confidence_threshold)
  @confusion_matrix = args[:confusion_matrix] if args.key?(:confusion_matrix)
  @f1_score = args[:f1_score] if args.key?(:f1_score)
  @f1_score_at1 = args[:f1_score_at1] if args.key?(:f1_score_at1)
  @f1_score_macro = args[:f1_score_macro] if args.key?(:f1_score_macro)
  @f1_score_micro = args[:f1_score_micro] if args.key?(:f1_score_micro)
  @false_negative_count = args[:false_negative_count] if args.key?(:false_negative_count)
  @false_positive_count = args[:false_positive_count] if args.key?(:false_positive_count)
  @false_positive_rate = args[:false_positive_rate] if args.key?(:false_positive_rate)
  @false_positive_rate_at1 = args[:false_positive_rate_at1] if args.key?(:false_positive_rate_at1)
  @max_predictions = args[:max_predictions] if args.key?(:max_predictions)
  @precision = args[:precision] if args.key?(:precision)
  @precision_at1 = args[:precision_at1] if args.key?(:precision_at1)
  @recall = args[:recall] if args.key?(:recall)
  @recall_at1 = args[:recall_at1] if args.key?(:recall_at1)
  @true_negative_count = args[:true_negative_count] if args.key?(:true_negative_count)
  @true_positive_count = args[:true_positive_count] if args.key?(:true_positive_count)
end
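
A short usage sketch (values are illustrative): because each assignment in #update! is guarded by args.key?, only the keys you pass are overwritten and everything else is left as it was.

klass = Google::Apis::AiplatformV1::GoogleCloudAiplatformV1SchemaModelevaluationMetricsClassificationEvaluationMetricsConfidenceMetrics
metrics = klass.new(confidence_threshold: 0.5, recall: 0.8)

metrics.update!(precision: 0.9, f1_score: 0.847)
metrics.recall    # => 0.8 (unchanged)
metrics.precision # => 0.9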