Class: Google::Apis::AiplatformV1::GoogleCloudAiplatformV1SchemaModelevaluationMetricsClassificationEvaluationMetricsConfidenceMetrics

Inherits:
Object
Includes:
Core::Hashable, Core::JsonObjectSupport
Defined in:
lib/google/apis/aiplatform_v1/classes.rb,
lib/google/apis/aiplatform_v1/representations.rb

Instance Attribute Summary

Instance Method Summary

Constructor Details

#initialize(**args) ⇒ GoogleCloudAiplatformV1SchemaModelevaluationMetricsClassificationEvaluationMetricsConfidenceMetrics

Returns a new instance of GoogleCloudAiplatformV1SchemaModelevaluationMetricsClassificationEvaluationMetricsConfidenceMetrics.



# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22789

def initialize(**args)
   update!(**args)
end
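
A minimal usage sketch (the attribute names come from this page; the values and the ConfidenceMetrics shorthand constant are hypothetical). In practice instances of this class are usually returned by model-evaluation calls, but any subset of the documented attributes can be passed as keyword arguments:

require "google/apis/aiplatform_v1"

# Shorthand for the (long) class documented on this page.
ConfidenceMetrics =
  Google::Apis::AiplatformV1::GoogleCloudAiplatformV1SchemaModelevaluationMetricsClassificationEvaluationMetricsConfidenceMetrics

# Attribute names use the Ruby snake_case form; each maps to the JSON
# property listed under the corresponding attribute below.
metrics = ConfidenceMetrics.new(
  confidence_threshold: 0.5,
  precision: 0.9,
  recall: 0.75
)
metrics.precision # => 0.9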

Instance Attribute Details

#confidence_threshold ⇒ Float

Metrics are computed under the assumption that the Model never returns predictions with a score lower than this value. Corresponds to the JSON property confidenceThreshold

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22698

def confidence_threshold
  @confidence_threshold
end

#confusion_matrix ⇒ Google::Apis::AiplatformV1::GoogleCloudAiplatformV1SchemaModelevaluationMetricsConfusionMatrix

Confusion matrix of the evaluation for this confidence_threshold. Corresponds to the JSON property confusionMatrix



# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22703

def confusion_matrix
  @confusion_matrix
end

#f1_score ⇒ Float

The harmonic mean of recall and precision. For summary metrics, it computes the micro-averaged F1 score. Corresponds to the JSON property f1Score

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22709

def f1_score
  @f1_score
end
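
For reference, the harmonic mean mentioned above is the standard F1 formula. A small illustrative sketch (the harmonic_mean_f1 helper and its inputs are hypothetical, not part of this class):

# Illustrative only: f1_score is the harmonic mean of precision and
# recall at the same confidence threshold (micro-averaged for the
# summary metrics).
def harmonic_mean_f1(precision, recall)
  return 0.0 if (precision + recall).zero?
  2.0 * precision * recall / (precision + recall)
end

harmonic_mean_f1(0.9, 0.75) # => ~0.818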

#f1_score_at1 ⇒ Float

The harmonic mean of recallAt1 and precisionAt1. Corresponds to the JSON property f1ScoreAt1

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22714

def f1_score_at1
  @f1_score_at1
end

#f1_score_macro ⇒ Float

Macro-averaged F1 Score. Corresponds to the JSON property f1ScoreMacro

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22719

def f1_score_macro
  @f1_score_macro
end

#f1_score_micro ⇒ Float

Micro-averaged F1 Score. Corresponds to the JSON property f1ScoreMicro

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22724

def f1_score_micro
  @f1_score_micro
end
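
The difference between the macro- and micro-averaged fields above follows the conventional definitions. A hedged sketch with hypothetical per-class counts (the f1 helper, labels, and numbers are illustrative; the service computes f1_score_macro and f1_score_micro itself):

# Illustrative only: conventional macro- vs micro-averaging over
# hypothetical per-class counts.
def f1(tp, fp, fn)
  precision = tp.to_f / (tp + fp)
  recall    = tp.to_f / (tp + fn)
  (precision + recall).zero? ? 0.0 : 2 * precision * recall / (precision + recall)
end

per_class = {
  "cat" => { tp: 40, fp: 10, fn: 5 },
  "dog" => { tp: 5,  fp: 2,  fn: 20 }
}

# Macro: compute F1 per class, then average, weighting every class equally.
macro = per_class.values.sum { |c| f1(c[:tp], c[:fp], c[:fn]) } / per_class.size

# Micro: pool the counts across classes, then compute one global F1,
# so frequent classes dominate.
totals = per_class.values.each_with_object(Hash.new(0)) { |c, t| c.each { |k, v| t[k] += v } }
micro = f1(totals[:tp], totals[:fp], totals[:fn])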

#false_negative_count ⇒ Fixnum

The number of ground truth labels that are not matched by a Model-created label. Corresponds to the JSON property falseNegativeCount

Returns:

  • (Fixnum)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22730

def false_negative_count
  @false_negative_count
end

#false_positive_count ⇒ Fixnum

The number of Model-created labels that do not match a ground truth label. Corresponds to the JSON property falsePositiveCount

Returns:

  • (Fixnum)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22735

def false_positive_count
  @false_positive_count
end

#false_positive_rate ⇒ Float

False Positive Rate for the given confidence threshold. Corresponds to the JSON property falsePositiveRate

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22740

def false_positive_rate
  @false_positive_rate
end

#false_positive_rate_at1 ⇒ Float

The False Positive Rate when, for each DataItem, only the label with the highest prediction score (and not below the confidence threshold) is considered. Corresponds to the JSON property falsePositiveRateAt1

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22746

def false_positive_rate_at1
  @false_positive_rate_at1
end

#max_predictions ⇒ Fixnum

Metrics are computed under the assumption that the Model always returns at most this many predictions (ordered by their score, descending), all of which still need to meet the confidenceThreshold. Corresponds to the JSON property maxPredictions

Returns:

  • (Fixnum)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22753

def max_predictions
  @max_predictions
end

#precision ⇒ Float

Precision for the given confidence threshold. Corresponds to the JSON property precision

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22758

def precision
  @precision
end

#precision_at1 ⇒ Float

The precision when, for each DataItem, only the label with the highest prediction score (and not below the confidence threshold) is considered. Corresponds to the JSON property precisionAt1

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22764

def precision_at1
  @precision_at1
end

#recall ⇒ Float

Recall (True Positive Rate) for the given confidence threshold. Corresponds to the JSON property recall

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22769

def recall
  @recall
end

#recall_at1 ⇒ Float

The Recall (True Positive Rate) when, for each DataItem, only the label with the highest prediction score (and not below the confidence threshold) is considered. Corresponds to the JSON property recallAt1

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22776

def recall_at1
  @recall_at1
end
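
The four "@1" metrics above (f1_score_at1, false_positive_rate_at1, precision_at1, recall_at1) all apply the same restriction. A hedged sketch of that selection step with hypothetical data (labels, scores, and the threshold are illustrative, not library code):

# Illustrative only: the "@1" metrics consider, for each DataItem, just
# the single highest-scoring label, and only when that score is not
# below the confidence threshold.
threshold = 0.5
items = [
  { truth: "cat", scores: { "cat" => 0.9, "dog" => 0.1 } }, # counted, correct
  { truth: "dog", scores: { "cat" => 0.6, "dog" => 0.4 } }, # counted, wrong
  { truth: "cat", scores: { "cat" => 0.4, "dog" => 0.3 } }  # top score below threshold
]

top1 = items.map do |item|
  label, score = item[:scores].max_by { |_, s| s }
  { truth: item[:truth], predicted: (score >= threshold ? label : nil) }
end
# precisionAt1, recallAt1, f1ScoreAt1, and falsePositiveRateAt1 are then
# computed over these top-1 predictions only.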

#true_negative_count ⇒ Fixnum

The number of labels that the Model did not create and that, had they been created, would not have matched a ground truth label. Corresponds to the JSON property trueNegativeCount

Returns:

  • (Fixnum)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22782

def true_negative_count
  @true_negative_count
end

#true_positive_count ⇒ Fixnum

The number of Model-created labels that match a ground truth label. Corresponds to the JSON property truePositiveCount

Returns:

  • (Fixnum)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22787

def true_positive_count
  @true_positive_count
end
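
The four count attributes above determine the ratio metrics at the same threshold. A sketch of the standard definitions (illustrative values only; the service populates precision, recall, and false_positive_rate directly):

# Illustrative only: conventional definitions relating the counts above
# to the ratio metrics reported at the same confidence threshold.
tp, fp, tn, fn = 90, 10, 80, 20

precision           = tp.to_f / (tp + fp) # => 0.9
recall              = tp.to_f / (tp + fn) # => ~0.818  (True Positive Rate)
false_positive_rate = fp.to_f / (fp + tn) # => ~0.111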

Instance Method Details

#update!(**args) ⇒ Object

Updates properties of this object.



# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22794

def update!(**args)
  @confidence_threshold = args[:confidence_threshold] if args.key?(:confidence_threshold)
  @confusion_matrix = args[:confusion_matrix] if args.key?(:confusion_matrix)
  @f1_score = args[:f1_score] if args.key?(:f1_score)
  @f1_score_at1 = args[:f1_score_at1] if args.key?(:f1_score_at1)
  @f1_score_macro = args[:f1_score_macro] if args.key?(:f1_score_macro)
  @f1_score_micro = args[:f1_score_micro] if args.key?(:f1_score_micro)
  @false_negative_count = args[:false_negative_count] if args.key?(:false_negative_count)
  @false_positive_count = args[:false_positive_count] if args.key?(:false_positive_count)
  @false_positive_rate = args[:false_positive_rate] if args.key?(:false_positive_rate)
  @false_positive_rate_at1 = args[:false_positive_rate_at1] if args.key?(:false_positive_rate_at1)
  @max_predictions = args[:max_predictions] if args.key?(:max_predictions)
  @precision = args[:precision] if args.key?(:precision)
  @precision_at1 = args[:precision_at1] if args.key?(:precision_at1)
  @recall = args[:recall] if args.key?(:recall)
  @recall_at1 = args[:recall_at1] if args.key?(:recall_at1)
  @true_negative_count = args[:true_negative_count] if args.key?(:true_negative_count)
  @true_positive_count = args[:true_positive_count] if args.key?(:true_positive_count)
end
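
A small usage sketch (hypothetical values): update! only assigns the keys present in args, so any attribute not mentioned keeps its current value.

require "google/apis/aiplatform_v1"

metrics = Google::Apis::AiplatformV1::GoogleCloudAiplatformV1SchemaModelevaluationMetricsClassificationEvaluationMetricsConfidenceMetrics.new(
  confidence_threshold: 0.5,
  precision: 0.9
)

# Only :recall is present in args, so confidence_threshold and precision
# are left untouched.
metrics.update!(recall: 0.75)
metrics.recall               # => 0.75
metrics.confidence_threshold # => 0.5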