Class: Google::Apis::AiplatformV1::GoogleCloudAiplatformV1SchemaModelevaluationMetricsClassificationEvaluationMetricsConfidenceMetrics

Inherits:
Object
Includes:
Core::Hashable, Core::JsonObjectSupport
Defined in:
lib/google/apis/aiplatform_v1/classes.rb,
lib/google/apis/aiplatform_v1/representations.rb

Instance Attribute Summary

Instance Method Summary

Constructor Details

#initialize(**args) ⇒ GoogleCloudAiplatformV1SchemaModelevaluationMetricsClassificationEvaluationMetricsConfidenceMetrics

Returns a new instance of GoogleCloudAiplatformV1SchemaModelevaluationMetricsClassificationEvaluationMetricsConfidenceMetrics.



# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22683

def initialize(**args)
   update!(**args)
end
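
A minimal construction sketch. The values below are illustrative placeholders; in practice instances of this class are usually populated from API responses rather than built by hand.

require 'google/apis/aiplatform_v1'

metrics = Google::Apis::AiplatformV1::GoogleCloudAiplatformV1SchemaModelevaluationMetricsClassificationEvaluationMetricsConfidenceMetrics.new(
  confidence_threshold: 0.5,
  true_positive_count:  90,
  false_positive_count: 10,
  false_negative_count: 15,
  true_negative_count:  885
)

metrics.confidence_threshold # => 0.5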

Instance Attribute Details

#confidence_threshold ⇒ Float

Metrics are computed assuming the Model never returns predictions with a score lower than this value. Corresponds to the JSON property confidenceThreshold

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22592

def confidence_threshold
  @confidence_threshold
end

#confusion_matrix ⇒ Google::Apis::AiplatformV1::GoogleCloudAiplatformV1SchemaModelevaluationMetricsConfusionMatrix

Confusion matrix of the evaluation for this confidence_threshold. Corresponds to the JSON property confusionMatrix



# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22597

def confusion_matrix
  @confusion_matrix
end

#f1_score ⇒ Float

The harmonic mean of recall and precision. For summary metrics, it computes the micro-averaged F1 score. Corresponds to the JSON property f1Score

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22603

def f1_score
  @f1_score
end
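
Since the F1 score is the harmonic mean of precision and recall, it can be reproduced from those two values. A small illustrative helper (not part of the generated client), with a guard for the degenerate zero case:

def f1_from(precision, recall)
  # Harmonic mean of precision and recall.
  return 0.0 if precision.zero? && recall.zero?
  2.0 * precision * recall / (precision + recall)
end

f1_from(0.8, 0.6) # => ~0.686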

#f1_score_at1 ⇒ Float

The harmonic mean of recallAt1 and precisionAt1. Corresponds to the JSON property f1ScoreAt1

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22608

def f1_score_at1
  @f1_score_at1
end

#f1_score_macro ⇒ Float

Macro-averaged F1 Score. Corresponds to the JSON property f1ScoreMacro

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22613

def f1_score_macro
  @f1_score_macro
end

#f1_score_micro ⇒ Float

Micro-averaged F1 Score. Corresponds to the JSON property f1ScoreMicro

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22618

def f1_score_micro
  @f1_score_micro
end

#false_negative_count ⇒ Fixnum

The number of ground truth labels that are not matched by a Model-created label. Corresponds to the JSON property falseNegativeCount

Returns:

  • (Fixnum)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22624

def false_negative_count
  @false_negative_count
end

#false_positive_count ⇒ Fixnum

The number of Model-created labels that do not match a ground truth label. Corresponds to the JSON property falsePositiveCount

Returns:

  • (Fixnum)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22629

def false_positive_count
  @false_positive_count
end

#false_positive_rate ⇒ Float

False Positive Rate for the given confidence threshold. Corresponds to the JSON property falsePositiveRate

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22634

def false_positive_rate
  @false_positive_rate
end
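
Assuming the standard definition FPR = FP / (FP + TN), this value can be cross-checked against the count attributes on the same object. A sketch, given a populated metrics instance as in the constructor example above:

fp = metrics.false_positive_count.to_f
tn = metrics.true_negative_count.to_f
fpr = (fp + tn).zero? ? nil : fp / (fp + tn)
# fpr should roughly match metrics.false_positive_rate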

#false_positive_rate_at1 ⇒ Float

The False Positive Rate when, for each DataItem, only the label with the highest prediction score (and not below the confidence threshold) is considered. Corresponds to the JSON property falsePositiveRateAt1

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22640

def false_positive_rate_at1
  @false_positive_rate_at1
end

#max_predictions ⇒ Fixnum

Metrics are computed assuming the Model always returns at most this many predictions (ordered by score, descending), all of which still need to meet the confidenceThreshold. Corresponds to the JSON property maxPredictions

Returns:

  • (Fixnum)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22647

def max_predictions
  @max_predictions
end

#precision ⇒ Float

Precision for the given confidence threshold. Corresponds to the JSON property precision

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22652

def precision
  @precision
end

#precision_at1 ⇒ Float

The precision when, for each DataItem, only the label with the highest prediction score (and not below the confidence threshold) is considered. Corresponds to the JSON property precisionAt1

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22658

def precision_at1
  @precision_at1
end

#recall ⇒ Float

Recall (True Positive Rate) for the given confidence threshold. Corresponds to the JSON property recall

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22663

def recall
  @recall
end
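
Under the usual definitions (precision = TP / (TP + FP), recall = TP / (TP + FN)), both values can likewise be derived from the count attributes. A sketch, again assuming a populated metrics instance and non-zero denominators:

tp = metrics.true_positive_count.to_f
precision = tp / (tp + metrics.false_positive_count)  # should roughly match metrics.precision
recall    = tp / (tp + metrics.false_negative_count)  # should roughly match metrics.recall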

#recall_at1 ⇒ Float

The Recall (True Positive Rate) when, for each DataItem, only the label with the highest prediction score (and not below the confidence threshold) is considered. Corresponds to the JSON property recallAt1

Returns:

  • (Float)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22670

def recall_at1
  @recall_at1
end

#true_negative_count ⇒ Fixnum

The number of labels that the Model did not create and that, had they been created, would not have matched a ground truth label. Corresponds to the JSON property trueNegativeCount

Returns:

  • (Fixnum)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22676

def true_negative_count
  @true_negative_count
end

#true_positive_count ⇒ Fixnum

The number of Model-created labels that match a ground truth label. Corresponds to the JSON property truePositiveCount

Returns:

  • (Fixnum)


# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22681

def true_positive_count
  @true_positive_count
end

Instance Method Details

#update!(**args) ⇒ Object

Update properties of this object



# File 'lib/google/apis/aiplatform_v1/classes.rb', line 22688

def update!(**args)
  @confidence_threshold = args[:confidence_threshold] if args.key?(:confidence_threshold)
  @confusion_matrix = args[:confusion_matrix] if args.key?(:confusion_matrix)
  @f1_score = args[:f1_score] if args.key?(:f1_score)
  @f1_score_at1 = args[:f1_score_at1] if args.key?(:f1_score_at1)
  @f1_score_macro = args[:f1_score_macro] if args.key?(:f1_score_macro)
  @f1_score_micro = args[:f1_score_micro] if args.key?(:f1_score_micro)
  @false_negative_count = args[:false_negative_count] if args.key?(:false_negative_count)
  @false_positive_count = args[:false_positive_count] if args.key?(:false_positive_count)
  @false_positive_rate = args[:false_positive_rate] if args.key?(:false_positive_rate)
  @false_positive_rate_at1 = args[:false_positive_rate_at1] if args.key?(:false_positive_rate_at1)
  @max_predictions = args[:max_predictions] if args.key?(:max_predictions)
  @precision = args[:precision] if args.key?(:precision)
  @precision_at1 = args[:precision_at1] if args.key?(:precision_at1)
  @recall = args[:recall] if args.key?(:recall)
  @recall_at1 = args[:recall_at1] if args.key?(:recall_at1)
  @true_negative_count = args[:true_negative_count] if args.key?(:true_negative_count)
  @true_positive_count = args[:true_positive_count] if args.key?(:true_positive_count)
end
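
As the implementation shows, update! only assigns attributes whose keys are present in the call, so it can be used for partial, in-place updates. Given a metrics instance as above:

metrics.update!(precision: 0.9)  # sets precision, leaves all other attributes unchanged
metrics.update!(recall: nil)     # a key that is present is assigned, even when the value is nil
metrics.update!                  # no keys given: nothing changes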