Class: Google::Cloud::AIPlatform::V1::Candidate

Inherits:
Object

Extended by:
Protobuf::MessageExts::ClassMethods

Includes:
Protobuf::MessageExts

Defined in:
proto_docs/google/cloud/aiplatform/v1/content.rb

Overview

A response candidate generated from the model.

Defined Under Namespace

Modules: FinishReason

Instance Attribute Summary

  • #avg_logprobs
  • #citation_metadata
  • #content
  • #finish_message
  • #finish_reason
  • #grounding_metadata
  • #index
  • #logprobs_result
  • #safety_ratings
  • #score

Instance Attribute Details

#avg_logprobs::Float (readonly)

Returns Output only. Average log probability score of the candidate.

Returns:

  • (::Float)

    Output only. Average log probability score of the candidate.



# File 'proto_docs/google/cloud/aiplatform/v1/content.rb', line 405

class Candidate
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # The reason why the model stopped generating tokens.
  # If empty, the model has not stopped generating the tokens.
  module FinishReason
    # The finish reason is unspecified.
    FINISH_REASON_UNSPECIFIED = 0

    # Token generation reached a natural stopping point or a configured stop
    # sequence.
    STOP = 1

    # Token generation reached the configured maximum output tokens.
    MAX_TOKENS = 2

    # Token generation stopped because the content potentially contains safety
    # violations. NOTE: When streaming,
    # {::Google::Cloud::AIPlatform::V1::Candidate#content content} is empty if
    # content filters block the output.
    SAFETY = 3

    # Token generation stopped because the content potentially contains
    # copyright violations.
    RECITATION = 4

    # All other reasons that stopped the token generation.
    OTHER = 5

    # Token generation stopped because the content contains forbidden terms.
    BLOCKLIST = 6

    # Token generation stopped for potentially containing prohibited content.
    PROHIBITED_CONTENT = 7

    # Token generation stopped because the content potentially contains
    # Sensitive Personally Identifiable Information (SPII).
    SPII = 8

    # The function call generated by the model is invalid.
    MALFORMED_FUNCTION_CALL = 9
  end
end
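
As a hedged usage sketch (not part of the generated API surface), the constants above can be compared against a candidate's finish_reason, which the Ruby protobuf runtime exposes as a symbol. The `response` object below is an assumption: a GenerateContentResponse returned by a PredictionService generate_content call made elsewhere.

require "google/cloud/ai_platform/v1"

# Hypothetical sketch; `response` is assumed to already hold a
# Google::Cloud::AIPlatform::V1::GenerateContentResponse.
response.candidates.each do |candidate|
  case candidate.finish_reason
  when :STOP, :MAX_TOKENS
    puts "Candidate #{candidate.index} finished normally."
  when :SAFETY, :RECITATION, :BLOCKLIST, :PROHIBITED_CONTENT, :SPII
    # finish_message, when present, carries a more detailed explanation.
    warn "Candidate #{candidate.index} was filtered: #{candidate.finish_message}"
  else
    warn "Candidate #{candidate.index} stopped: #{candidate.finish_reason}"
  end
end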

#citation_metadata::Google::Cloud::AIPlatform::V1::CitationMetadata (readonly)

Returns Output only. Source attribution of the generated content.

Returns:

  • (::Google::Cloud::AIPlatform::V1::CitationMetadata)

    Output only. Source attribution of the generated content.
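
A minimal, hypothetical sketch: the CitationMetadata message is documented on its own page, and the `citations` and `uri` names below are assumptions taken from that message rather than from this page.

# Hypothetical sketch; citation_metadata is nil when no attribution was produced.
if (meta = candidate.citation_metadata)
  meta.citations.each { |citation| puts citation.uri }
end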


#content::Google::Cloud::AIPlatform::V1::Content (readonly)

Returns Output only. Content parts of the candidate.

Returns:

  • (::Google::Cloud::AIPlatform::V1::Content)

    Output only. Content parts of the candidate.
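
As a hedged example, the plain text of a candidate could be assembled from its parts; it is assumed here that Part#text is empty for non-text parts such as function calls.

# Hypothetical sketch: join the text of all text-bearing parts of one candidate.
# Per the SAFETY note above, content may be empty when streaming output is blocked.
parts = candidate.content ? candidate.content.parts : []
text  = parts.map(&:text).reject(&:empty?).join
puts text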


#finish_message::String (readonly)

Returns Output only. Describes the reason the model stopped generating tokens in more detail. This is only filled when finish_reason is set.

Returns:

  • (::String)

    Output only. Describes the reason the model stopped generating tokens in more detail. This is only filled when finish_reason is set.




#finish_reason::Google::Cloud::AIPlatform::V1::Candidate::FinishReason (readonly)

Returns Output only. The reason why the model stopped generating tokens. If empty, the model has not stopped generating the tokens.

Returns:

  • (::Google::Cloud::AIPlatform::V1::Candidate::FinishReason)

    Output only. The reason why the model stopped generating tokens. If empty, the model has not stopped generating the tokens.


#grounding_metadata::Google::Cloud::AIPlatform::V1::GroundingMetadata (readonly)

Returns Output only. Metadata specifies sources used to ground generated content.

Returns:

  • (::Google::Cloud::AIPlatform::V1::GroundingMetadata)

    Output only. Metadata specifies sources used to ground generated content.


#index::Integer (readonly)

Returns Output only. Index of the candidate.

Returns:

  • (::Integer)

    Output only. Index of the candidate.




#logprobs_result::Google::Cloud::AIPlatform::V1::LogprobsResult (readonly)

Returns Output only. Log-likelihood scores for the response tokens and top tokens.

Returns:

  • (::Google::Cloud::AIPlatform::V1::LogprobsResult)

    Output only. Log-likelihood scores for the response tokens and top tokens.
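
A hedged sketch of reading the per-token scores; LogprobsResult is documented on its own page, and the `chosen_candidates`, `token`, and `log_probability` names below are assumptions taken from that message.

# Hypothetical sketch; logprobs_result may be nil if log probabilities were not requested.
if (logprobs = candidate.logprobs_result)
  logprobs.chosen_candidates.each do |entry|
    printf "%-15s %8.4f\n", entry.token, entry.log_probability
  end
end

Since #avg_logprobs is an average log probability, Math.exp(candidate.avg_logprobs) corresponds to a geometric-mean token probability, a rough overall confidence for the candidate.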


#safety_ratings::Array<::Google::Cloud::AIPlatform::V1::SafetyRating> (readonly)

Returns Output only. List of ratings for the safety of a response candidate.

There is at most one rating per category.

Returns:

  • (::Array<::Google::Cloud::AIPlatform::V1::SafetyRating>)

    Output only. List of ratings for the safety of a response candidate. There is at most one rating per category.
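
A hedged sketch of inspecting the ratings; SafetyRating is documented separately, and the `category` and `probability` fields referenced below are assumptions taken from that message.

# Hypothetical sketch: print one line per safety category (at most one rating each).
candidate.safety_ratings.each do |rating|
  puts "#{rating.category}: #{rating.probability}"
end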


#score::Float (readonly)

Returns Output only. Confidence score of the candidate.

Returns:

  • (::Float)

    Output only. Confidence score of the candidate.
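
As a final hedged sketch, a caller could rank candidates by this confidence score (again assuming `response` holds a GenerateContentResponse obtained elsewhere).

# Hypothetical sketch: pick the candidate with the highest confidence score.
best = response.candidates.max_by(&:score)
puts "Best candidate is ##{best.index} (score #{best.score})"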


