...

Package generativeaiinference

import "github.com/oracle/oci-go-sdk/generativeaiinference"
Index

func GetBaseChatRequestApiFormatEnumStringValues() []string
func GetBaseChatResponseApiFormatEnumStringValues() []string
func GetChatContentTypeEnumStringValues() []string
func GetCohereChatResponseFinishReasonEnumStringValues() []string
func GetCohereLlmInferenceRequestReturnLikelihoodsEnumStringValues() []string
func GetCohereLlmInferenceRequestTruncateEnumStringValues() []string
func GetCohereMessageRoleEnumStringValues() []string
func GetEmbedTextDetailsInputTypeEnumStringValues() []string
func GetEmbedTextDetailsTruncateEnumStringValues() []string
func GetLlmInferenceRequestRuntimeTypeEnumStringValues() []string
func GetLlmInferenceResponseRuntimeTypeEnumStringValues() []string
func GetServingModeServingTypeEnumStringValues() []string
func GetSummarizeTextDetailsExtractivenessEnumStringValues() []string
func GetSummarizeTextDetailsFormatEnumStringValues() []string
func GetSummarizeTextDetailsLengthEnumStringValues() []string
type BaseChatRequest
type BaseChatRequestApiFormatEnum
    func GetBaseChatRequestApiFormatEnumValues() []BaseChatRequestApiFormatEnum
    func GetMappingBaseChatRequestApiFormatEnum(val string) (BaseChatRequestApiFormatEnum, bool)
type BaseChatResponse
type BaseChatResponseApiFormatEnum
    func GetBaseChatResponseApiFormatEnumValues() []BaseChatResponseApiFormatEnum
    func GetMappingBaseChatResponseApiFormatEnum(val string) (BaseChatResponseApiFormatEnum, bool)
type ChatChoice
    func (m ChatChoice) String() string
    func (m ChatChoice) ValidateEnumValue() (bool, error)
type ChatContent
type ChatContentTypeEnum
    func GetChatContentTypeEnumValues() []ChatContentTypeEnum
    func GetMappingChatContentTypeEnum(val string) (ChatContentTypeEnum, bool)
type ChatDetails
    func (m ChatDetails) String() string
    func (m *ChatDetails) UnmarshalJSON(data []byte) (e error)
    func (m ChatDetails) ValidateEnumValue() (bool, error)
type ChatRequest
    func (request ChatRequest) BinaryRequestBody() (*common.OCIReadSeekCloser, bool)
    func (request ChatRequest) HTTPRequest(method, path string, binaryRequestBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (http.Request, error)
    func (request ChatRequest) RetryPolicy() *common.RetryPolicy
    func (request ChatRequest) String() string
    func (request ChatRequest) ValidateEnumValue() (bool, error)
type ChatResponse
    func (response ChatResponse) HTTPResponse() *http.Response
    func (response ChatResponse) String() string
type ChatResult
    func (m ChatResult) String() string
    func (m *ChatResult) UnmarshalJSON(data []byte) (e error)
    func (m ChatResult) ValidateEnumValue() (bool, error)
type Choice
    func (m Choice) String() string
    func (m Choice) ValidateEnumValue() (bool, error)
type Citation
    func (m Citation) String() string
    func (m Citation) ValidateEnumValue() (bool, error)
type CohereChatRequest
    func (m CohereChatRequest) MarshalJSON() (buff []byte, e error)
    func (m CohereChatRequest) String() string
    func (m CohereChatRequest) ValidateEnumValue() (bool, error)
type CohereChatResponse
    func (m CohereChatResponse) MarshalJSON() (buff []byte, e error)
    func (m CohereChatResponse) String() string
    func (m CohereChatResponse) ValidateEnumValue() (bool, error)
type CohereChatResponseFinishReasonEnum
    func GetCohereChatResponseFinishReasonEnumValues() []CohereChatResponseFinishReasonEnum
    func GetMappingCohereChatResponseFinishReasonEnum(val string) (CohereChatResponseFinishReasonEnum, bool)
type CohereLlmInferenceRequest
    func (m CohereLlmInferenceRequest) MarshalJSON() (buff []byte, e error)
    func (m CohereLlmInferenceRequest) String() string
    func (m CohereLlmInferenceRequest) ValidateEnumValue() (bool, error)
type CohereLlmInferenceRequestReturnLikelihoodsEnum
    func GetCohereLlmInferenceRequestReturnLikelihoodsEnumValues() []CohereLlmInferenceRequestReturnLikelihoodsEnum
    func GetMappingCohereLlmInferenceRequestReturnLikelihoodsEnum(val string) (CohereLlmInferenceRequestReturnLikelihoodsEnum, bool)
type CohereLlmInferenceRequestTruncateEnum
    func GetCohereLlmInferenceRequestTruncateEnumValues() []CohereLlmInferenceRequestTruncateEnum
    func GetMappingCohereLlmInferenceRequestTruncateEnum(val string) (CohereLlmInferenceRequestTruncateEnum, bool)
type CohereLlmInferenceResponse
    func (m CohereLlmInferenceResponse) MarshalJSON() (buff []byte, e error)
    func (m CohereLlmInferenceResponse) String() string
    func (m CohereLlmInferenceResponse) ValidateEnumValue() (bool, error)
type CohereMessage
    func (m CohereMessage) String() string
    func (m CohereMessage) ValidateEnumValue() (bool, error)
type CohereMessageRoleEnum
    func GetCohereMessageRoleEnumValues() []CohereMessageRoleEnum
    func GetMappingCohereMessageRoleEnum(val string) (CohereMessageRoleEnum, bool)
type DedicatedServingMode
    func (m DedicatedServingMode) MarshalJSON() (buff []byte, e error)
    func (m DedicatedServingMode) String() string
    func (m DedicatedServingMode) ValidateEnumValue() (bool, error)
type EmbedTextDetails
    func (m EmbedTextDetails) String() string
    func (m *EmbedTextDetails) UnmarshalJSON(data []byte) (e error)
    func (m EmbedTextDetails) ValidateEnumValue() (bool, error)
type EmbedTextDetailsInputTypeEnum
    func GetEmbedTextDetailsInputTypeEnumValues() []EmbedTextDetailsInputTypeEnum
    func GetMappingEmbedTextDetailsInputTypeEnum(val string) (EmbedTextDetailsInputTypeEnum, bool)
type EmbedTextDetailsTruncateEnum
    func GetEmbedTextDetailsTruncateEnumValues() []EmbedTextDetailsTruncateEnum
    func GetMappingEmbedTextDetailsTruncateEnum(val string) (EmbedTextDetailsTruncateEnum, bool)
type EmbedTextRequest
    func (request EmbedTextRequest) BinaryRequestBody() (*common.OCIReadSeekCloser, bool)
    func (request EmbedTextRequest) HTTPRequest(method, path string, binaryRequestBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (http.Request, error)
    func (request EmbedTextRequest) RetryPolicy() *common.RetryPolicy
    func (request EmbedTextRequest) String() string
    func (request EmbedTextRequest) ValidateEnumValue() (bool, error)
type EmbedTextResponse
    func (response EmbedTextResponse) HTTPResponse() *http.Response
    func (response EmbedTextResponse) String() string
type EmbedTextResult
    func (m EmbedTextResult) String() string
    func (m EmbedTextResult) ValidateEnumValue() (bool, error)
type GenerateTextDetails
    func (m GenerateTextDetails) String() string
    func (m *GenerateTextDetails) UnmarshalJSON(data []byte) (e error)
    func (m GenerateTextDetails) ValidateEnumValue() (bool, error)
type GenerateTextRequest
    func (request GenerateTextRequest) BinaryRequestBody() (*common.OCIReadSeekCloser, bool)
    func (request GenerateTextRequest) HTTPRequest(method, path string, binaryRequestBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (http.Request, error)
    func (request GenerateTextRequest) RetryPolicy() *common.RetryPolicy
    func (request GenerateTextRequest) String() string
    func (request GenerateTextRequest) ValidateEnumValue() (bool, error)
type GenerateTextResponse
    func (response GenerateTextResponse) HTTPResponse() *http.Response
    func (response GenerateTextResponse) String() string
type GenerateTextResult
    func (m GenerateTextResult) String() string
    func (m *GenerateTextResult) UnmarshalJSON(data []byte) (e error)
    func (m GenerateTextResult) ValidateEnumValue() (bool, error)
type GeneratedText
    func (m GeneratedText) String() string
    func (m GeneratedText) ValidateEnumValue() (bool, error)
type GenerativeAiInferenceClient
    func NewGenerativeAiInferenceClientWithConfigurationProvider(configProvider common.ConfigurationProvider) (client GenerativeAiInferenceClient, err error)
    func NewGenerativeAiInferenceClientWithOboToken(configProvider common.ConfigurationProvider, oboToken string) (client GenerativeAiInferenceClient, err error)
    func (client GenerativeAiInferenceClient) Chat(ctx context.Context, request ChatRequest) (response ChatResponse, err error)
    func (client *GenerativeAiInferenceClient) ConfigurationProvider() *common.ConfigurationProvider
    func (client GenerativeAiInferenceClient) EmbedText(ctx context.Context, request EmbedTextRequest) (response EmbedTextResponse, err error)
    func (client GenerativeAiInferenceClient) GenerateText(ctx context.Context, request GenerateTextRequest) (response GenerateTextResponse, err error)
    func (client *GenerativeAiInferenceClient) SetRegion(region string)
    func (client GenerativeAiInferenceClient) SummarizeText(ctx context.Context, request SummarizeTextRequest) (response SummarizeTextResponse, err error)
type GenericChatRequest
    func (m GenericChatRequest) MarshalJSON() (buff []byte, e error)
    func (m GenericChatRequest) String() string
    func (m GenericChatRequest) ValidateEnumValue() (bool, error)
type GenericChatResponse
    func (m GenericChatResponse) MarshalJSON() (buff []byte, e error)
    func (m GenericChatResponse) String() string
    func (m GenericChatResponse) ValidateEnumValue() (bool, error)
type LlamaLlmInferenceRequest
    func (m LlamaLlmInferenceRequest) MarshalJSON() (buff []byte, e error)
    func (m LlamaLlmInferenceRequest) String() string
    func (m LlamaLlmInferenceRequest) ValidateEnumValue() (bool, error)
type LlamaLlmInferenceResponse
    func (m LlamaLlmInferenceResponse) MarshalJSON() (buff []byte, e error)
    func (m LlamaLlmInferenceResponse) String() string
    func (m LlamaLlmInferenceResponse) ValidateEnumValue() (bool, error)
type LlmInferenceRequest
type LlmInferenceRequestRuntimeTypeEnum
    func GetLlmInferenceRequestRuntimeTypeEnumValues() []LlmInferenceRequestRuntimeTypeEnum
    func GetMappingLlmInferenceRequestRuntimeTypeEnum(val string) (LlmInferenceRequestRuntimeTypeEnum, bool)
type LlmInferenceResponse
type LlmInferenceResponseRuntimeTypeEnum
    func GetLlmInferenceResponseRuntimeTypeEnumValues() []LlmInferenceResponseRuntimeTypeEnum
    func GetMappingLlmInferenceResponseRuntimeTypeEnum(val string) (LlmInferenceResponseRuntimeTypeEnum, bool)
type Logprobs
    func (m Logprobs) String() string
    func (m Logprobs) ValidateEnumValue() (bool, error)
type Message
    func (m Message) String() string
    func (m *Message) UnmarshalJSON(data []byte) (e error)
    func (m Message) ValidateEnumValue() (bool, error)
type OnDemandServingMode
    func (m OnDemandServingMode) MarshalJSON() (buff []byte, e error)
    func (m OnDemandServingMode) String() string
    func (m OnDemandServingMode) ValidateEnumValue() (bool, error)
type SearchQuery
    func (m SearchQuery) String() string
    func (m SearchQuery) ValidateEnumValue() (bool, error)
type ServingMode
type ServingModeServingTypeEnum
    func GetMappingServingModeServingTypeEnum(val string) (ServingModeServingTypeEnum, bool)
    func GetServingModeServingTypeEnumValues() []ServingModeServingTypeEnum
type SummarizeTextDetails
    func (m SummarizeTextDetails) String() string
    func (m *SummarizeTextDetails) UnmarshalJSON(data []byte) (e error)
    func (m SummarizeTextDetails) ValidateEnumValue() (bool, error)
type SummarizeTextDetailsExtractivenessEnum
    func GetMappingSummarizeTextDetailsExtractivenessEnum(val string) (SummarizeTextDetailsExtractivenessEnum, bool)
    func GetSummarizeTextDetailsExtractivenessEnumValues() []SummarizeTextDetailsExtractivenessEnum
type SummarizeTextDetailsFormatEnum
    func GetMappingSummarizeTextDetailsFormatEnum(val string) (SummarizeTextDetailsFormatEnum, bool)
    func GetSummarizeTextDetailsFormatEnumValues() []SummarizeTextDetailsFormatEnum
type SummarizeTextDetailsLengthEnum
    func GetMappingSummarizeTextDetailsLengthEnum(val string) (SummarizeTextDetailsLengthEnum, bool)
    func GetSummarizeTextDetailsLengthEnumValues() []SummarizeTextDetailsLengthEnum
type SummarizeTextRequest
    func (request SummarizeTextRequest) BinaryRequestBody() (*common.OCIReadSeekCloser, bool)
    func (request SummarizeTextRequest) HTTPRequest(method, path string, binaryRequestBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (http.Request, error)
    func (request SummarizeTextRequest) RetryPolicy() *common.RetryPolicy
    func (request SummarizeTextRequest) String() string
    func (request SummarizeTextRequest) ValidateEnumValue() (bool, error)
type SummarizeTextResponse
    func (response SummarizeTextResponse) HTTPResponse() *http.Response
    func (response SummarizeTextResponse) String() string
type SummarizeTextResult
    func (m SummarizeTextResult) String() string
    func (m SummarizeTextResult) ValidateEnumValue() (bool, error)
type TextContent
    func (m TextContent) MarshalJSON() (buff []byte, e error)
    func (m TextContent) String() string
    func (m TextContent) ValidateEnumValue() (bool, error)
type TokenLikelihood
    func (m TokenLikelihood) String() string
    func (m TokenLikelihood) ValidateEnumValue() (bool, error)

Package files

base_chat_request.go base_chat_response.go chat_choice.go chat_content.go chat_details.go chat_request_response.go chat_result.go choice.go citation.go cohere_chat_request.go cohere_chat_response.go cohere_llm_inference_request.go cohere_llm_inference_response.go cohere_message.go dedicated_serving_mode.go embed_text_details.go embed_text_request_response.go embed_text_result.go generate_text_details.go generate_text_request_response.go generate_text_result.go generated_text.go generativeaiinference_client.go generic_chat_request.go generic_chat_response.go llama_llm_inference_request.go llama_llm_inference_response.go llm_inference_request.go llm_inference_response.go logprobs.go message.go on_demand_serving_mode.go search_query.go serving_mode.go summarize_text_details.go summarize_text_request_response.go summarize_text_result.go text_content.go token_likelihood.go

func GetBaseChatRequestApiFormatEnumStringValues

func GetBaseChatRequestApiFormatEnumStringValues() []string

GetBaseChatRequestApiFormatEnumStringValues Enumerates the set of string values for BaseChatRequestApiFormatEnum

func GetBaseChatResponseApiFormatEnumStringValues

func GetBaseChatResponseApiFormatEnumStringValues() []string

GetBaseChatResponseApiFormatEnumStringValues Enumerates the set of string values for BaseChatResponseApiFormatEnum

func GetChatContentTypeEnumStringValues

func GetChatContentTypeEnumStringValues() []string

GetChatContentTypeEnumStringValues Enumerates the set of string values for ChatContentTypeEnum

func GetCohereChatResponseFinishReasonEnumStringValues

func GetCohereChatResponseFinishReasonEnumStringValues() []string

GetCohereChatResponseFinishReasonEnumStringValues Enumerates the set of string values for CohereChatResponseFinishReasonEnum

func GetCohereLlmInferenceRequestReturnLikelihoodsEnumStringValues

func GetCohereLlmInferenceRequestReturnLikelihoodsEnumStringValues() []string

GetCohereLlmInferenceRequestReturnLikelihoodsEnumStringValues Enumerates the set of string values for CohereLlmInferenceRequestReturnLikelihoodsEnum

func GetCohereLlmInferenceRequestTruncateEnumStringValues

func GetCohereLlmInferenceRequestTruncateEnumStringValues() []string

GetCohereLlmInferenceRequestTruncateEnumStringValues Enumerates the set of string values for CohereLlmInferenceRequestTruncateEnum

func GetCohereMessageRoleEnumStringValues

func GetCohereMessageRoleEnumStringValues() []string

GetCohereMessageRoleEnumStringValues Enumerates the set of string values for CohereMessageRoleEnum

func GetEmbedTextDetailsInputTypeEnumStringValues

func GetEmbedTextDetailsInputTypeEnumStringValues() []string

GetEmbedTextDetailsInputTypeEnumStringValues Enumerates the set of string values for EmbedTextDetailsInputTypeEnum

func GetEmbedTextDetailsTruncateEnumStringValues

func GetEmbedTextDetailsTruncateEnumStringValues() []string

GetEmbedTextDetailsTruncateEnumStringValues Enumerates the set of string values for EmbedTextDetailsTruncateEnum

func GetLlmInferenceRequestRuntimeTypeEnumStringValues

func GetLlmInferenceRequestRuntimeTypeEnumStringValues() []string

GetLlmInferenceRequestRuntimeTypeEnumStringValues Enumerates the set of string values for LlmInferenceRequestRuntimeTypeEnum

func GetLlmInferenceResponseRuntimeTypeEnumStringValues

func GetLlmInferenceResponseRuntimeTypeEnumStringValues() []string

GetLlmInferenceResponseRuntimeTypeEnumStringValues Enumerates the set of string values for LlmInferenceResponseRuntimeTypeEnum

func GetServingModeServingTypeEnumStringValues

func GetServingModeServingTypeEnumStringValues() []string

GetServingModeServingTypeEnumStringValues Enumerates the set of string values for ServingModeServingTypeEnum

func GetSummarizeTextDetailsExtractivenessEnumStringValues

func GetSummarizeTextDetailsExtractivenessEnumStringValues() []string

GetSummarizeTextDetailsExtractivenessEnumStringValues Enumerates the set of string values for SummarizeTextDetailsExtractivenessEnum

func GetSummarizeTextDetailsFormatEnumStringValues

func GetSummarizeTextDetailsFormatEnumStringValues() []string

GetSummarizeTextDetailsFormatEnumStringValues Enumerates the set of string values for SummarizeTextDetailsFormatEnum

func GetSummarizeTextDetailsLengthEnumStringValues

func GetSummarizeTextDetailsLengthEnumStringValues() []string

GetSummarizeTextDetailsLengthEnumStringValues Enumerates the set of string values for SummarizeTextDetailsLengthEnum

type BaseChatRequest

BaseChatRequest Base class for chat inference requests

type BaseChatRequest interface {
}

type BaseChatRequestApiFormatEnum

BaseChatRequestApiFormatEnum Enum with underlying type: string

type BaseChatRequestApiFormatEnum string

Set of constants representing the allowable values for BaseChatRequestApiFormatEnum

const (
    BaseChatRequestApiFormatCohere  BaseChatRequestApiFormatEnum = "COHERE"
    BaseChatRequestApiFormatGeneric BaseChatRequestApiFormatEnum = "GENERIC"
)

func GetBaseChatRequestApiFormatEnumValues

func GetBaseChatRequestApiFormatEnumValues() []BaseChatRequestApiFormatEnum

GetBaseChatRequestApiFormatEnumValues Enumerates the set of values for BaseChatRequestApiFormatEnum

func GetMappingBaseChatRequestApiFormatEnum

func GetMappingBaseChatRequestApiFormatEnum(val string) (BaseChatRequestApiFormatEnum, bool)

GetMappingBaseChatRequestApiFormatEnum performs a case-insensitive comparison on the enum value and returns the desired enum
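
For illustration only, here is a minimal sketch of using the mapping helper. The import path follows this page; depending on how the module is consumed, a versioned path such as github.com/oracle/oci-go-sdk/v65/generativeaiinference may be required.

package main

import (
    "fmt"

    "github.com/oracle/oci-go-sdk/generativeaiinference"
)

func main() {
    // The comparison is case-insensitive; the boolean reports whether
    // the input maps to a known enum value.
    if format, ok := generativeaiinference.GetMappingBaseChatRequestApiFormatEnum("cohere"); ok {
        fmt.Println(format) // COHERE
    }
}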

type BaseChatResponse

BaseChatResponse Base class for chat inference responses

type BaseChatResponse interface {
}

type BaseChatResponseApiFormatEnum

BaseChatResponseApiFormatEnum Enum with underlying type: string

type BaseChatResponseApiFormatEnum string

Set of constants representing the allowable values for BaseChatResponseApiFormatEnum

const (
    BaseChatResponseApiFormatCohere  BaseChatResponseApiFormatEnum = "COHERE"
    BaseChatResponseApiFormatGeneric BaseChatResponseApiFormatEnum = "GENERIC"
)

func GetBaseChatResponseApiFormatEnumValues

func GetBaseChatResponseApiFormatEnumValues() []BaseChatResponseApiFormatEnum

GetBaseChatResponseApiFormatEnumValues Enumerates the set of values for BaseChatResponseApiFormatEnum

func GetMappingBaseChatResponseApiFormatEnum

func GetMappingBaseChatResponseApiFormatEnum(val string) (BaseChatResponseApiFormatEnum, bool)

GetMappingBaseChatResponseApiFormatEnum performs a case-insensitive comparison on the enum value and returns the desired enum

type ChatChoice

ChatChoice Represents a single instance of the chat response.

type ChatChoice struct {

    // The index of the chat.
    Index *int `mandatory:"true" json:"index"`

    Message *Message `mandatory:"true" json:"message"`

    // The reason why the model stopped generating tokens.
    // Stops if the model hits a natural stop point or a provided stop sequence. Returns the length if the tokens reach the specified maximum number of tokens.
    FinishReason *string `mandatory:"true" json:"finishReason"`

    Logprobs *Logprobs `mandatory:"false" json:"logprobs"`
}

func (ChatChoice) String

func (m ChatChoice) String() string

func (ChatChoice) ValidateEnumValue

func (m ChatChoice) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.

type ChatContent

ChatContent The base class for the chat content.

type ChatContent interface {
}

type ChatContentTypeEnum

ChatContentTypeEnum Enum with underlying type: string

type ChatContentTypeEnum string

Set of constants representing the allowable values for ChatContentTypeEnum

const (
    ChatContentTypeText ChatContentTypeEnum = "TEXT"
)

func GetChatContentTypeEnumValues

func GetChatContentTypeEnumValues() []ChatContentTypeEnum

GetChatContentTypeEnumValues Enumerates the set of values for ChatContentTypeEnum

func GetMappingChatContentTypeEnum

func GetMappingChatContentTypeEnum(val string) (ChatContentTypeEnum, bool)

GetMappingChatContentTypeEnum performs a case-insensitive comparison on the enum value and returns the desired enum

type ChatDetails

ChatDetails Details of the conversation for the model to respond to.

type ChatDetails struct {

    // The OCID of the compartment that the user is authorized to use to call into the Generative AI service.
    CompartmentId *string `mandatory:"true" json:"compartmentId"`

    ServingMode ServingMode `mandatory:"true" json:"servingMode"`

    ChatRequest BaseChatRequest `mandatory:"false" json:"chatRequest"`
}

func (ChatDetails) String

func (m ChatDetails) String() string

func (*ChatDetails) UnmarshalJSON

func (m *ChatDetails) UnmarshalJSON(data []byte) (e error)

UnmarshalJSON unmarshals from json

func (ChatDetails) ValidateEnumValue

func (m ChatDetails) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.
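
To make the field shapes concrete, here is a minimal sketch of constructing a ChatDetails value. It assumes on-demand serving through OnDemandServingMode (listed in the index above; its ModelId field is an assumption here, since that type's definition falls outside this page) and uses the pointer helpers from the common package. Both identifiers are caller-supplied placeholders.

package example

import (
    "github.com/oracle/oci-go-sdk/common"
    "github.com/oracle/oci-go-sdk/generativeaiinference"
)

// buildChatDetails is a hypothetical helper; compartmentID and modelID are
// placeholder values supplied by the caller.
func buildChatDetails(compartmentID, modelID string) generativeaiinference.ChatDetails {
    return generativeaiinference.ChatDetails{
        CompartmentId: common.String(compartmentID),
        ServingMode: generativeaiinference.OnDemandServingMode{
            ModelId: common.String(modelID), // assumed OnDemandServingMode field
        },
        ChatRequest: generativeaiinference.CohereChatRequest{
            Message:     common.String("Tell me about Oracle Cloud."),
            MaxTokens:   common.Int(500),
            Temperature: common.Float64(0.7),
        },
    }
}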

type ChatRequest

ChatRequest wrapper for the Chat operation

# See also

Click https://docs.cloud.oracle.com/en-us/iaas/tools/go-sdk-examples/65.65.0/generativeaiinference/Chat.go.html to see an example of how to use ChatRequest.

type ChatRequest struct {

    // Details of the conversation for the model to respond to.
    ChatDetails `contributesTo:"body"`

    // A token that uniquely identifies a request so it can be retried in case of a timeout or
    // server error without risk of executing that same action again. Retry tokens expire after 24
    // hours, but can be invalidated before then due to conflicting operations. For example, if a resource
    // has been deleted and purged from the system, then a retry of the original creation request
    // might be rejected.
    OpcRetryToken *string `mandatory:"false" contributesTo:"header" name:"opc-retry-token"`

    // The client request ID for tracing.
    OpcRequestId *string `mandatory:"false" contributesTo:"header" name:"opc-request-id"`

    // Metadata about the request. This information will not be transmitted to the service, but
    // represents information that the SDK will consume to drive retry behavior.
    RequestMetadata common.RequestMetadata
}

func (ChatRequest) BinaryRequestBody

func (request ChatRequest) BinaryRequestBody() (*common.OCIReadSeekCloser, bool)

BinaryRequestBody implements the OCIRequest interface

func (ChatRequest) HTTPRequest

func (request ChatRequest) HTTPRequest(method, path string, binaryRequestBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (http.Request, error)

HTTPRequest implements the OCIRequest interface

func (ChatRequest) RetryPolicy

func (request ChatRequest) RetryPolicy() *common.RetryPolicy

RetryPolicy implements the OCIRetryableRequest interface. This retrieves the specified retry policy.

func (ChatRequest) String

func (request ChatRequest) String() string

func (ChatRequest) ValidateEnumValue

func (request ChatRequest) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.
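
Because RetryPolicy is read from the RequestMetadata field, retry behavior is configured when the request value is built. A sketch, assuming the DefaultRetryPolicy helper from the common package:

package example

import (
    "github.com/oracle/oci-go-sdk/common"
    "github.com/oracle/oci-go-sdk/generativeaiinference"
)

// withDefaultRetry is a hypothetical helper that attaches the SDK's default
// retry policy to a ChatRequest through its RequestMetadata.
func withDefaultRetry(req generativeaiinference.ChatRequest) generativeaiinference.ChatRequest {
    policy := common.DefaultRetryPolicy()
    req.RequestMetadata = common.RequestMetadata{RetryPolicy: &policy}
    return req
}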

type ChatResponse

ChatResponse wrapper for the Chat operation

type ChatResponse struct {

    // The underlying http response
    RawResponse *http.Response

    // The ChatResult instance
    ChatResult `presentIn:"body"`

    // For optimistic concurrency control. See `if-match`.
    Etag *string `presentIn:"header" name:"etag"`

    // Unique Oracle-assigned identifier for the request. If you need to contact
    // Oracle about a particular request, please provide the request ID.
    OpcRequestId *string `presentIn:"header" name:"opc-request-id"`
}

func (ChatResponse) HTTPResponse

func (response ChatResponse) HTTPResponse() *http.Response

HTTPResponse implements the OCIResponse interface

func (ChatResponse) String

func (response ChatResponse) String() string
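
Tying the request and response wrappers together, here is a minimal end-to-end sketch of one Chat round trip. The OCIDs are placeholders, OnDemandServingMode's ModelId field is an assumption (its definition falls outside this page), and reading the reply assumes the service returned the Cohere API format.

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/oracle/oci-go-sdk/common"
    "github.com/oracle/oci-go-sdk/generativeaiinference"
)

func main() {
    // Build a client from the default OCI configuration (~/.oci/config).
    client, err := generativeaiinference.NewGenerativeAiInferenceClientWithConfigurationProvider(common.DefaultConfigProvider())
    if err != nil {
        log.Fatal(err)
    }

    resp, err := client.Chat(context.Background(), generativeaiinference.ChatRequest{
        ChatDetails: generativeaiinference.ChatDetails{
            CompartmentId: common.String("<compartment OCID>"), // placeholder
            ServingMode: generativeaiinference.OnDemandServingMode{
                ModelId: common.String("<model OCID>"), // placeholder; assumed field
            },
            ChatRequest: generativeaiinference.CohereChatRequest{
                Message: common.String("Hello!"),
            },
        },
    })
    if err != nil {
        log.Fatal(err)
    }

    // ChatResponse embeds ChatResult; the concrete chat response type
    // depends on the model's API format.
    if cohere, ok := resp.ChatResult.ChatResponse.(generativeaiinference.CohereChatResponse); ok {
        fmt.Println(*cohere.Text)
    }
}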

type ChatResult

ChatResult The response to the chat conversation.

type ChatResult struct {

    // The OCID of the model used in this inference request.
    ModelId *string `mandatory:"true" json:"modelId"`

    // The version of the model.
    ModelVersion *string `mandatory:"true" json:"modelVersion"`

    ChatResponse BaseChatResponse `mandatory:"true" json:"chatResponse"`
}

func (ChatResult) String

func (m ChatResult) String() string

func (*ChatResult) UnmarshalJSON

func (m *ChatResult) UnmarshalJSON(data []byte) (e error)

UnmarshalJSON unmarshals from json

func (ChatResult) ValidateEnumValue

func (m ChatResult) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.

type Choice

Choice Represents a single instance of generated text.

type Choice struct {

    // The index of the generated text.
    Index *int `mandatory:"true" json:"index"`

    // The generated text.
    Text *string `mandatory:"true" json:"text"`

    // The reason why the model stopped generating tokens.
    // Stops if the model hits a natural stop point or a provided stop sequence. Returns the length if the tokens reach the specified maximum number of tokens.
    FinishReason *string `mandatory:"true" json:"finishReason"`

    Logprobs *Logprobs `mandatory:"false" json:"logprobs"`
}

func (Choice) String

func (m Choice) String() string

func (Choice) ValidateEnumValue

func (m Choice) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.

type Citation

Citation A section of the generated reply which cites external knowledge.

type Citation struct {

    // The index of text that the citation starts at, counting from zero.
    Start *int `mandatory:"true" json:"start"`

    // The index of text that the citation ends after, counting from zero.
    End *int `mandatory:"true" json:"end"`

    // The text of the citation
    Text *string `mandatory:"true" json:"text"`

    // Identifiers of documents cited by this section of the generated reply.
    DocumentIds []string `mandatory:"true" json:"documentIds"`
}

func (Citation) String

func (m Citation) String() string

func (Citation) ValidateEnumValue

func (m Citation) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.

type CohereChatRequest

CohereChatRequest Details for the chat request for Cohere models.

type CohereChatRequest struct {

    // Text input for the model to respond to.
    Message *string `mandatory:"true" json:"message"`

    // A list of previous messages between the user and the model, meant to give the model conversational context for responding to the user's message.
    ChatHistory []CohereMessage `mandatory:"false" json:"chatHistory"`

    // A list of relevant documents that the model can cite to generate a more accurate reply.
    // Some suggested keys are "text", "author", and "date". For better generation quality, it is
    // recommended to keep the total word count of the strings in the dictionary to under 300
    // words.
    Documents []interface{} `mandatory:"false" json:"documents"`

    // When true, the response will only contain a list of generated search queries, but no search will take place, and no reply from the model to the user's message will be generated.
    IsSearchQueriesOnly *bool `mandatory:"false" json:"isSearchQueriesOnly"`

    // When specified, the default Cohere preamble will be replaced with the provided one. Preambles are a part of the prompt used to adjust the model's overall behavior and conversation style. Default preambles vary for different models.
    PreambleOverride *string `mandatory:"false" json:"preambleOverride"`

    // Whether to stream back partial progress. If set, tokens are sent as data-only server-sent events as they become available.
    IsStream *bool `mandatory:"false" json:"isStream"`

    // The maximum number of tokens to predict for each response. Includes input plus output tokens.
    MaxTokens *int `mandatory:"false" json:"maxTokens"`

    // A number that sets the randomness of the generated output. A lower temperature means less random generations.
    // Use lower numbers for tasks with a correct answer such as question answering or summarizing. High temperatures can generate hallucinations or factually incorrect information. Start with temperatures lower than 1.0 and increase the temperature for more creative outputs, as you regenerate the prompts to refine the outputs.
    Temperature *float64 `mandatory:"false" json:"temperature"`

    // An integer that sets up the model to use only the top k most likely tokens in the generated output. A higher k introduces more randomness into the output, making the output text sound more natural. The default value is 0, which disables this method and considers all tokens. To set a number for the likely tokens, choose an integer between 1 and 500.
    // If also using top p, then the model considers only the top tokens whose probabilities add up to p percent and ignores the rest of the k tokens. For example, if k is 20, but the probabilities of the top 10 add up to .75, then only the top 10 tokens are chosen.
    TopK *int `mandatory:"false" json:"topK"`

    // If set to a probability 0.0 < p < 1.0, it ensures that only the most likely tokens, with total probability mass of p, are considered for generation at each step.
    // To eliminate tokens with low likelihood, assign p a minimum percentage for the next token's likelihood. For example, when p is set to 0.75, the model eliminates the bottom 25 percent for the next token. Set to 1.0 to consider all tokens and set to 0 to disable. If both k and p are enabled, p acts after k.
    TopP *float64 `mandatory:"false" json:"topP"`

    // To reduce repetitiveness of generated tokens, this number penalizes new tokens based on their frequency in the generated text so far. Greater numbers encourage the model to use new tokens, while lower numbers encourage the model to repeat the tokens. Set to 0 to disable.
    FrequencyPenalty *float64 `mandatory:"false" json:"frequencyPenalty"`

    // To reduce repetitiveness of generated tokens, this number penalizes new tokens based on whether they've appeared in the generated text so far. Greater numbers encourage the model to use new tokens, while lower numbers encourage the model to repeat the tokens.
    // Similar to frequency penalty, a penalty is applied to previously present tokens, except that this penalty is applied equally to all tokens that have already appeared, regardless of how many times they've appeared. Set to 0 to disable.
    PresencePenalty *float64 `mandatory:"false" json:"presencePenalty"`
}

func (CohereChatRequest) MarshalJSON

func (m CohereChatRequest) MarshalJSON() (buff []byte, e error)

MarshalJSON marshals to json representation

func (CohereChatRequest) String

func (m CohereChatRequest) String() string

func (CohereChatRequest) ValidateEnumValue

func (m CohereChatRequest) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.
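
As a sketch of how the grounding fields fit together, the following hypothetical helper combines prior chat turns with inline documents the model may cite; the document keys follow the suggestions in the field comments above.

package example

import (
    "github.com/oracle/oci-go-sdk/common"
    "github.com/oracle/oci-go-sdk/generativeaiinference"
)

// buildGroundedChat is a hypothetical helper combining chat history with
// documents the model may cite in its reply.
func buildGroundedChat() generativeaiinference.CohereChatRequest {
    return generativeaiinference.CohereChatRequest{
        Message: common.String("What does the note say about pricing?"),
        ChatHistory: []generativeaiinference.CohereMessage{
            {Role: generativeaiinference.CohereMessageRoleUser, Message: common.String("Hi there.")},
            {Role: generativeaiinference.CohereMessageRoleChatbot, Message: common.String("Hello! How can I help?")},
        },
        // Each document is free-form; "text", "author", and "date" are the
        // suggested keys.
        Documents: []interface{}{
            map[string]interface{}{"text": "Pricing is per 1,000 tokens.", "author": "docs team"},
        },
        Temperature: common.Float64(0.3),
    }
}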

type CohereChatResponse

CohereChatResponse The response to the chat conversation.

type CohereChatResponse struct {

    // Contents of the reply generated by the model.
    Text *string `mandatory:"true" json:"text"`

    // Inline citations for the generated reply.
    Citations []Citation `mandatory:"false" json:"citations"`

    // Denotes that a search for documents is required.
    IsSearchRequired *bool `mandatory:"false" json:"isSearchRequired"`

    // Generated search queries.
    SearchQueries []SearchQuery `mandatory:"false" json:"searchQueries"`

    // Documents seen by the model when generating the reply. Each document is a JSON string
    // representing the fields and values of the document.
    Documents []interface{} `mandatory:"false" json:"documents"`

    // Why the generation was completed.
    FinishReason CohereChatResponseFinishReasonEnum `mandatory:"true" json:"finishReason"`
}

func (CohereChatResponse) MarshalJSON

func (m CohereChatResponse) MarshalJSON() (buff []byte, e error)

MarshalJSON marshals to json representation

func (CohereChatResponse) String

func (m CohereChatResponse) String() string

func (CohereChatResponse) ValidateEnumValue

func (m CohereChatResponse) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.

type CohereChatResponseFinishReasonEnum

CohereChatResponseFinishReasonEnum Enum with underlying type: string

type CohereChatResponseFinishReasonEnum string

Set of constants representing the allowable values for CohereChatResponseFinishReasonEnum

const (
    CohereChatResponseFinishReasonComplete   CohereChatResponseFinishReasonEnum = "COMPLETE"
    CohereChatResponseFinishReasonErrorToxic CohereChatResponseFinishReasonEnum = "ERROR_TOXIC"
    CohereChatResponseFinishReasonErrorLimit CohereChatResponseFinishReasonEnum = "ERROR_LIMIT"
    CohereChatResponseFinishReasonError      CohereChatResponseFinishReasonEnum = "ERROR"
    CohereChatResponseFinishReasonUserCancel CohereChatResponseFinishReasonEnum = "USER_CANCEL"
    CohereChatResponseFinishReasonMaxTokens  CohereChatResponseFinishReasonEnum = "MAX_TOKENS"
)

func GetCohereChatResponseFinishReasonEnumValues

func GetCohereChatResponseFinishReasonEnumValues() []CohereChatResponseFinishReasonEnum

GetCohereChatResponseFinishReasonEnumValues Enumerates the set of values for CohereChatResponseFinishReasonEnum

func GetMappingCohereChatResponseFinishReasonEnum

func GetMappingCohereChatResponseFinishReasonEnum(val string) (CohereChatResponseFinishReasonEnum, bool)

GetMappingCohereChatResponseFinishReasonEnum performs a case-insensitive comparison on the enum value and returns the desired enum
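
A caller typically branches on the finish reason after reading a reply; a minimal sketch using the constants defined above:

package example

import (
    "fmt"

    "github.com/oracle/oci-go-sdk/generativeaiinference"
)

// describeFinish is a hypothetical helper mapping finish reasons to
// human-readable explanations.
func describeFinish(reason generativeaiinference.CohereChatResponseFinishReasonEnum) string {
    switch reason {
    case generativeaiinference.CohereChatResponseFinishReasonComplete:
        return "the model reached a natural stop point"
    case generativeaiinference.CohereChatResponseFinishReasonMaxTokens:
        return "generation stopped at the maxTokens limit"
    default:
        return fmt.Sprintf("generation ended early: %s", reason)
    }
}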

type CohereLlmInferenceRequest

CohereLlmInferenceRequest Details for the text generation request for Cohere models.

type CohereLlmInferenceRequest struct {

    // Represents the prompt to be completed. Trailing whitespace is trimmed before completion.
    Prompt *string `mandatory:"true" json:"prompt"`

    // Whether to stream back partial progress. If set, tokens are sent as data-only server-sent events as they become available.
    IsStream *bool `mandatory:"false" json:"isStream"`

    // The number of generated texts that will be returned.
    NumGenerations *int `mandatory:"false" json:"numGenerations"`

    // Whether or not to return the user prompt in the response. This option only applies to non-stream results.
    IsEcho *bool `mandatory:"false" json:"isEcho"`

    // The maximum number of tokens to predict for each response. Includes input plus output tokens.
    MaxTokens *int `mandatory:"false" json:"maxTokens"`

    // A number that sets the randomness of the generated output. A lower temperature means less random generations.
    // Use lower numbers for tasks with a correct answer such as question answering or summarizing. High temperatures can generate hallucinations or factually incorrect information. Start with temperatures lower than 1.0 and increase the temperature for more creative outputs, as you regenerate the prompts to refine the outputs.
    Temperature *float64 `mandatory:"false" json:"temperature"`

    // An integer that sets up the model to use only the top k most likely tokens in the generated output. A higher k introduces more randomness into the output, making the output text sound more natural. The default value is 0, which disables this method and considers all tokens. To set a number for the likely tokens, choose an integer between 1 and 500.
    // If also using top p, then the model considers only the top tokens whose probabilities add up to p percent and ignores the rest of the k tokens. For example, if k is 20, but the probabilities of the top 10 add up to .75, then only the top 10 tokens are chosen.
    TopK *int `mandatory:"false" json:"topK"`

    // If set to a probability 0.0 < p < 1.0, it ensures that only the most likely tokens, with total probability mass of p, are considered for generation at each step.
    // To eliminate tokens with low likelihood, assign p a minimum percentage for the next token's likelihood. For example, when p is set to 0.75, the model eliminates the bottom 25 percent for the next token. Set to 1.0 to consider all tokens and set to 0 to disable. If both k and p are enabled, p acts after k.
    TopP *float64 `mandatory:"false" json:"topP"`

    // To reduce repetitiveness of generated tokens, this number penalizes new tokens based on their frequency in the generated text so far. Greater numbers encourage the model to use new tokens, while lower numbers encourage the model to repeat the tokens. Set to 0 to disable.
    FrequencyPenalty *float64 `mandatory:"false" json:"frequencyPenalty"`

    // To reduce repetitiveness of generated tokens, this number penalizes new tokens based on whether they've appeared in the generated text so far. Greater numbers encourage the model to use new tokens, while lower numbers encourage the model to repeat the tokens.
    // Similar to frequency penalty, a penalty is applied to previously present tokens, except that this penalty is applied equally to all tokens that have already appeared, regardless of how many times they've appeared. Set to 0 to disable.
    PresencePenalty *float64 `mandatory:"false" json:"presencePenalty"`

    // The generated text is cut at the end of the earliest occurrence of any of these stop sequences. The generated text will include the stop sequence.
    StopSequences []string `mandatory:"false" json:"stopSequences"`

    // Specifies how and if the token likelihoods are returned with the response.
    ReturnLikelihoods CohereLlmInferenceRequestReturnLikelihoodsEnum `mandatory:"false" json:"returnLikelihoods,omitempty"`

    // For an input that's longer than the maximum token length, specifies which part of the input text will be truncated.
    Truncate CohereLlmInferenceRequestTruncateEnum `mandatory:"false" json:"truncate,omitempty"`
}

func (CohereLlmInferenceRequest) MarshalJSON

func (m CohereLlmInferenceRequest) MarshalJSON() (buff []byte, e error)

MarshalJSON marshals to json representation

func (CohereLlmInferenceRequest) String

func (m CohereLlmInferenceRequest) String() string

func (CohereLlmInferenceRequest) ValidateEnumValue

func (m CohereLlmInferenceRequest) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.
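
To show how the sampling and likelihood options combine, here is a hypothetical builder for a non-streaming completion request; it uses the enum constants defined below.

package example

import (
    "github.com/oracle/oci-go-sdk/common"
    "github.com/oracle/oci-go-sdk/generativeaiinference"
)

// buildCompletion is a hypothetical helper; it requests two generations per
// prompt and per-token likelihoods for the generated text only.
func buildCompletion(prompt string) generativeaiinference.CohereLlmInferenceRequest {
    return generativeaiinference.CohereLlmInferenceRequest{
        Prompt:            common.String(prompt),
        MaxTokens:         common.Int(300),
        Temperature:       common.Float64(0.5),
        NumGenerations:    common.Int(2),
        StopSequences:     []string{"\n\n"},
        ReturnLikelihoods: generativeaiinference.CohereLlmInferenceRequestReturnLikelihoodsGeneration,
        Truncate:          generativeaiinference.CohereLlmInferenceRequestTruncateEnd,
    }
}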

type CohereLlmInferenceRequestReturnLikelihoodsEnum

CohereLlmInferenceRequestReturnLikelihoodsEnum Enum with underlying type: string

type CohereLlmInferenceRequestReturnLikelihoodsEnum string

Set of constants representing the allowable values for CohereLlmInferenceRequestReturnLikelihoodsEnum

const (
    CohereLlmInferenceRequestReturnLikelihoodsNone       CohereLlmInferenceRequestReturnLikelihoodsEnum = "NONE"
    CohereLlmInferenceRequestReturnLikelihoodsAll        CohereLlmInferenceRequestReturnLikelihoodsEnum = "ALL"
    CohereLlmInferenceRequestReturnLikelihoodsGeneration CohereLlmInferenceRequestReturnLikelihoodsEnum = "GENERATION"
)

func GetCohereLlmInferenceRequestReturnLikelihoodsEnumValues

func GetCohereLlmInferenceRequestReturnLikelihoodsEnumValues() []CohereLlmInferenceRequestReturnLikelihoodsEnum

GetCohereLlmInferenceRequestReturnLikelihoodsEnumValues Enumerates the set of values for CohereLlmInferenceRequestReturnLikelihoodsEnum

func GetMappingCohereLlmInferenceRequestReturnLikelihoodsEnum

func GetMappingCohereLlmInferenceRequestReturnLikelihoodsEnum(val string) (CohereLlmInferenceRequestReturnLikelihoodsEnum, bool)

GetMappingCohereLlmInferenceRequestReturnLikelihoodsEnum performs a case-insensitive comparison on the enum value and returns the desired enum

type CohereLlmInferenceRequestTruncateEnum

CohereLlmInferenceRequestTruncateEnum Enum with underlying type: string

type CohereLlmInferenceRequestTruncateEnum string

Set of constants representing the allowable values for CohereLlmInferenceRequestTruncateEnum

const (
    CohereLlmInferenceRequestTruncateNone  CohereLlmInferenceRequestTruncateEnum = "NONE"
    CohereLlmInferenceRequestTruncateStart CohereLlmInferenceRequestTruncateEnum = "START"
    CohereLlmInferenceRequestTruncateEnd   CohereLlmInferenceRequestTruncateEnum = "END"
)

func GetCohereLlmInferenceRequestTruncateEnumValues

func GetCohereLlmInferenceRequestTruncateEnumValues() []CohereLlmInferenceRequestTruncateEnum

GetCohereLlmInferenceRequestTruncateEnumValues Enumerates the set of values for CohereLlmInferenceRequestTruncateEnum

func GetMappingCohereLlmInferenceRequestTruncateEnum

func GetMappingCohereLlmInferenceRequestTruncateEnum(val string) (CohereLlmInferenceRequestTruncateEnum, bool)

GetMappingCohereLlmInferenceRequestTruncateEnum performs a case-insensitive comparison on the enum value and returns the desired enum

type CohereLlmInferenceResponse

CohereLlmInferenceResponse The generated text result to return.

type CohereLlmInferenceResponse struct {

    // Each prompt in the input array has an array of GeneratedText, controlled by the numGenerations parameter in the request.
    GeneratedTexts []GeneratedText `mandatory:"true" json:"generatedTexts"`

    // The date and time that the model was created, in an RFC3339 formatted datetime string.
    TimeCreated *common.SDKTime `mandatory:"true" json:"timeCreated"`

    // Represents the original prompt. Applies only to non-stream responses.
    Prompt *string `mandatory:"false" json:"prompt"`
}

func (CohereLlmInferenceResponse) MarshalJSON

func (m CohereLlmInferenceResponse) MarshalJSON() (buff []byte, e error)

MarshalJSON marshals to json representation

func (CohereLlmInferenceResponse) String

func (m CohereLlmInferenceResponse) String() string

func (CohereLlmInferenceResponse) ValidateEnumValue

func (m CohereLlmInferenceResponse) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.

type CohereMessage

CohereMessage A message that represents a single dialogue in a chat.

type CohereMessage struct {

    // One of CHATBOT|USER to identify who the message is coming from.
    Role CohereMessageRoleEnum `mandatory:"true" json:"role"`

    // Contents of the chat message.
    Message *string `mandatory:"true" json:"message"`
}

func (CohereMessage) String

func (m CohereMessage) String() string

func (CohereMessage) ValidateEnumValue

func (m CohereMessage) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.

type CohereMessageRoleEnum

CohereMessageRoleEnum Enum with underlying type: string

type CohereMessageRoleEnum string

Set of constants representing the allowable values for CohereMessageRoleEnum

const (
    CohereMessageRoleChatbot CohereMessageRoleEnum = "CHATBOT"
    CohereMessageRoleUser    CohereMessageRoleEnum = "USER"
)

func GetCohereMessageRoleEnumValues

func GetCohereMessageRoleEnumValues() []CohereMessageRoleEnum

GetCohereMessageRoleEnumValues Enumerates the set of values for CohereMessageRoleEnum

func GetMappingCohereMessageRoleEnum

func GetMappingCohereMessageRoleEnum(val string) (CohereMessageRoleEnum, bool)

GetMappingCohereMessageRoleEnum performs a case-insensitive comparison on the enum value and returns the desired enum

type DedicatedServingMode

DedicatedServingMode The model's serving mode is dedicated serving and has an endpoint on a dedicated AI cluster.

type DedicatedServingMode struct {

    // The OCID of the endpoint to use.
    EndpointId *string `mandatory:"true" json:"endpointId"`
}

func (DedicatedServingMode) MarshalJSON

func (m DedicatedServingMode) MarshalJSON() (buff []byte, e error)

MarshalJSON marshals to json representation

func (DedicatedServingMode) String

func (m DedicatedServingMode) String() string

func (DedicatedServingMode) ValidateEnumValue

func (m DedicatedServingMode) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.
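
Dedicated and on-demand serving are selected by the concrete ServingMode value passed in the request details. A sketch (OnDemandServingMode's ModelId field is an assumption, since that type's definition falls outside this page):

package example

import (
    "github.com/oracle/oci-go-sdk/common"
    "github.com/oracle/oci-go-sdk/generativeaiinference"
)

// chooseServingMode is a hypothetical helper: a dedicated AI cluster is
// addressed by its endpoint OCID, on-demand serving by a model OCID.
func chooseServingMode(endpointID, modelID string) generativeaiinference.ServingMode {
    if endpointID != "" {
        return generativeaiinference.DedicatedServingMode{
            EndpointId: common.String(endpointID),
        }
    }
    return generativeaiinference.OnDemandServingMode{
        ModelId: common.String(modelID), // assumed field
    }
}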

type EmbedTextDetails

EmbedTextDetails Details for the request to embed texts.

type EmbedTextDetails struct {

    // Provide a list of strings, with a maximum of 96 entries. Each string can be words, a phrase, or a paragraph. The maximum length of each string entry in the list is 512 tokens.
    Inputs []string `mandatory:"true" json:"inputs"`

    ServingMode ServingMode `mandatory:"true" json:"servingMode"`

    // The OCID of the compartment that the user is authorized to use to call into the Generative AI service.
    CompartmentId *string `mandatory:"true" json:"compartmentId"`

    // Whether or not to include the original inputs in the response. Results are index-based.
    IsEcho *bool `mandatory:"false" json:"isEcho"`

    // For an input that's longer than the maximum token length, specifies which part of the input text will be truncated.
    Truncate EmbedTextDetailsTruncateEnum `mandatory:"false" json:"truncate,omitempty"`

    // Specifies the input type.
    InputType EmbedTextDetailsInputTypeEnum `mandatory:"false" json:"inputType,omitempty"`
}

func (EmbedTextDetails) String

func (m EmbedTextDetails) String() string

func (*EmbedTextDetails) UnmarshalJSON

func (m *EmbedTextDetails) UnmarshalJSON(data []byte) (e error)

UnmarshalJSON unmarshals from json

func (EmbedTextDetails) ValidateEnumValue

func (m EmbedTextDetails) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.
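
An end-to-end sketch of embedding a small batch of inputs; the OCIDs are placeholders, OnDemandServingMode's ModelId field is an assumption, and the enum values are those defined below.

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/oracle/oci-go-sdk/common"
    "github.com/oracle/oci-go-sdk/generativeaiinference"
)

func main() {
    client, err := generativeaiinference.NewGenerativeAiInferenceClientWithConfigurationProvider(common.DefaultConfigProvider())
    if err != nil {
        log.Fatal(err)
    }

    resp, err := client.EmbedText(context.Background(), generativeaiinference.EmbedTextRequest{
        EmbedTextDetails: generativeaiinference.EmbedTextDetails{
            CompartmentId: common.String("<compartment OCID>"), // placeholder
            ServingMode: generativeaiinference.OnDemandServingMode{
                ModelId: common.String("<embedding model OCID>"), // placeholder; assumed field
            },
            Inputs:    []string{"hello world", "generative AI"},
            Truncate:  generativeaiinference.EmbedTextDetailsTruncateEnd,
            InputType: generativeaiinference.EmbedTextDetailsInputTypeSearchDocument,
        },
    })
    if err != nil {
        log.Fatal(err)
    }

    // One embedding vector per input, in input order.
    fmt.Printf("%d vectors, dimension %d\n", len(resp.Embeddings), len(resp.Embeddings[0]))
}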

type EmbedTextDetailsInputTypeEnum

EmbedTextDetailsInputTypeEnum Enum with underlying type: string

type EmbedTextDetailsInputTypeEnum string

Set of constants representing the allowable values for EmbedTextDetailsInputTypeEnum

const (
    EmbedTextDetailsInputTypeSearchDocument EmbedTextDetailsInputTypeEnum = "SEARCH_DOCUMENT"
    EmbedTextDetailsInputTypeSearchQuery    EmbedTextDetailsInputTypeEnum = "SEARCH_QUERY"
    EmbedTextDetailsInputTypeClassification EmbedTextDetailsInputTypeEnum = "CLASSIFICATION"
    EmbedTextDetailsInputTypeClustering     EmbedTextDetailsInputTypeEnum = "CLUSTERING"
)

func GetEmbedTextDetailsInputTypeEnumValues

func GetEmbedTextDetailsInputTypeEnumValues() []EmbedTextDetailsInputTypeEnum

GetEmbedTextDetailsInputTypeEnumValues Enumerates the set of values for EmbedTextDetailsInputTypeEnum

func GetMappingEmbedTextDetailsInputTypeEnum

func GetMappingEmbedTextDetailsInputTypeEnum(val string) (EmbedTextDetailsInputTypeEnum, bool)

GetMappingEmbedTextDetailsInputTypeEnum performs a case-insensitive comparison on the enum value and returns the desired enum

type EmbedTextDetailsTruncateEnum

EmbedTextDetailsTruncateEnum Enum with underlying type: string

type EmbedTextDetailsTruncateEnum string

Set of constants representing the allowable values for EmbedTextDetailsTruncateEnum

const (
    EmbedTextDetailsTruncateNone  EmbedTextDetailsTruncateEnum = "NONE"
    EmbedTextDetailsTruncateStart EmbedTextDetailsTruncateEnum = "START"
    EmbedTextDetailsTruncateEnd   EmbedTextDetailsTruncateEnum = "END"
)

func GetEmbedTextDetailsTruncateEnumValues

func GetEmbedTextDetailsTruncateEnumValues() []EmbedTextDetailsTruncateEnum

GetEmbedTextDetailsTruncateEnumValues Enumerates the set of values for EmbedTextDetailsTruncateEnum

func GetMappingEmbedTextDetailsTruncateEnum

func GetMappingEmbedTextDetailsTruncateEnum(val string) (EmbedTextDetailsTruncateEnum, bool)

GetMappingEmbedTextDetailsTruncateEnum performs a case-insensitive comparison on the enum value and returns the desired enum

type EmbedTextRequest

EmbedTextRequest wrapper for the EmbedText operation

# See also

Click https://docs.cloud.oracle.com/en-us/iaas/tools/go-sdk-examples/65.65.0/generativeaiinference/EmbedText.go.html to see an example of how to use EmbedTextRequest.

type EmbedTextRequest struct {

    // Details for generating the embed response.
    EmbedTextDetails `contributesTo:"body"`

    // A token that uniquely identifies a request so it can be retried in case of a timeout or
    // server error without risk of executing that same action again. Retry tokens expire after 24
    // hours, but can be invalidated before then due to conflicting operations. For example, if a resource
    // has been deleted and purged from the system, then a retry of the original creation request
    // might be rejected.
    OpcRetryToken *string `mandatory:"false" contributesTo:"header" name:"opc-retry-token"`

    // The client request ID for tracing.
    OpcRequestId *string `mandatory:"false" contributesTo:"header" name:"opc-request-id"`

    // Metadata about the request. This information will not be transmitted to the service, but
    // represents information that the SDK will consume to drive retry behavior.
    RequestMetadata common.RequestMetadata
}

func (EmbedTextRequest) BinaryRequestBody

func (request EmbedTextRequest) BinaryRequestBody() (*common.OCIReadSeekCloser, bool)

BinaryRequestBody implements the OCIRequest interface

func (EmbedTextRequest) HTTPRequest

func (request EmbedTextRequest) HTTPRequest(method, path string, binaryRequestBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (http.Request, error)

HTTPRequest implements the OCIRequest interface

func (EmbedTextRequest) RetryPolicy

func (request EmbedTextRequest) RetryPolicy() *common.RetryPolicy

RetryPolicy implements the OCIRetryableRequest interface. This retrieves the specified retry policy.

func (EmbedTextRequest) String

func (request EmbedTextRequest) String() string

func (EmbedTextRequest) ValidateEnumValue

func (request EmbedTextRequest) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.

type EmbedTextResponse

EmbedTextResponse wrapper for the EmbedText operation

type EmbedTextResponse struct {

    // The underlying http response
    RawResponse *http.Response

    // The EmbedTextResult instance
    EmbedTextResult `presentIn:"body"`

    // For optimistic concurrency control. See `if-match`.
    Etag *string `presentIn:"header" name:"etag"`

    // Unique Oracle-assigned identifier for the request. If you need to contact
    // Oracle about a particular request, please provide the request ID.
    OpcRequestId *string `presentIn:"header" name:"opc-request-id"`
}

func (EmbedTextResponse) HTTPResponse

func (response EmbedTextResponse) HTTPResponse() *http.Response

HTTPResponse implements the OCIResponse interface

func (EmbedTextResponse) String

func (response EmbedTextResponse) String() string

type EmbedTextResult

EmbedTextResult The generated embedding result to return.

type EmbedTextResult struct {

    // A unique identifier for the generated result.
    Id *string `mandatory:"true" json:"id"`

    // The embeddings corresponding to inputs.
    Embeddings [][]float32 `mandatory:"true" json:"embeddings"`

    // The original inputs. Only present if "isEcho" is set to true.
    Inputs []string `mandatory:"false" json:"inputs"`

    // The OCID of the model used in this inference request.
    ModelId *string `mandatory:"false" json:"modelId"`

    // The version of the model.
    ModelVersion *string `mandatory:"false" json:"modelVersion"`
}

func (EmbedTextResult) String

func (m EmbedTextResult) String() string

func (EmbedTextResult) ValidateEnumValue

func (m EmbedTextResult) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.

type GenerateTextDetails

GenerateTextDetails Details for the request to generate text.

type GenerateTextDetails struct {

    // The OCID of the compartment that the user is authorized to use to call into the Generative AI service.
    CompartmentId *string `mandatory:"true" json:"compartmentId"`

    ServingMode ServingMode `mandatory:"true" json:"servingMode"`

    InferenceRequest LlmInferenceRequest `mandatory:"true" json:"inferenceRequest"`
}

func (GenerateTextDetails) String

func (m GenerateTextDetails) String() string

func (*GenerateTextDetails) UnmarshalJSON

func (m *GenerateTextDetails) UnmarshalJSON(data []byte) (e error)

UnmarshalJSON unmarshals from json

func (GenerateTextDetails) ValidateEnumValue

func (m GenerateTextDetails) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing the API request; calling it directly is not recommended.
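
An end-to-end sketch of one GenerateText call; the OCIDs are placeholders, OnDemandServingMode's ModelId field is an assumption, and reading the result assumes a Cohere-format inference response.

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/oracle/oci-go-sdk/common"
    "github.com/oracle/oci-go-sdk/generativeaiinference"
)

func main() {
    client, err := generativeaiinference.NewGenerativeAiInferenceClientWithConfigurationProvider(common.DefaultConfigProvider())
    if err != nil {
        log.Fatal(err)
    }

    resp, err := client.GenerateText(context.Background(), generativeaiinference.GenerateTextRequest{
        GenerateTextDetails: generativeaiinference.GenerateTextDetails{
            CompartmentId: common.String("<compartment OCID>"), // placeholder
            ServingMode: generativeaiinference.OnDemandServingMode{
                ModelId: common.String("<text model OCID>"), // placeholder; assumed field
            },
            InferenceRequest: generativeaiinference.CohereLlmInferenceRequest{
                Prompt:    common.String("Write a haiku about the cloud."),
                MaxTokens: common.Int(60),
            },
        },
    })
    if err != nil {
        log.Fatal(err)
    }

    // InferenceResponse is polymorphic; assert its runtime type.
    if cohere, ok := resp.InferenceResponse.(generativeaiinference.CohereLlmInferenceResponse); ok {
        for _, g := range cohere.GeneratedTexts {
            fmt.Println(*g.Text)
        }
    }
}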

type GenerateTextRequest

GenerateTextRequest wrapper for the GenerateText operation

# See also

Click https://docs.cloud.oracle.com/en-us/iaas/tools/go-sdk-examples/65.65.0/generativeaiinference/GenerateText.go.html to see an example of how to use GenerateTextRequest.

type GenerateTextRequest struct {

    // Details for generating the text response.
    GenerateTextDetails `contributesTo:"body"`

    // A token that uniquely identifies a request so it can be retried in case of a timeout or
    // server error without risk of executing that same action again. Retry tokens expire after 24
    // hours, but can be invalidated before then due to conflicting operations. For example, if a resource
    // has been deleted and purged from the system, then a retry of the original creation request
    // might be rejected.
    OpcRetryToken *string `mandatory:"false" contributesTo:"header" name:"opc-retry-token"`

    // The client request ID for tracing.
    OpcRequestId *string `mandatory:"false" contributesTo:"header" name:"opc-request-id"`

    // Metadata about the request. This information will not be transmitted to the service, but
    // represents information that the SDK will consume to drive retry behavior.
    RequestMetadata common.RequestMetadata
}

func (GenerateTextRequest) BinaryRequestBody

func (request GenerateTextRequest) BinaryRequestBody() (*common.OCIReadSeekCloser, bool)

BinaryRequestBody implements the OCIRequest interface

func (GenerateTextRequest) HTTPRequest

func (request GenerateTextRequest) HTTPRequest(method, path string, binaryRequestBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (http.Request, error)

HTTPRequest implements the OCIRequest interface

func (GenerateTextRequest) RetryPolicy

func (request GenerateTextRequest) RetryPolicy() *common.RetryPolicy

RetryPolicy implements the OCIRetryableRequest interface. This retrieves the specified retry policy.

func (GenerateTextRequest) String

func (request GenerateTextRequest) String() string

func (GenerateTextRequest) ValidateEnumValue

func (request GenerateTextRequest) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing an API request; calling it directly is not recommended.

type GenerateTextResponse

GenerateTextResponse wrapper for the GenerateText operation

type GenerateTextResponse struct {

    // The underlying http response
    RawResponse *http.Response

    // The GenerateTextResult instance
    GenerateTextResult `presentIn:"body"`

    // For optimistic concurrency control. See `if-match`.
    Etag *string `presentIn:"header" name:"etag"`

    // Unique Oracle-assigned identifier for the request. If you need to contact
    // Oracle about a particular request, please provide the request ID.
    OpcRequestId *string `presentIn:"header" name:"opc-request-id"`
}

func (GenerateTextResponse) HTTPResponse

func (response GenerateTextResponse) HTTPResponse() *http.Response

HTTPResponse implements the OCIResponse interface

func (GenerateTextResponse) String

func (response GenerateTextResponse) String() string

type GenerateTextResult

GenerateTextResult The generated text result to return.

type GenerateTextResult struct {

    // The OCID of the model used in this inference request.
    ModelId *string `mandatory:"true" json:"modelId"`

    // The version of the model.
    ModelVersion *string `mandatory:"true" json:"modelVersion"`

    InferenceResponse LlmInferenceResponse `mandatory:"true" json:"inferenceResponse"`
}

func (GenerateTextResult) String

func (m GenerateTextResult) String() string

func (*GenerateTextResult) UnmarshalJSON

func (m *GenerateTextResult) UnmarshalJSON(data []byte) (e error)

UnmarshalJSON unmarshals from json

func (GenerateTextResult) ValidateEnumValue

func (m GenerateTextResult) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing an API request; calling it directly is not recommended.

type GeneratedText

GeneratedText The text generated during each run.

type GeneratedText struct {

    // A unique identifier for this text generation.
    Id *string `mandatory:"true" json:"id"`

    // The generated text.
    Text *string `mandatory:"true" json:"text"`

    // The overall likelihood of the generated text.
    // When a large language model generates a new token for the output text, a likelihood is assigned to all tokens, where tokens with higher likelihoods are more likely to follow the current token. For example, it's more likely that the word favorite is followed by the word food or book rather than the word zebra. A lower likelihood means that it's less likely that the token follows the current token.
    Likelihood *float64 `mandatory:"true" json:"likelihood"`

    // The reason why the model stopped generating tokens.
    // A model stops generating tokens if the model hits a natural stop point or reaches a provided stop sequence.
    FinishReason *string `mandatory:"false" json:"finishReason"`

    // A collection of generated tokens and their corresponding likelihoods.
    TokenLikelihoods []TokenLikelihood `mandatory:"false" json:"tokenLikelihoods"`
}

func (GeneratedText) String

func (m GeneratedText) String() string

func (GeneratedText) ValidateEnumValue

func (m GeneratedText) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing an API request; calling it directly is not recommended.

type GenerativeAiInferenceClient

GenerativeAiInferenceClient a client for GenerativeAiInference

type GenerativeAiInferenceClient struct {
    common.BaseClient
    // contains filtered or unexported fields
}

func NewGenerativeAiInferenceClientWithConfigurationProvider

func NewGenerativeAiInferenceClientWithConfigurationProvider(configProvider common.ConfigurationProvider) (client GenerativeAiInferenceClient, err error)

NewGenerativeAiInferenceClientWithConfigurationProvider Creates a new default GenerativeAiInference client with the given configuration provider. The configuration provider is used for the default signer as well as for reading the region.
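
A minimal sketch of constructing a client; the default configuration provider reads credentials and the region from ~/.oci/config:

    package main

    import (
        "github.com/oracle/oci-go-sdk/common"
        "github.com/oracle/oci-go-sdk/generativeaiinference"
    )

    func main() {
        // Build a client from the default configuration provider; the same
        // provider supplies the request signer and the region.
        client, err := generativeaiinference.NewGenerativeAiInferenceClientWithConfigurationProvider(common.DefaultConfigProvider())
        if err != nil {
            panic(err)
        }
        _ = client // use the client to call Chat, GenerateText, EmbedText, or SummarizeText
    }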

func NewGenerativeAiInferenceClientWithOboToken

func NewGenerativeAiInferenceClientWithOboToken(configProvider common.ConfigurationProvider, oboToken string) (client GenerativeAiInferenceClient, err error)

NewGenerativeAiInferenceClientWithOboToken Creates a new default GenerativeAiInference client with the given configuration provider. The OBO token is added to the default headers and signed; the configuration provider is used for the signer as well as for reading the region.

func (GenerativeAiInferenceClient) Chat

func (client GenerativeAiInferenceClient) Chat(ctx context.Context, request ChatRequest) (response ChatResponse, err error)

Chat Creates a response for the given conversation.

# See also

Click https://docs.cloud.oracle.com/en-us/iaas/tools/go-sdk-examples/65.65.0/generativeaiinference/Chat.go.html to see an example of how to use the Chat API. A default retry strategy applies to the Chat() operation.
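
Below is a hedged sketch of a chat call (imports: context, fmt, common, generativeaiinference). The ChatDetails field names (CompartmentId, ServingMode, ChatRequest) and the "USER" role value are assumptions, and the OCIDs are placeholders; client is a GenerativeAiInferenceClient constructed as shown above.

    // chatOnce sends a single user message to an on-demand model and prints
    // the raw response. Field names and the role value are assumptions.
    func chatOnce(ctx context.Context, client generativeaiinference.GenerativeAiInferenceClient) error {
        details := generativeaiinference.ChatDetails{
            CompartmentId: common.String("ocid1.compartment.oc1..example"), // placeholder
            ServingMode: generativeaiinference.OnDemandServingMode{
                ModelId: common.String("ocid1.generativeaimodel.oc1..example"), // placeholder
            },
            ChatRequest: generativeaiinference.GenericChatRequest{
                Messages: []generativeaiinference.Message{{
                    Role: common.String("USER"), // assumed role value
                    Content: []generativeaiinference.ChatContent{
                        generativeaiinference.TextContent{Text: common.String("Hello!")},
                    },
                }},
                MaxTokens: common.Int(256),
            },
        }
        resp, err := client.Chat(ctx, generativeaiinference.ChatRequest{ChatDetails: details})
        if err != nil {
            return err
        }
        fmt.Println(resp)
        return nil
    }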

func (*GenerativeAiInferenceClient) ConfigurationProvider

func (client *GenerativeAiInferenceClient) ConfigurationProvider() *common.ConfigurationProvider

ConfigurationProvider returns the ConfigurationProvider used in this client, or nil if none is set.

func (GenerativeAiInferenceClient) EmbedText

func (client GenerativeAiInferenceClient) EmbedText(ctx context.Context, request EmbedTextRequest) (response EmbedTextResponse, err error)

EmbedText Produces embeddings for the inputs. An embedding is a numeric representation of a piece of text. This text can be a phrase, a sentence, or one or more paragraphs. The Generative AI embedding model transforms each phrase, sentence, or paragraph that you input into an array of 1024 numbers. You can use these embeddings to find similarity in your input text, such as finding phrases that are similar in context or category. Embeddings are mostly used for semantic searches, where the search function focuses on the meaning of the text that it's searching through rather than finding results based on keywords.

# See also

Click https://docs.cloud.oracle.com/en-us/iaas/tools/go-sdk-examples/65.65.0/generativeaiinference/EmbedText.go.html to see an example of how to use the EmbedText API. A default retry strategy applies to the EmbedText() operation.
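
A sketch of an embedding call, reusing the client from above. The EmbedTextDetails field names (Inputs, CompartmentId, ServingMode) and the embedded EmbedTextDetails field on EmbedTextRequest are assumed from the patterns elsewhere in this package; the OCIDs are placeholders.

    // embedOnce embeds a few input strings and reports the vector sizes.
    func embedOnce(ctx context.Context, client generativeaiinference.GenerativeAiInferenceClient) error {
        details := generativeaiinference.EmbedTextDetails{ // field names assumed
            Inputs:        []string{"hello world", "goodbye world"},
            CompartmentId: common.String("ocid1.compartment.oc1..example"), // placeholder
            ServingMode: generativeaiinference.OnDemandServingMode{
                ModelId: common.String("ocid1.generativeaimodel.oc1..example"), // placeholder
            },
        }
        resp, err := client.EmbedText(ctx, generativeaiinference.EmbedTextRequest{EmbedTextDetails: details})
        if err != nil {
            return err
        }
        // Each embedding is an array of 1024 float32 values.
        for i, e := range resp.Embeddings {
            fmt.Printf("input %d -> %d dimensions\n", i, len(e))
        }
        return nil
    }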

func (GenerativeAiInferenceClient) GenerateText

func (client GenerativeAiInferenceClient) GenerateText(ctx context.Context, request GenerateTextRequest) (response GenerateTextResponse, err error)

GenerateText Generates a text response based on the user prompt.

# See also

Click https://docs.cloud.oracle.com/en-us/iaas/tools/go-sdk-examples/65.65.0/generativeaiinference/GenerateText.go.html to see an example of how to use the GenerateText API. A default retry strategy applies to the GenerateText() operation.
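
A sketch of a text generation call using the LlamaLlmInferenceRequest defined later in this package; the OCIDs are placeholders, and client is constructed as shown above.

    // generateOnce asks a Llama-family model to complete a prompt. The
    // concrete type of resp.InferenceResponse depends on the model runtime.
    func generateOnce(ctx context.Context, client generativeaiinference.GenerativeAiInferenceClient) error {
        details := generativeaiinference.GenerateTextDetails{
            CompartmentId: common.String("ocid1.compartment.oc1..example"), // placeholder
            ServingMode: generativeaiinference.OnDemandServingMode{
                ModelId: common.String("ocid1.generativeaimodel.oc1..example"), // placeholder
            },
            InferenceRequest: generativeaiinference.LlamaLlmInferenceRequest{
                Prompt:      common.String("Write a haiku about the ocean."),
                MaxTokens:   common.Int(100),
                Temperature: common.Float64(0.7),
            },
        }
        resp, err := client.GenerateText(ctx, generativeaiinference.GenerateTextRequest{GenerateTextDetails: details})
        if err != nil {
            return err
        }
        // Type-assert the runtime-specific response to read the choices.
        if llama, ok := resp.InferenceResponse.(generativeaiinference.LlamaLlmInferenceResponse); ok {
            fmt.Println(llama.Choices)
        }
        return nil
    }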

func (*GenerativeAiInferenceClient) SetRegion

func (client *GenerativeAiInferenceClient) SetRegion(region string)

SetRegion overrides the region of this client.

func (GenerativeAiInferenceClient) SummarizeText

func (client GenerativeAiInferenceClient) SummarizeText(ctx context.Context, request SummarizeTextRequest) (response SummarizeTextResponse, err error)

SummarizeText Summarizes the input text.

# See also

Click https://docs.cloud.oracle.com/en-us/iaas/tools/go-sdk-examples/65.65.0/generativeaiinference/SummarizeText.go.html to see an example of how to use the SummarizeText API. A default retry strategy applies to the SummarizeText() operation.
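
A sketch of a summarization call; the field names come from the SummarizeTextDetails model below, the OCIDs are placeholders, and client is constructed as shown above.

    // summarizeOnce summarizes an input string as short bullet points.
    func summarizeOnce(ctx context.Context, client generativeaiinference.GenerativeAiInferenceClient, text string) error {
        details := generativeaiinference.SummarizeTextDetails{
            Input:         common.String(text),
            CompartmentId: common.String("ocid1.compartment.oc1..example"), // placeholder
            ServingMode: generativeaiinference.OnDemandServingMode{
                ModelId: common.String("ocid1.generativeaimodel.oc1..example"), // placeholder
            },
            Length: generativeaiinference.SummarizeTextDetailsLengthShort,
            Format: generativeaiinference.SummarizeTextDetailsFormatBullets,
        }
        resp, err := client.SummarizeText(ctx, generativeaiinference.SummarizeTextRequest{SummarizeTextDetails: details})
        if err != nil {
            return err
        }
        fmt.Println(*resp.Summary)
        return nil
    }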

type GenericChatRequest

GenericChatRequest Details for the chat request.

type GenericChatRequest struct {

    // The series of messages associated with this chat completion request. It should include previous messages in the conversation. Each message has a role and content.
    Messages []Message `mandatory:"false" json:"messages"`

    // Whether to stream back partial progress. If set, tokens are sent as data-only server-sent events as they become available.
    IsStream *bool `mandatory:"false" json:"isStream"`

    // The number of generated texts that will be returned.
    NumGenerations *int `mandatory:"false" json:"numGenerations"`

    // Whether or not to return the user prompt in the response. Applies only to non-stream results.
    IsEcho *bool `mandatory:"false" json:"isEcho"`

    // An integer that tells the model to use only the top k most likely tokens in the generated output. A higher k introduces more randomness into the output, making the output text sound more natural. The default value is -1, which means to consider all tokens. Setting k to 0 also disables this method and considers all tokens.
    // If also using top p, then the model considers only the top tokens whose probabilities add up to p percent and ignores the rest of the k tokens. For example, if k is 20, but the probabilities of the top 10 add up to .75, then only the top 10 tokens are chosen.
    TopK *int `mandatory:"false" json:"topK"`

    // If set to a probability 0.0 < p < 1.0, it ensures that only the most likely tokens, with total probability mass of p, are considered for generation at each step.
    // To eliminate tokens with low likelihood, assign p a minimum percentage for the next token's likelihood. For example, when p is set to 0.75, the model eliminates the bottom 25 percent for the next token. Set to 1 to consider all tokens and set to 0 to disable. If both k and p are enabled, p acts after k.
    TopP *float64 `mandatory:"false" json:"topP"`

    // A number that sets the randomness of the generated output. A lower temperature means less random generations.
    // Use lower numbers for tasks with a correct answer such as question answering or summarizing. High temperatures can generate hallucinations or factually incorrect information. Start with temperatures lower than 1.0 and increase the temperature for more creative outputs, as you regenerate the prompts to refine the outputs.
    Temperature *float64 `mandatory:"false" json:"temperature"`

    // To reduce repetitiveness of generated tokens, this number penalizes new tokens based on their frequency in the generated text so far. Values > 0 encourage the model to use new tokens and values < 0 encourage the model to repeat tokens. Set to 0 to disable.
    FrequencyPenalty *float64 `mandatory:"false" json:"frequencyPenalty"`

    // To reduce repetitiveness of generated tokens, this number penalizes new tokens based on whether they've appeared in the generated text so far. Values > 0 encourage the model to use new tokens and values < 0 encourage the model to repeat tokens.
    // Similar to frequency penalty, a penalty is applied to previously present tokens, except that this penalty is applied equally to all tokens that have already appeared, regardless of how many times they've appeared. Set to 0 to disable.
    PresencePenalty *float64 `mandatory:"false" json:"presencePenalty"`

    // List of strings that stop the generation if they are generated for the response text. The returned output will not contain the stop strings.
    Stop []string `mandatory:"false" json:"stop"`

    // Includes the logarithmic probabilities for the most likely output tokens and the chosen tokens.
    // For example, if logProbs is set to 5, the API returns a list of the 5 most likely tokens. The API returns the log probability of the sampled token, so there might be up to logProbs+1 elements in the response.
    LogProbs *int `mandatory:"false" json:"logProbs"`

    // The maximum number of tokens that can be generated per output sequence. The token count of your prompt plus max_tokens cannot exceed the model's context length.
    MaxTokens *int `mandatory:"false" json:"maxTokens"`

    // Modify the likelihood of specified tokens appearing in the completion.
    LogitBias *interface{} `mandatory:"false" json:"logitBias"`
}
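
For instance, a hedged sketch of constructing a request that tunes the sampling parameters documented above (the values are illustrative, and the "USER" role value is an assumption):

    req := generativeaiinference.GenericChatRequest{
        Messages: []generativeaiinference.Message{{
            Role: common.String("USER"), // assumed role value
            Content: []generativeaiinference.ChatContent{
                generativeaiinference.TextContent{Text: common.String("Suggest three team names.")},
            },
        }},
        NumGenerations:   common.Int(3),       // return three candidate texts
        Temperature:      common.Float64(0.9), // more creative output
        TopK:             common.Int(40),      // sample from the 40 most likely tokens
        TopP:             common.Float64(0.8), // then keep tokens covering 80% of the probability mass
        FrequencyPenalty: common.Float64(0.2), // discourage repetition
        MaxTokens:        common.Int(64),
    }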

func (GenericChatRequest) MarshalJSON

func (m GenericChatRequest) MarshalJSON() (buff []byte, e error)

MarshalJSON marshals to json representation

func (GenericChatRequest) String

func (m GenericChatRequest) String() string

func (GenericChatRequest) ValidateEnumValue

func (m GenericChatRequest) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing an API request; calling it directly is not recommended.

type GenericChatResponse

GenericChatResponse The response to the chat conversation.

type GenericChatResponse struct {

    // The Unix timestamp (in seconds) of when the generation was created.
    TimeCreated *common.SDKTime `mandatory:"true" json:"timeCreated"`

    // A list of generated texts. There can be more than one if numGenerations is greater than 1.
    Choices []ChatChoice `mandatory:"true" json:"choices"`
}

func (GenericChatResponse) MarshalJSON

func (m GenericChatResponse) MarshalJSON() (buff []byte, e error)

MarshalJSON marshals to json representation

func (GenericChatResponse) String

func (m GenericChatResponse) String() string

func (GenericChatResponse) ValidateEnumValue

func (m GenericChatResponse) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing an API request; calling it directly is not recommended.

type LlamaLlmInferenceRequest

LlamaLlmInferenceRequest Details for the text generation request for Llama models.

type LlamaLlmInferenceRequest struct {

    // Represents the prompt to be completed. The trailing white spaces are trimmed before completion.
    Prompt *string `mandatory:"false" json:"prompt"`

    // Whether to stream back partial progress. If set, tokens are sent as data-only server-sent events as they become available.
    IsStream *bool `mandatory:"false" json:"isStream"`

    // The number of generated texts that will be returned.
    NumGenerations *int `mandatory:"false" json:"numGenerations"`

    // Whether or not to return the user prompt in the response. Applies only to non-stream results.
    IsEcho *bool `mandatory:"false" json:"isEcho"`

    // An integer that tells the model to use only the top k most likely tokens in the generated output. A higher k introduces more randomness into the output, making the output text sound more natural. The default value is -1, which means to consider all tokens. Setting k to 0 also disables this method and considers all tokens.
    // If also using top p, then the model considers only the top tokens whose probabilities add up to p percent and ignores the rest of the k tokens. For example, if k is 20, but the probabilities of the top 10 add up to .75, then only the top 10 tokens are chosen.
    TopK *int `mandatory:"false" json:"topK"`

    // If set to a probability 0.0 < p < 1.0, it ensures that only the most likely tokens, with total probability mass of p, are considered for generation at each step.
    // To eliminate tokens with low likelihood, assign p a minimum percentage for the next token's likelihood. For example, when p is set to 0.75, the model eliminates the bottom 25 percent for the next token. Set to 1 to consider all tokens and set to 0 to disable. If both k and p are enabled, p acts after k.
    TopP *float64 `mandatory:"false" json:"topP"`

    // A number that sets the randomness of the generated output. A lower temperature means less random generations.
    // Use lower numbers for tasks with a correct answer such as question answering or summarizing. High temperatures can generate hallucinations or factually incorrect information. Start with temperatures lower than 1.0 and increase the temperature for more creative outputs, as you regenerate the prompts to refine the outputs.
    Temperature *float64 `mandatory:"false" json:"temperature"`

    // To reduce repetitiveness of generated tokens, this number penalizes new tokens based on their frequency in the generated text so far. Values > 0 encourage the model to use new tokens and values < 0 encourage the model to repeat tokens. Set to 0 to disable.
    FrequencyPenalty *float64 `mandatory:"false" json:"frequencyPenalty"`

    // To reduce repetitiveness of generated tokens, this number penalizes new tokens based on whether they've appeared in the generated text so far. Values > 0 encourage the model to use new tokens and values < 0 encourage the model to repeat tokens.
    // Similar to frequency penalty, a penalty is applied to previously present tokens, except that this penalty is applied equally to all tokens that have already appeared, regardless of how many times they've appeared. Set to 0 to disable.
    PresencePenalty *float64 `mandatory:"false" json:"presencePenalty"`

    // List of strings that stop the generation if they are generated for the response text. The returned output will not contain the stop strings.
    Stop []string `mandatory:"false" json:"stop"`

    // Includes the logarithmic probabilities for the most likely output tokens and the chosen tokens.
    // For example, if logProbs is set to 5, the API returns a list of the 5 most likely tokens. The API returns the log probability of the sampled token, so there might be up to logProbs+1 elements in the response.
    LogProbs *int `mandatory:"false" json:"logProbs"`

    // The maximum number of tokens that can be generated per output sequence. The token count of your prompt plus max_tokens cannot exceed the model's context length.
    MaxTokens *int `mandatory:"false" json:"maxTokens"`
}

func (LlamaLlmInferenceRequest) MarshalJSON

func (m LlamaLlmInferenceRequest) MarshalJSON() (buff []byte, e error)

MarshalJSON marshals to json representation

func (LlamaLlmInferenceRequest) String

func (m LlamaLlmInferenceRequest) String() string

func (LlamaLlmInferenceRequest) ValidateEnumValue

func (m LlamaLlmInferenceRequest) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing an API request; calling it directly is not recommended.

type LlamaLlmInferenceResponse

LlamaLlmInferenceResponse The generated text result to return.

type LlamaLlmInferenceResponse struct {

    // The Unix timestamp (in seconds) of when the generation was created.
    Created *common.SDKTime `mandatory:"true" json:"created"`

    // A list of generated texts. There can be more than one if numGenerations is greater than 1.
    Choices []Choice `mandatory:"true" json:"choices"`
}

func (LlamaLlmInferenceResponse) MarshalJSON

func (m LlamaLlmInferenceResponse) MarshalJSON() (buff []byte, e error)

MarshalJSON marshals to json representation

func (LlamaLlmInferenceResponse) String

func (m LlamaLlmInferenceResponse) String() string

func (LlamaLlmInferenceResponse) ValidateEnumValue

func (m LlamaLlmInferenceResponse) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing an API request; calling it directly is not recommended.

type LlmInferenceRequest

LlmInferenceRequest The base interface for inference requests.

type LlmInferenceRequest interface {
}

type LlmInferenceRequestRuntimeTypeEnum

LlmInferenceRequestRuntimeTypeEnum Enum with underlying type: string

type LlmInferenceRequestRuntimeTypeEnum string

Set of constants representing the allowable values for LlmInferenceRequestRuntimeTypeEnum

const (
    LlmInferenceRequestRuntimeTypeCohere LlmInferenceRequestRuntimeTypeEnum = "COHERE"
    LlmInferenceRequestRuntimeTypeLlama  LlmInferenceRequestRuntimeTypeEnum = "LLAMA"
)

func GetLlmInferenceRequestRuntimeTypeEnumValues

func GetLlmInferenceRequestRuntimeTypeEnumValues() []LlmInferenceRequestRuntimeTypeEnum

GetLlmInferenceRequestRuntimeTypeEnumValues Enumerates the set of values for LlmInferenceRequestRuntimeTypeEnum

func GetMappingLlmInferenceRequestRuntimeTypeEnum

func GetMappingLlmInferenceRequestRuntimeTypeEnum(val string) (LlmInferenceRequestRuntimeTypeEnum, bool)

GetMappingLlmInferenceRequestRuntimeTypeEnum performs a case-insensitive comparison on the enum value and returns the matching enum, if one exists.
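
For example, the mapping helper accepts any casing of the value:

    if v, ok := generativeaiinference.GetMappingLlmInferenceRequestRuntimeTypeEnum("cohere"); ok {
        fmt.Println(v) // prints COHERE
    }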

type LlmInferenceResponse

LlmInferenceResponse The base interface for inference responses.

type LlmInferenceResponse interface {
}

type LlmInferenceResponseRuntimeTypeEnum

LlmInferenceResponseRuntimeTypeEnum Enum with underlying type: string

type LlmInferenceResponseRuntimeTypeEnum string

Set of constants representing the allowable values for LlmInferenceResponseRuntimeTypeEnum

const (
    LlmInferenceResponseRuntimeTypeCohere LlmInferenceResponseRuntimeTypeEnum = "COHERE"
    LlmInferenceResponseRuntimeTypeLlama  LlmInferenceResponseRuntimeTypeEnum = "LLAMA"
    LlmInferenceResponseRuntimeTypeOpenai LlmInferenceResponseRuntimeTypeEnum = "OPENAI"
    LlmInferenceResponseRuntimeTypeDalle3 LlmInferenceResponseRuntimeTypeEnum = "DALLE3"
)

func GetLlmInferenceResponseRuntimeTypeEnumValues

func GetLlmInferenceResponseRuntimeTypeEnumValues() []LlmInferenceResponseRuntimeTypeEnum

GetLlmInferenceResponseRuntimeTypeEnumValues Enumerates the set of values for LlmInferenceResponseRuntimeTypeEnum

func GetMappingLlmInferenceResponseRuntimeTypeEnum

func GetMappingLlmInferenceResponseRuntimeTypeEnum(val string) (LlmInferenceResponseRuntimeTypeEnum, bool)

GetMappingLlmInferenceResponseRuntimeTypeEnum performs a case-insensitive comparison on the enum value and returns the matching enum, if one exists.

type Logprobs

Logprobs The logarithmic probabilities, returned if logProbs is set in the request.

type Logprobs struct {

    // The text offset.
    TextOffset []int `mandatory:"false" json:"textOffset"`

    // The logarithmic probabilities of the output tokens.
    TokenLogprobs []float64 `mandatory:"false" json:"tokenLogprobs"`

    // The list of output tokens.
    Tokens []string `mandatory:"false" json:"tokens"`

    // The logarithmic probabilities of each of the top k tokens.
    TopLogprobs []map[string]string `mandatory:"false" json:"topLogprobs"`
}

func (Logprobs) String

func (m Logprobs) String() string

func (Logprobs) ValidateEnumValue

func (m Logprobs) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing an API request; calling it directly is not recommended.

type Message

Message A message that represents a single dialogue turn in a chat.

type Message struct {

    // Indicates who is giving the current message.
    Role *string `mandatory:"true" json:"role"`

    // Contents of the chat message.
    Content []ChatContent `mandatory:"true" json:"content"`
}

func (Message) String

func (m Message) String() string

func (*Message) UnmarshalJSON

func (m *Message) UnmarshalJSON(data []byte) (e error)

UnmarshalJSON unmarshals from json

func (Message) ValidateEnumValue

func (m Message) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing an API request; calling it directly is not recommended.

type OnDemandServingMode

OnDemandServingMode The model's serving mode is on-demand serving on a shared infrastructure.

type OnDemandServingMode struct {

    // The unique ID of the model to use. You can use the ListModels API to list the available models.
    ModelId *string `mandatory:"true" json:"modelId"`
}

func (OnDemandServingMode) MarshalJSON

func (m OnDemandServingMode) MarshalJSON() (buff []byte, e error)

MarshalJSON marshals to json representation

func (OnDemandServingMode) String

func (m OnDemandServingMode) String() string

func (OnDemandServingMode) ValidateEnumValue

func (m OnDemandServingMode) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing an API request; calling it directly is not recommended.

type SearchQuery

SearchQuery The generated search query.

type SearchQuery struct {

    // The text of the search query.
    Text *string `mandatory:"true" json:"text"`
}

func (SearchQuery) String

func (m SearchQuery) String() string

func (SearchQuery) ValidateEnumValue

func (m SearchQuery) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing an API request; calling it directly is not recommended.

type ServingMode

ServingMode The model's serving mode, which could be on-demand serving or dedicated serving.

type ServingMode interface {
}

type ServingModeServingTypeEnum

ServingModeServingTypeEnum Enum with underlying type: string

type ServingModeServingTypeEnum string

Set of constants representing the allowable values for ServingModeServingTypeEnum

const (
    ServingModeServingTypeOnDemand  ServingModeServingTypeEnum = "ON_DEMAND"
    ServingModeServingTypeDedicated ServingModeServingTypeEnum = "DEDICATED"
)

func GetMappingServingModeServingTypeEnum

func GetMappingServingModeServingTypeEnum(val string) (ServingModeServingTypeEnum, bool)

GetMappingServingModeServingTypeEnum performs a case-insensitive comparison on the enum value and returns the matching enum, if one exists.

func GetServingModeServingTypeEnumValues

func GetServingModeServingTypeEnumValues() []ServingModeServingTypeEnum

GetServingModeServingTypeEnumValues Enumerates the set of values for ServingModeServingTypeEnum

type SummarizeTextDetails

SummarizeTextDetails Details for the request to summarize text.

type SummarizeTextDetails struct {

    // The input string to be summarized.
    Input *string `mandatory:"true" json:"input"`

    ServingMode ServingMode `mandatory:"true" json:"servingMode"`

    // The OCID of the compartment that the user is authorized to use to call into the Generative AI service.
    CompartmentId *string `mandatory:"true" json:"compartmentId"`

    // Whether or not to include the original inputs in the response.
    IsEcho *bool `mandatory:"false" json:"isEcho"`

    // A number that sets the randomness of the generated output. Lower temperatures mean less random generations.
    // Use lower numbers for tasks with a correct answer such as question answering or summarizing. High temperatures can generate hallucinations or factually incorrect information. Start with temperatures lower than 1.0, and increase the temperature for more creative outputs, as you regenerate the prompts to refine the outputs.
    Temperature *float64 `mandatory:"false" json:"temperature"`

    // A free-form instruction for modifying how the summaries get generated. Should complete the sentence "Generate a summary _". For example, "focusing on the next steps" or "written by Yoda".
    AdditionalCommand *string `mandatory:"false" json:"additionalCommand"`

    // Indicates the approximate length of the summary. If "AUTO" is selected, the best option will be picked based on the input text.
    Length SummarizeTextDetailsLengthEnum `mandatory:"false" json:"length,omitempty"`

    // Indicates the style in which the summary will be delivered - in a free form paragraph or in bullet points. If "AUTO" is selected, the best option will be picked based on the input text.
    Format SummarizeTextDetailsFormatEnum `mandatory:"false" json:"format,omitempty"`

    // Controls how close to the original text the summary is. High extractiveness summaries will lean towards reusing sentences verbatim, while low extractiveness summaries will tend to paraphrase more.
    Extractiveness SummarizeTextDetailsExtractivenessEnum `mandatory:"false" json:"extractiveness,omitempty"`
}

func (SummarizeTextDetails) String

func (m SummarizeTextDetails) String() string

func (*SummarizeTextDetails) UnmarshalJSON

func (m *SummarizeTextDetails) UnmarshalJSON(data []byte) (e error)

UnmarshalJSON unmarshals from json

func (SummarizeTextDetails) ValidateEnumValue

func (m SummarizeTextDetails) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing an API request; calling it directly is not recommended.

type SummarizeTextDetailsExtractivenessEnum

SummarizeTextDetailsExtractivenessEnum Enum with underlying type: string

type SummarizeTextDetailsExtractivenessEnum string

Set of constants representing the allowable values for SummarizeTextDetailsExtractivenessEnum

const (
    SummarizeTextDetailsExtractivenessLow    SummarizeTextDetailsExtractivenessEnum = "LOW"
    SummarizeTextDetailsExtractivenessMedium SummarizeTextDetailsExtractivenessEnum = "MEDIUM"
    SummarizeTextDetailsExtractivenessHigh   SummarizeTextDetailsExtractivenessEnum = "HIGH"
    SummarizeTextDetailsExtractivenessAuto   SummarizeTextDetailsExtractivenessEnum = "AUTO"
)

func GetMappingSummarizeTextDetailsExtractivenessEnum

func GetMappingSummarizeTextDetailsExtractivenessEnum(val string) (SummarizeTextDetailsExtractivenessEnum, bool)

GetMappingSummarizeTextDetailsExtractivenessEnum performs a case-insensitive comparison on the enum value and returns the matching enum, if one exists.

func GetSummarizeTextDetailsExtractivenessEnumValues

func GetSummarizeTextDetailsExtractivenessEnumValues() []SummarizeTextDetailsExtractivenessEnum

GetSummarizeTextDetailsExtractivenessEnumValues Enumerates the set of values for SummarizeTextDetailsExtractivenessEnum

type SummarizeTextDetailsFormatEnum

SummarizeTextDetailsFormatEnum Enum with underlying type: string

type SummarizeTextDetailsFormatEnum string

Set of constants representing the allowable values for SummarizeTextDetailsFormatEnum

const (
    SummarizeTextDetailsFormatParagraph SummarizeTextDetailsFormatEnum = "PARAGRAPH"
    SummarizeTextDetailsFormatBullets   SummarizeTextDetailsFormatEnum = "BULLETS"
    SummarizeTextDetailsFormatAuto      SummarizeTextDetailsFormatEnum = "AUTO"
)

func GetMappingSummarizeTextDetailsFormatEnum

func GetMappingSummarizeTextDetailsFormatEnum(val string) (SummarizeTextDetailsFormatEnum, bool)

GetMappingSummarizeTextDetailsFormatEnum performs a case-insensitive comparison on the enum value and returns the matching enum, if one exists.

func GetSummarizeTextDetailsFormatEnumValues

func GetSummarizeTextDetailsFormatEnumValues() []SummarizeTextDetailsFormatEnum

GetSummarizeTextDetailsFormatEnumValues Enumerates the set of values for SummarizeTextDetailsFormatEnum

type SummarizeTextDetailsLengthEnum

SummarizeTextDetailsLengthEnum Enum with underlying type: string

type SummarizeTextDetailsLengthEnum string

Set of constants representing the allowable values for SummarizeTextDetailsLengthEnum

const (
    SummarizeTextDetailsLengthShort  SummarizeTextDetailsLengthEnum = "SHORT"
    SummarizeTextDetailsLengthMedium SummarizeTextDetailsLengthEnum = "MEDIUM"
    SummarizeTextDetailsLengthLong   SummarizeTextDetailsLengthEnum = "LONG"
    SummarizeTextDetailsLengthAuto   SummarizeTextDetailsLengthEnum = "AUTO"
)

func GetMappingSummarizeTextDetailsLengthEnum

func GetMappingSummarizeTextDetailsLengthEnum(val string) (SummarizeTextDetailsLengthEnum, bool)

GetMappingSummarizeTextDetailsLengthEnum performs a case-insensitive comparison on the enum value and returns the matching enum, if one exists.

func GetSummarizeTextDetailsLengthEnumValues

func GetSummarizeTextDetailsLengthEnumValues() []SummarizeTextDetailsLengthEnum

GetSummarizeTextDetailsLengthEnumValues Enumerates the set of values for SummarizeTextDetailsLengthEnum

type SummarizeTextRequest

SummarizeTextRequest wrapper for the SummarizeText operation

# See also

Click https://docs.cloud.oracle.com/en-us/iaas/tools/go-sdk-examples/65.65.0/generativeaiinference/SummarizeText.go.html to see an example of how to use SummarizeTextRequest.

type SummarizeTextRequest struct {

    // Details for summarizing the text.
    SummarizeTextDetails `contributesTo:"body"`

    // A token that uniquely identifies a request so it can be retried in case of a timeout or
    // server error without risk of executing that same action again. Retry tokens expire after 24
    // hours, but can be invalidated before then due to conflicting operations. For example, if a resource
    // has been deleted and purged from the system, then a retry of the original creation request
    // might be rejected.
    OpcRetryToken *string `mandatory:"false" contributesTo:"header" name:"opc-retry-token"`

    // The client request ID for tracing.
    OpcRequestId *string `mandatory:"false" contributesTo:"header" name:"opc-request-id"`

    // Metadata about the request. This information will not be transmitted to the service, but
    // represents information that the SDK will consume to drive retry behavior.
    RequestMetadata common.RequestMetadata
}

func (SummarizeTextRequest) BinaryRequestBody

func (request SummarizeTextRequest) BinaryRequestBody() (*common.OCIReadSeekCloser, bool)

BinaryRequestBody implements the OCIRequest interface

func (SummarizeTextRequest) HTTPRequest

func (request SummarizeTextRequest) HTTPRequest(method, path string, binaryRequestBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (http.Request, error)

HTTPRequest implements the OCIRequest interface

func (SummarizeTextRequest) RetryPolicy

func (request SummarizeTextRequest) RetryPolicy() *common.RetryPolicy

RetryPolicy implements the OCIRetryableRequest interface. This retrieves the specified retry policy.

func (SummarizeTextRequest) String

func (request SummarizeTextRequest) String() string

func (SummarizeTextRequest) ValidateEnumValue

func (request SummarizeTextRequest) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing an API request; calling it directly is not recommended.

type SummarizeTextResponse

SummarizeTextResponse wrapper for the SummarizeText operation

type SummarizeTextResponse struct {

    // The underlying http response
    RawResponse *http.Response

    // The SummarizeTextResult instance
    SummarizeTextResult `presentIn:"body"`

    // For optimistic concurrency control. See `if-match`.
    Etag *string `presentIn:"header" name:"etag"`

    // Unique Oracle-assigned identifier for the request. If you need to contact
    // Oracle about a particular request, please provide the request ID.
    OpcRequestId *string `presentIn:"header" name:"opc-request-id"`
}

func (SummarizeTextResponse) HTTPResponse

func (response SummarizeTextResponse) HTTPResponse() *http.Response

HTTPResponse implements the OCIResponse interface

func (SummarizeTextResponse) String

func (response SummarizeTextResponse) String() string

type SummarizeTextResult

SummarizeTextResult The summarize text result to return to the caller.

type SummarizeTextResult struct {

    // A unique identifier for this SummarizeTextResult.
    Id *string `mandatory:"true" json:"id"`

    // Summary result corresponding to input.
    Summary *string `mandatory:"true" json:"summary"`

    // The original input. Only included if "isEcho" is set to true.
    Input *string `mandatory:"false" json:"input"`

    // The OCID of the model used in this inference request.
    ModelId *string `mandatory:"false" json:"modelId"`

    // The version of the model.
    ModelVersion *string `mandatory:"false" json:"modelVersion"`
}

func (SummarizeTextResult) String

func (m SummarizeTextResult) String() string

func (SummarizeTextResult) ValidateEnumValue

func (m SummarizeTextResult) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing an API request; calling it directly is not recommended.

type TextContent

TextContent Represents a single instance of text chat content.

type TextContent struct {

    // The text content.
    Text *string `mandatory:"false" json:"text"`
}

func (TextContent) MarshalJSON

func (m TextContent) MarshalJSON() (buff []byte, e error)

MarshalJSON marshals to json representation

func (TextContent) String

func (m TextContent) String() string

func (TextContent) ValidateEnumValue

func (m TextContent) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing an API request; calling it directly is not recommended.

type TokenLikelihood

TokenLikelihood An object that contains the returned token and its corresponding likelihood.

type TokenLikelihood struct {

    // A word, part of a word, or a punctuation mark.
    // For example, apple is a token and friendship is made up of two tokens, friend and ship. When you run a model, you can set the maximum number of output tokens. Estimate three tokens per word.
    Token *string `mandatory:"false" json:"token"`

    // The likelihood of this token during generation.
    Likelihood *float64 `mandatory:"false" json:"likelihood"`
}

func (TokenLikelihood) String

func (m TokenLikelihood) String() string

func (TokenLikelihood) ValidateEnumValue

func (m TokenLikelihood) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing an API request; calling it directly is not recommended.