...

Package generativeaiinference

import "github.com/oracle/oci-go-sdk/generativeaiinference"
Overview
Index

Overview ▾

Index ▾

func GetBaseChatRequestApiFormatEnumStringValues() []string
func GetBaseChatResponseApiFormatEnumStringValues() []string
func GetChatContentTypeEnumStringValues() []string
func GetCohereChatRequestCitationQualityEnumStringValues() []string
func GetCohereChatRequestPromptTruncationEnumStringValues() []string
func GetCohereChatResponseFinishReasonEnumStringValues() []string
func GetCohereLlmInferenceRequestReturnLikelihoodsEnumStringValues() []string
func GetCohereLlmInferenceRequestTruncateEnumStringValues() []string
func GetCohereMessageRoleEnumStringValues() []string
func GetEmbedTextDetailsInputTypeEnumStringValues() []string
func GetEmbedTextDetailsTruncateEnumStringValues() []string
func GetLlmInferenceRequestRuntimeTypeEnumStringValues() []string
func GetLlmInferenceResponseRuntimeTypeEnumStringValues() []string
func GetMessageRoleEnumStringValues() []string
func GetServingModeServingTypeEnumStringValues() []string
func GetSummarizeTextDetailsExtractivenessEnumStringValues() []string
func GetSummarizeTextDetailsFormatEnumStringValues() []string
func GetSummarizeTextDetailsLengthEnumStringValues() []string
type AssistantMessage
    func (m AssistantMessage) GetContent() []ChatContent
    func (m AssistantMessage) MarshalJSON() (buff []byte, e error)
    func (m AssistantMessage) String() string
    func (m *AssistantMessage) UnmarshalJSON(data []byte) (e error)
    func (m AssistantMessage) ValidateEnumValue() (bool, error)
type BaseChatRequest
type BaseChatRequestApiFormatEnum
    func GetBaseChatRequestApiFormatEnumValues() []BaseChatRequestApiFormatEnum
    func GetMappingBaseChatRequestApiFormatEnum(val string) (BaseChatRequestApiFormatEnum, bool)
type BaseChatResponse
type BaseChatResponseApiFormatEnum
    func GetBaseChatResponseApiFormatEnumValues() []BaseChatResponseApiFormatEnum
    func GetMappingBaseChatResponseApiFormatEnum(val string) (BaseChatResponseApiFormatEnum, bool)
type ChatChoice
    func (m ChatChoice) String() string
    func (m *ChatChoice) UnmarshalJSON(data []byte) (e error)
    func (m ChatChoice) ValidateEnumValue() (bool, error)
type ChatContent
type ChatContentTypeEnum
    func GetChatContentTypeEnumValues() []ChatContentTypeEnum
    func GetMappingChatContentTypeEnum(val string) (ChatContentTypeEnum, bool)
type ChatDetails
    func (m ChatDetails) String() string
    func (m *ChatDetails) UnmarshalJSON(data []byte) (e error)
    func (m ChatDetails) ValidateEnumValue() (bool, error)
type ChatRequest
    func (request ChatRequest) BinaryRequestBody() (*common.OCIReadSeekCloser, bool)
    func (request ChatRequest) HTTPRequest(method, path string, binaryRequestBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (http.Request, error)
    func (request ChatRequest) RetryPolicy() *common.RetryPolicy
    func (request ChatRequest) String() string
    func (request ChatRequest) ValidateEnumValue() (bool, error)
type ChatResponse
    func (response ChatResponse) HTTPResponse() *http.Response
    func (response ChatResponse) String() string
type ChatResult
    func (m ChatResult) String() string
    func (m *ChatResult) UnmarshalJSON(data []byte) (e error)
    func (m ChatResult) ValidateEnumValue() (bool, error)
type Choice
    func (m Choice) String() string
    func (m Choice) ValidateEnumValue() (bool, error)
type Citation
    func (m Citation) String() string
    func (m Citation) ValidateEnumValue() (bool, error)
type CohereChatBotMessage
    func (m CohereChatBotMessage) MarshalJSON() (buff []byte, e error)
    func (m CohereChatBotMessage) String() string
    func (m CohereChatBotMessage) ValidateEnumValue() (bool, error)
type CohereChatRequest
    func (m CohereChatRequest) MarshalJSON() (buff []byte, e error)
    func (m CohereChatRequest) String() string
    func (m *CohereChatRequest) UnmarshalJSON(data []byte) (e error)
    func (m CohereChatRequest) ValidateEnumValue() (bool, error)
type CohereChatRequestCitationQualityEnum
    func GetCohereChatRequestCitationQualityEnumValues() []CohereChatRequestCitationQualityEnum
    func GetMappingCohereChatRequestCitationQualityEnum(val string) (CohereChatRequestCitationQualityEnum, bool)
type CohereChatRequestPromptTruncationEnum
    func GetCohereChatRequestPromptTruncationEnumValues() []CohereChatRequestPromptTruncationEnum
    func GetMappingCohereChatRequestPromptTruncationEnum(val string) (CohereChatRequestPromptTruncationEnum, bool)
type CohereChatResponse
    func (m CohereChatResponse) MarshalJSON() (buff []byte, e error)
    func (m CohereChatResponse) String() string
    func (m *CohereChatResponse) UnmarshalJSON(data []byte) (e error)
    func (m CohereChatResponse) ValidateEnumValue() (bool, error)
type CohereChatResponseFinishReasonEnum
    func GetCohereChatResponseFinishReasonEnumValues() []CohereChatResponseFinishReasonEnum
    func GetMappingCohereChatResponseFinishReasonEnum(val string) (CohereChatResponseFinishReasonEnum, bool)
type CohereLlmInferenceRequest
    func (m CohereLlmInferenceRequest) MarshalJSON() (buff []byte, e error)
    func (m CohereLlmInferenceRequest) String() string
    func (m CohereLlmInferenceRequest) ValidateEnumValue() (bool, error)
type CohereLlmInferenceRequestReturnLikelihoodsEnum
    func GetCohereLlmInferenceRequestReturnLikelihoodsEnumValues() []CohereLlmInferenceRequestReturnLikelihoodsEnum
    func GetMappingCohereLlmInferenceRequestReturnLikelihoodsEnum(val string) (CohereLlmInferenceRequestReturnLikelihoodsEnum, bool)
type CohereLlmInferenceRequestTruncateEnum
    func GetCohereLlmInferenceRequestTruncateEnumValues() []CohereLlmInferenceRequestTruncateEnum
    func GetMappingCohereLlmInferenceRequestTruncateEnum(val string) (CohereLlmInferenceRequestTruncateEnum, bool)
type CohereLlmInferenceResponse
    func (m CohereLlmInferenceResponse) MarshalJSON() (buff []byte, e error)
    func (m CohereLlmInferenceResponse) String() string
    func (m CohereLlmInferenceResponse) ValidateEnumValue() (bool, error)
type CohereMessage
type CohereMessageRoleEnum
    func GetCohereMessageRoleEnumValues() []CohereMessageRoleEnum
    func GetMappingCohereMessageRoleEnum(val string) (CohereMessageRoleEnum, bool)
type CohereParameterDefinition
    func (m CohereParameterDefinition) String() string
    func (m CohereParameterDefinition) ValidateEnumValue() (bool, error)
type CohereSystemMessage
    func (m CohereSystemMessage) MarshalJSON() (buff []byte, e error)
    func (m CohereSystemMessage) String() string
    func (m CohereSystemMessage) ValidateEnumValue() (bool, error)
type CohereTool
    func (m CohereTool) String() string
    func (m CohereTool) ValidateEnumValue() (bool, error)
type CohereToolCall
    func (m CohereToolCall) String() string
    func (m CohereToolCall) ValidateEnumValue() (bool, error)
type CohereToolMessage
    func (m CohereToolMessage) MarshalJSON() (buff []byte, e error)
    func (m CohereToolMessage) String() string
    func (m CohereToolMessage) ValidateEnumValue() (bool, error)
type CohereToolResult
    func (m CohereToolResult) String() string
    func (m CohereToolResult) ValidateEnumValue() (bool, error)
type CohereUserMessage
    func (m CohereUserMessage) MarshalJSON() (buff []byte, e error)
    func (m CohereUserMessage) String() string
    func (m CohereUserMessage) ValidateEnumValue() (bool, error)
type DedicatedServingMode
    func (m DedicatedServingMode) MarshalJSON() (buff []byte, e error)
    func (m DedicatedServingMode) String() string
    func (m DedicatedServingMode) ValidateEnumValue() (bool, error)
type EmbedTextDetails
    func (m EmbedTextDetails) String() string
    func (m *EmbedTextDetails) UnmarshalJSON(data []byte) (e error)
    func (m EmbedTextDetails) ValidateEnumValue() (bool, error)
type EmbedTextDetailsInputTypeEnum
    func GetEmbedTextDetailsInputTypeEnumValues() []EmbedTextDetailsInputTypeEnum
    func GetMappingEmbedTextDetailsInputTypeEnum(val string) (EmbedTextDetailsInputTypeEnum, bool)
type EmbedTextDetailsTruncateEnum
    func GetEmbedTextDetailsTruncateEnumValues() []EmbedTextDetailsTruncateEnum
    func GetMappingEmbedTextDetailsTruncateEnum(val string) (EmbedTextDetailsTruncateEnum, bool)
type EmbedTextRequest
    func (request EmbedTextRequest) BinaryRequestBody() (*common.OCIReadSeekCloser, bool)
    func (request EmbedTextRequest) HTTPRequest(method, path string, binaryRequestBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (http.Request, error)
    func (request EmbedTextRequest) RetryPolicy() *common.RetryPolicy
    func (request EmbedTextRequest) String() string
    func (request EmbedTextRequest) ValidateEnumValue() (bool, error)
type EmbedTextResponse
    func (response EmbedTextResponse) HTTPResponse() *http.Response
    func (response EmbedTextResponse) String() string
type EmbedTextResult
    func (m EmbedTextResult) String() string
    func (m EmbedTextResult) ValidateEnumValue() (bool, error)
type GenerateTextDetails
    func (m GenerateTextDetails) String() string
    func (m *GenerateTextDetails) UnmarshalJSON(data []byte) (e error)
    func (m GenerateTextDetails) ValidateEnumValue() (bool, error)
type GenerateTextRequest
    func (request GenerateTextRequest) BinaryRequestBody() (*common.OCIReadSeekCloser, bool)
    func (request GenerateTextRequest) HTTPRequest(method, path string, binaryRequestBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (http.Request, error)
    func (request GenerateTextRequest) RetryPolicy() *common.RetryPolicy
    func (request GenerateTextRequest) String() string
    func (request GenerateTextRequest) ValidateEnumValue() (bool, error)
type GenerateTextResponse
    func (response GenerateTextResponse) HTTPResponse() *http.Response
    func (response GenerateTextResponse) String() string
type GenerateTextResult
    func (m GenerateTextResult) String() string
    func (m *GenerateTextResult) UnmarshalJSON(data []byte) (e error)
    func (m GenerateTextResult) ValidateEnumValue() (bool, error)
type GeneratedText
    func (m GeneratedText) String() string
    func (m GeneratedText) ValidateEnumValue() (bool, error)
type GenerativeAiInferenceClient
    func NewGenerativeAiInferenceClientWithConfigurationProvider(configProvider common.ConfigurationProvider) (client GenerativeAiInferenceClient, err error)
    func NewGenerativeAiInferenceClientWithOboToken(configProvider common.ConfigurationProvider, oboToken string) (client GenerativeAiInferenceClient, err error)
    func (client GenerativeAiInferenceClient) Chat(ctx context.Context, request ChatRequest) (response ChatResponse, err error)
    func (client *GenerativeAiInferenceClient) ConfigurationProvider() *common.ConfigurationProvider
    func (client GenerativeAiInferenceClient) EmbedText(ctx context.Context, request EmbedTextRequest) (response EmbedTextResponse, err error)
    func (client GenerativeAiInferenceClient) GenerateText(ctx context.Context, request GenerateTextRequest) (response GenerateTextResponse, err error)
    func (client *GenerativeAiInferenceClient) SetRegion(region string)
    func (client GenerativeAiInferenceClient) SummarizeText(ctx context.Context, request SummarizeTextRequest) (response SummarizeTextResponse, err error)
type GenericChatRequest
    func (m GenericChatRequest) MarshalJSON() (buff []byte, e error)
    func (m GenericChatRequest) String() string
    func (m *GenericChatRequest) UnmarshalJSON(data []byte) (e error)
    func (m GenericChatRequest) ValidateEnumValue() (bool, error)
type GenericChatResponse
    func (m GenericChatResponse) MarshalJSON() (buff []byte, e error)
    func (m GenericChatResponse) String() string
    func (m GenericChatResponse) ValidateEnumValue() (bool, error)
type LlamaLlmInferenceRequest
    func (m LlamaLlmInferenceRequest) MarshalJSON() (buff []byte, e error)
    func (m LlamaLlmInferenceRequest) String() string
    func (m LlamaLlmInferenceRequest) ValidateEnumValue() (bool, error)
type LlamaLlmInferenceResponse
    func (m LlamaLlmInferenceResponse) MarshalJSON() (buff []byte, e error)
    func (m LlamaLlmInferenceResponse) String() string
    func (m LlamaLlmInferenceResponse) ValidateEnumValue() (bool, error)
type LlmInferenceRequest
type LlmInferenceRequestRuntimeTypeEnum
    func GetLlmInferenceRequestRuntimeTypeEnumValues() []LlmInferenceRequestRuntimeTypeEnum
    func GetMappingLlmInferenceRequestRuntimeTypeEnum(val string) (LlmInferenceRequestRuntimeTypeEnum, bool)
type LlmInferenceResponse
type LlmInferenceResponseRuntimeTypeEnum
    func GetLlmInferenceResponseRuntimeTypeEnumValues() []LlmInferenceResponseRuntimeTypeEnum
    func GetMappingLlmInferenceResponseRuntimeTypeEnum(val string) (LlmInferenceResponseRuntimeTypeEnum, bool)
type Logprobs
    func (m Logprobs) String() string
    func (m Logprobs) ValidateEnumValue() (bool, error)
type Message
type MessageRoleEnum
    func GetMappingMessageRoleEnum(val string) (MessageRoleEnum, bool)
    func GetMessageRoleEnumValues() []MessageRoleEnum
type OnDemandServingMode
    func (m OnDemandServingMode) MarshalJSON() (buff []byte, e error)
    func (m OnDemandServingMode) String() string
    func (m OnDemandServingMode) ValidateEnumValue() (bool, error)
type SearchQuery
    func (m SearchQuery) String() string
    func (m SearchQuery) ValidateEnumValue() (bool, error)
type ServingMode
type ServingModeServingTypeEnum
    func GetMappingServingModeServingTypeEnum(val string) (ServingModeServingTypeEnum, bool)
    func GetServingModeServingTypeEnumValues() []ServingModeServingTypeEnum
type SummarizeTextDetails
    func (m SummarizeTextDetails) String() string
    func (m *SummarizeTextDetails) UnmarshalJSON(data []byte) (e error)
    func (m SummarizeTextDetails) ValidateEnumValue() (bool, error)
type SummarizeTextDetailsExtractivenessEnum
    func GetMappingSummarizeTextDetailsExtractivenessEnum(val string) (SummarizeTextDetailsExtractivenessEnum, bool)
    func GetSummarizeTextDetailsExtractivenessEnumValues() []SummarizeTextDetailsExtractivenessEnum
type SummarizeTextDetailsFormatEnum
    func GetMappingSummarizeTextDetailsFormatEnum(val string) (SummarizeTextDetailsFormatEnum, bool)
    func GetSummarizeTextDetailsFormatEnumValues() []SummarizeTextDetailsFormatEnum
type SummarizeTextDetailsLengthEnum
    func GetMappingSummarizeTextDetailsLengthEnum(val string) (SummarizeTextDetailsLengthEnum, bool)
    func GetSummarizeTextDetailsLengthEnumValues() []SummarizeTextDetailsLengthEnum
type SummarizeTextRequest
    func (request SummarizeTextRequest) BinaryRequestBody() (*common.OCIReadSeekCloser, bool)
    func (request SummarizeTextRequest) HTTPRequest(method, path string, binaryRequestBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (http.Request, error)
    func (request SummarizeTextRequest) RetryPolicy() *common.RetryPolicy
    func (request SummarizeTextRequest) String() string
    func (request SummarizeTextRequest) ValidateEnumValue() (bool, error)
type SummarizeTextResponse
    func (response SummarizeTextResponse) HTTPResponse() *http.Response
    func (response SummarizeTextResponse) String() string
type SummarizeTextResult
    func (m SummarizeTextResult) String() string
    func (m SummarizeTextResult) ValidateEnumValue() (bool, error)
type SystemMessage
    func (m SystemMessage) GetContent() []ChatContent
    func (m SystemMessage) MarshalJSON() (buff []byte, e error)
    func (m SystemMessage) String() string
    func (m *SystemMessage) UnmarshalJSON(data []byte) (e error)
    func (m SystemMessage) ValidateEnumValue() (bool, error)
type TextContent
    func (m TextContent) MarshalJSON() (buff []byte, e error)
    func (m TextContent) String() string
    func (m TextContent) ValidateEnumValue() (bool, error)
type TokenLikelihood
    func (m TokenLikelihood) String() string
    func (m TokenLikelihood) ValidateEnumValue() (bool, error)
type UserMessage
    func (m UserMessage) GetContent() []ChatContent
    func (m UserMessage) MarshalJSON() (buff []byte, e error)
    func (m UserMessage) String() string
    func (m *UserMessage) UnmarshalJSON(data []byte) (e error)
    func (m UserMessage) ValidateEnumValue() (bool, error)

Package files

assistant_message.go base_chat_request.go base_chat_response.go chat_choice.go chat_content.go chat_details.go chat_request_response.go chat_result.go choice.go citation.go cohere_chat_bot_message.go cohere_chat_request.go cohere_chat_response.go cohere_llm_inference_request.go cohere_llm_inference_response.go cohere_message.go cohere_parameter_definition.go cohere_system_message.go cohere_tool.go cohere_tool_call.go cohere_tool_message.go cohere_tool_result.go cohere_user_message.go dedicated_serving_mode.go embed_text_details.go embed_text_request_response.go embed_text_result.go generate_text_details.go generate_text_request_response.go generate_text_result.go generated_text.go generativeaiinference_client.go generic_chat_request.go generic_chat_response.go llama_llm_inference_request.go llama_llm_inference_response.go llm_inference_request.go llm_inference_response.go logprobs.go message.go on_demand_serving_mode.go search_query.go serving_mode.go summarize_text_details.go summarize_text_request_response.go summarize_text_result.go system_message.go text_content.go token_likelihood.go user_message.go

func GetBaseChatRequestApiFormatEnumStringValues

func GetBaseChatRequestApiFormatEnumStringValues() []string

GetBaseChatRequestApiFormatEnumStringValues Enumerates the set of string values for BaseChatRequestApiFormatEnum

func GetBaseChatResponseApiFormatEnumStringValues

func GetBaseChatResponseApiFormatEnumStringValues() []string

GetBaseChatResponseApiFormatEnumStringValues Enumerates the set of string values for BaseChatResponseApiFormatEnum

func GetChatContentTypeEnumStringValues

func GetChatContentTypeEnumStringValues() []string

GetChatContentTypeEnumStringValues Enumerates the set of string values for ChatContentTypeEnum

func GetCohereChatRequestCitationQualityEnumStringValues

func GetCohereChatRequestCitationQualityEnumStringValues() []string

GetCohereChatRequestCitationQualityEnumStringValues Enumerates the set of string values for CohereChatRequestCitationQualityEnum

func GetCohereChatRequestPromptTruncationEnumStringValues

func GetCohereChatRequestPromptTruncationEnumStringValues() []string

GetCohereChatRequestPromptTruncationEnumStringValues Enumerates the set of string values for CohereChatRequestPromptTruncationEnum

func GetCohereChatResponseFinishReasonEnumStringValues

func GetCohereChatResponseFinishReasonEnumStringValues() []string

GetCohereChatResponseFinishReasonEnumStringValues Enumerates the set of string values for CohereChatResponseFinishReasonEnum

func GetCohereLlmInferenceRequestReturnLikelihoodsEnumStringValues

func GetCohereLlmInferenceRequestReturnLikelihoodsEnumStringValues() []string

GetCohereLlmInferenceRequestReturnLikelihoodsEnumStringValues Enumerates the set of string values for CohereLlmInferenceRequestReturnLikelihoodsEnum

func GetCohereLlmInferenceRequestTruncateEnumStringValues

func GetCohereLlmInferenceRequestTruncateEnumStringValues() []string

GetCohereLlmInferenceRequestTruncateEnumStringValues Enumerates the set of string values for CohereLlmInferenceRequestTruncateEnum

func GetCohereMessageRoleEnumStringValues

func GetCohereMessageRoleEnumStringValues() []string

GetCohereMessageRoleEnumStringValues Enumerates the set of string values for CohereMessageRoleEnum

func GetEmbedTextDetailsInputTypeEnumStringValues

func GetEmbedTextDetailsInputTypeEnumStringValues() []string

GetEmbedTextDetailsInputTypeEnumStringValues Enumerates the set of string values for EmbedTextDetailsInputTypeEnum

func GetEmbedTextDetailsTruncateEnumStringValues

func GetEmbedTextDetailsTruncateEnumStringValues() []string

GetEmbedTextDetailsTruncateEnumStringValues Enumerates the set of string values for EmbedTextDetailsTruncateEnum

func GetLlmInferenceRequestRuntimeTypeEnumStringValues

func GetLlmInferenceRequestRuntimeTypeEnumStringValues() []string

GetLlmInferenceRequestRuntimeTypeEnumStringValues Enumerates the set of string values for LlmInferenceRequestRuntimeTypeEnum

func GetLlmInferenceResponseRuntimeTypeEnumStringValues

func GetLlmInferenceResponseRuntimeTypeEnumStringValues() []string

GetLlmInferenceResponseRuntimeTypeEnumStringValues Enumerates the set of string values for LlmInferenceResponseRuntimeTypeEnum

func GetMessageRoleEnumStringValues

func GetMessageRoleEnumStringValues() []string

GetMessageRoleEnumStringValues Enumerates the set of string values for MessageRoleEnum

func GetServingModeServingTypeEnumStringValues

func GetServingModeServingTypeEnumStringValues() []string

GetServingModeServingTypeEnumStringValues Enumerates the set of string values for ServingModeServingTypeEnum

func GetSummarizeTextDetailsExtractivenessEnumStringValues

func GetSummarizeTextDetailsExtractivenessEnumStringValues() []string

GetSummarizeTextDetailsExtractivenessEnumStringValues Enumerates the set of string values for SummarizeTextDetailsExtractivenessEnum

func GetSummarizeTextDetailsFormatEnumStringValues

func GetSummarizeTextDetailsFormatEnumStringValues() []string

GetSummarizeTextDetailsFormatEnumStringValues Enumerates the set of string values for SummarizeTextDetailsFormatEnum

func GetSummarizeTextDetailsLengthEnumStringValues

func GetSummarizeTextDetailsLengthEnumStringValues() []string

GetSummarizeTextDetailsLengthEnumStringValues Enumerates the set of string values for SummarizeTextDetailsLengthEnum

type AssistantMessage

AssistantMessage Represents a single instance of an assistant message.

type AssistantMessage struct {

    // Contents of the chat message.
    Content []ChatContent `mandatory:"false" json:"content"`

    // An optional name for the participant. Provides the model information to differentiate between participants of the same role.
    Name *string `mandatory:"false" json:"name"`
}

func (AssistantMessage) GetContent

func (m AssistantMessage) GetContent() []ChatContent

GetContent returns Content

func (AssistantMessage) MarshalJSON

func (m AssistantMessage) MarshalJSON() (buff []byte, e error)

MarshalJSON marshals to json representation

func (AssistantMessage) String

func (m AssistantMessage) String() string

func (*AssistantMessage) UnmarshalJSON

func (m *AssistantMessage) UnmarshalJSON(data []byte) (e error)

UnmarshalJSON unmarshals from json

func (AssistantMessage) ValidateEnumValue

func (m AssistantMessage) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing an API request. Calling this function directly is not recommended.

type BaseChatRequest

BaseChatRequest The base class to use for the chat inference request.

type BaseChatRequest interface {
}

type BaseChatRequestApiFormatEnum

BaseChatRequestApiFormatEnum Enum with underlying type: string

type BaseChatRequestApiFormatEnum string

Set of constants representing the allowable values for BaseChatRequestApiFormatEnum

const (
    BaseChatRequestApiFormatCohere  BaseChatRequestApiFormatEnum = "COHERE"
    BaseChatRequestApiFormatGeneric BaseChatRequestApiFormatEnum = "GENERIC"
)

func GetBaseChatRequestApiFormatEnumValues

func GetBaseChatRequestApiFormatEnumValues() []BaseChatRequestApiFormatEnum

GetBaseChatRequestApiFormatEnumValues Enumerates the set of values for BaseChatRequestApiFormatEnum

func GetMappingBaseChatRequestApiFormatEnum

func GetMappingBaseChatRequestApiFormatEnum(val string) (BaseChatRequestApiFormatEnum, bool)

GetMappingBaseChatRequestApiFormatEnum performs a case-insensitive comparison on the enum value and returns the desired enum

type BaseChatResponse

BaseChatResponse The base class that creates the chat response.

type BaseChatResponse interface {
}

type BaseChatResponseApiFormatEnum

BaseChatResponseApiFormatEnum Enum with underlying type: string

type BaseChatResponseApiFormatEnum string

Set of constants representing the allowable values for BaseChatResponseApiFormatEnum

const (
    BaseChatResponseApiFormatCohere  BaseChatResponseApiFormatEnum = "COHERE"
    BaseChatResponseApiFormatGeneric BaseChatResponseApiFormatEnum = "GENERIC"
)

func GetBaseChatResponseApiFormatEnumValues

func GetBaseChatResponseApiFormatEnumValues() []BaseChatResponseApiFormatEnum

GetBaseChatResponseApiFormatEnumValues Enumerates the set of values for BaseChatResponseApiFormatEnum

func GetMappingBaseChatResponseApiFormatEnum

func GetMappingBaseChatResponseApiFormatEnum(val string) (BaseChatResponseApiFormatEnum, bool)

GetMappingBaseChatResponseApiFormatEnum performs a case-insensitive comparison on the enum value and returns the desired enum

type ChatChoice

ChatChoice Represents a single instance of the chat response.

type ChatChoice struct {

    // The index of the chat.
    Index *int `mandatory:"true" json:"index"`

    Message Message `mandatory:"true" json:"message"`

    // The reason why the model stopped generating tokens.
    // Stops if the model hits a natural stop point or a provided stop sequence. Returns the length if the tokens reach the specified maximum number of tokens.
    FinishReason *string `mandatory:"true" json:"finishReason"`

    Logprobs *Logprobs `mandatory:"false" json:"logprobs"`
}

func (ChatChoice) String

func (m ChatChoice) String() string

func (*ChatChoice) UnmarshalJSON

func (m *ChatChoice) UnmarshalJSON(data []byte) (e error)

UnmarshalJSON unmarshals from json

func (ChatChoice) ValidateEnumValue

func (m ChatChoice) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing an API request. Calling this function directly is not recommended.

type ChatContent

ChatContent The base class for the chat content.

type ChatContent interface {
}

type ChatContentTypeEnum

ChatContentTypeEnum Enum with underlying type: string

type ChatContentTypeEnum string

Set of constants representing the allowable values for ChatContentTypeEnum

const (
    ChatContentTypeText ChatContentTypeEnum = "TEXT"
)

func GetChatContentTypeEnumValues

func GetChatContentTypeEnumValues() []ChatContentTypeEnum

GetChatContentTypeEnumValues Enumerates the set of values for ChatContentTypeEnum

func GetMappingChatContentTypeEnum

func GetMappingChatContentTypeEnum(val string) (ChatContentTypeEnum, bool)

GetMappingChatContentTypeEnum performs a case-insensitive comparison on the enum value and returns the desired enum

type ChatDetails

ChatDetails Details of the conversation for the model to respond.

type ChatDetails struct {

    // The OCID of the compartment in which to call the Generative AI service to chat.
    CompartmentId *string `mandatory:"true" json:"compartmentId"`

    ServingMode ServingMode `mandatory:"true" json:"servingMode"`

    ChatRequest BaseChatRequest `mandatory:"true" json:"chatRequest"`
}

func (ChatDetails) String

func (m ChatDetails) String() string

func (*ChatDetails) UnmarshalJSON

func (m *ChatDetails) UnmarshalJSON(data []byte) (e error)

UnmarshalJSON unmarshals from json

func (ChatDetails) ValidateEnumValue

func (m ChatDetails) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing an API request. Calling this function directly is not recommended.

type ChatRequest

ChatRequest wrapper for the Chat operation

See also

Click https://docs.cloud.oracle.com/en-us/iaas/tools/go-sdk-examples/65.68.0/generativeaiinference/Chat.go.html to see an example of how to use ChatRequest.

type ChatRequest struct {

    // Details of the conversation for the model to respond.
    ChatDetails `contributesTo:"body"`

    // A token that uniquely identifies a request so it can be retried in case of a timeout or
    // server error without risk of executing that same action again. Retry tokens expire after 24
    // hours, but can be invalidated before that, in case of conflicting operations. For example, if a resource is deleted and purged from the system, then a retry of the original creation request
    // is rejected.
    OpcRetryToken *string `mandatory:"false" contributesTo:"header" name:"opc-retry-token"`

    // The client request ID for tracing.
    OpcRequestId *string `mandatory:"false" contributesTo:"header" name:"opc-request-id"`

    // Metadata about the request. This information will not be transmitted to the service, but
    // represents information that the SDK will consume to drive retry behavior.
    RequestMetadata common.RequestMetadata
}

func (ChatRequest) BinaryRequestBody

func (request ChatRequest) BinaryRequestBody() (*common.OCIReadSeekCloser, bool)

BinaryRequestBody implements the OCIRequest interface

func (ChatRequest) HTTPRequest

func (request ChatRequest) HTTPRequest(method, path string, binaryRequestBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (http.Request, error)

HTTPRequest implements the OCIRequest interface

func (ChatRequest) RetryPolicy

func (request ChatRequest) RetryPolicy() *common.RetryPolicy

RetryPolicy implements the OCIRetryableRequest interface. This retrieves the specified retry policy.

func (ChatRequest) String

func (request ChatRequest) String() string

func (ChatRequest) ValidateEnumValue

func (request ChatRequest) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing an API request. Calling this function directly is not recommended.

type ChatResponse

ChatResponse wrapper for the Chat operation

type ChatResponse struct {

    // The underlying http response
    RawResponse *http.Response

    // The ChatResult instance
    ChatResult `presentIn:"body"`

    // For optimistic concurrency control. See `if-match`.
    Etag *string `presentIn:"header" name:"etag"`

    // Unique Oracle-assigned identifier for the request. If you need to contact
    // Oracle about a particular request, please provide the request ID.
    OpcRequestId *string `presentIn:"header" name:"opc-request-id"`
}

func (ChatResponse) HTTPResponse

func (response ChatResponse) HTTPResponse() *http.Response

HTTPResponse implements the OCIResponse interface

func (ChatResponse) String

func (response ChatResponse) String() string

type ChatResult

ChatResult The response to the chat conversation.

type ChatResult struct {

    // The OCID of the model that's used in this inference request.
    ModelId *string `mandatory:"true" json:"modelId"`

    // The version of the model.
    ModelVersion *string `mandatory:"true" json:"modelVersion"`

    ChatResponse BaseChatResponse `mandatory:"true" json:"chatResponse"`
}

func (ChatResult) String

func (m ChatResult) String() string

func (*ChatResult) UnmarshalJSON

func (m *ChatResult) UnmarshalJSON(data []byte) (e error)

UnmarshalJSON unmarshals from json

func (ChatResult) ValidateEnumValue

func (m ChatResult) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when an unsupported enum value is provided. This function is called while constructing an API request. Calling this function directly is not recommended.

type Choice

Choice Represents a single instance of the generated text.

type Choice struct {

    // The index of the generated text.
    Index *int `mandatory:"true" json:"index"`

    // The generated text.
    Text *string `mandatory:"true" json:"text"`

    // The reason why the model stopped generating tokens.
    // Stops if the model hits a natural stop point or a provided stop sequence. Returns the length if the tokens reach the specified maximum number of tokens.
    FinishReason *string `mandatory:"true" json:"finishReason"`

    Logprobs *Logprobs `mandatory:"false" json:"logprobs"`
}

func (Choice) String

func (m Choice) String() string

func (Choice) ValidateEnumValue

func (m Choice) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing the API request; calling it directly is not recommended.

type Citation

Citation A section of the generated response which cites the documents that were used for generating the response.

type Citation struct {

    // Counting from zero, the index of the text where the citation starts.
    Start *int `mandatory:"true" json:"start"`

    // Counting from zero, the index of the text that the citation ends after.
    End *int `mandatory:"true" json:"end"`

    // The text of the citation.
    Text *string `mandatory:"true" json:"text"`

    // Identifiers for the documents cited in the current generated response.
    DocumentIds []string `mandatory:"true" json:"documentIds"`
}

func (Citation) String

func (m Citation) String() string

func (Citation) ValidateEnumValue

func (m Citation) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing the API request; calling it directly is not recommended.

type CohereChatBotMessage

CohereChatBotMessage A message that represents a single chat dialog as CHATBOT role.

type CohereChatBotMessage struct {

    // Contents of the chat message.
    Message *string `mandatory:"false" json:"message"`

    // A list of tool calls generated by the model.
    ToolCalls []CohereToolCall `mandatory:"false" json:"toolCalls"`
}

func (CohereChatBotMessage) MarshalJSON

func (m CohereChatBotMessage) MarshalJSON() (buff []byte, e error)

MarshalJSON marshals to json representation

func (CohereChatBotMessage) String

func (m CohereChatBotMessage) String() string

func (CohereChatBotMessage) ValidateEnumValue

func (m CohereChatBotMessage) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing the API request; calling it directly is not recommended.

type CohereChatRequest

CohereChatRequest Details for the chat request for Cohere models.

type CohereChatRequest struct {

    // The text that the user inputs for the model to respond to.
    Message *string `mandatory:"true" json:"message"`

    // The list of previous messages between the user and the model. The chat history gives the model context for responding to the user's inputs.
    ChatHistory []CohereMessage `mandatory:"false" json:"chatHistory"`

    // A list of relevant documents that the model can refer to for generating grounded responses to the user's requests.
    // Some example keys that you can add to the dictionary are "text", "author", and "date". Keep the total word count of the strings in the dictionary to 300 words or less.
    // Example:
    // `[
    //   { "title": "Tall penguins", "snippet": "Emperor penguins are the tallest." },
    //   { "title": "Penguin habitats", "snippet": "Emperor penguins only live in Antarctica." }
    // ]`
    Documents []interface{} `mandatory:"false" json:"documents"`

    // When set to true, the response contains only a list of generated search queries without the search results and the model will not respond to the user's message.
    IsSearchQueriesOnly *bool `mandatory:"false" json:"isSearchQueriesOnly"`

    // If specified, the default Cohere preamble is replaced with the provided preamble. A preamble is an initial guideline message that can change the model's overall chat behavior and conversation style. Default preambles vary for different models.
    // Example: `You are a travel advisor. Answer with a pirate tone.`
    PreambleOverride *string `mandatory:"false" json:"preambleOverride"`

    // Whether to stream the partial progress of the model's response. When set to true, as tokens become available, they are sent as data-only server-sent events.
    IsStream *bool `mandatory:"false" json:"isStream"`

    // The maximum number of output tokens that the model will generate for the response.
    MaxTokens *int `mandatory:"false" json:"maxTokens"`

    // A number that sets the randomness of the generated output. A lower temperature means less random generations.
    // Use lower numbers for tasks such as question answering or summarizing. High temperatures can generate hallucinations or factually incorrect information. Start with temperatures lower than 1.0 and increase the temperature for more creative outputs, as you regenerate the prompts to refine the outputs.
    Temperature *float64 `mandatory:"false" json:"temperature"`

    // A sampling method in which the model chooses the next token randomly from the top k most likely tokens. A higher value for k generates more random output, which makes the output text sound more natural. The default value for k is 0 which disables this method and considers all tokens. To set a number for the likely tokens, choose an integer between 1 and 500.
    // If also using top p, then the model considers only the top tokens whose probabilities add up to p percent and ignores the rest of the k tokens. For example, if k is 20 but only the probabilities of the top 10 add up to the value of p, then only the top 10 tokens are chosen.
    TopK *int `mandatory:"false" json:"topK"`

    // If set to a probability 0.0 < p < 1.0, it ensures that only the most likely tokens, with total probability mass of p, are considered for generation at each step.
    // To eliminate tokens with low likelihood, assign p a minimum percentage for the next token's likelihood. For example, when p is set to 0.75, the model eliminates the bottom 25 percent for the next token. Set to 1.0 to consider all tokens and set to 0 to disable. If both k and p are enabled, p acts after k.
    TopP *float64 `mandatory:"false" json:"topP"`

    // To reduce repetitiveness of generated tokens, this number penalizes new tokens based on their frequency in the generated text so far. Greater numbers encourage the model to use new tokens, while lower numbers encourage the model to repeat the tokens. Set to 0 to disable.
    FrequencyPenalty *float64 `mandatory:"false" json:"frequencyPenalty"`

    // To reduce repetitiveness of generated tokens, this number penalizes new tokens based on whether they've appeared in the generated text so far. Greater numbers encourage the model to use new tokens, while lower numbers encourage the model to repeat the tokens.
    // Similar to frequency penalty, a penalty is applied to previously present tokens, except that this penalty is applied equally to all tokens that have already appeared, regardless of how many times they've appeared. Set to 0 to disable.
    PresencePenalty *float64 `mandatory:"false" json:"presencePenalty"`

    // If specified, the backend will make a best effort to sample tokens deterministically, such that repeated requests with the same seed and parameters should return the same result. However, determinism cannot be totally guaranteed.
    Seed *int `mandatory:"false" json:"seed"`

    // When set to true, the response returns the full prompt that was sent to the model.
    IsEcho *bool `mandatory:"false" json:"isEcho"`

    // A list of available tools (functions) that the model may suggest invoking before producing a text response.
    Tools []CohereTool `mandatory:"false" json:"tools"`

    // A list of results from invoking tools recommended by the model in the previous chat turn.
    ToolResults []CohereToolResult `mandatory:"false" json:"toolResults"`

    // When enabled, the model will issue (potentially multiple) tool calls in a single step, before it receives the tool responses and directly answers the user's original message.
    IsForceSingleStep *bool `mandatory:"false" json:"isForceSingleStep"`

    // Stop the model generation when it reaches a stop sequence defined in this parameter.
    StopSequences []string `mandatory:"false" json:"stopSequences"`

    // When enabled, the user’s `message` will be sent to the model without any preprocessing.
    IsRawPrompting *bool `mandatory:"false" json:"isRawPrompting"`

    // Defaults to OFF. Dictates how the prompt will be constructed. With `prompt_truncation` set to AUTO_PRESERVE_ORDER, some elements from `chat_history` and `documents` will be dropped to construct a prompt that fits within the model's context length limit. During this process the order of the documents and chat history will be preserved. With `prompt_truncation` set to OFF, no elements will be dropped.
    PromptTruncation CohereChatRequestPromptTruncationEnum `mandatory:"false" json:"promptTruncation,omitempty"`

    // When FAST is selected, citations are generated at the same time as the text output and the request will be completed sooner. May result in less accurate citations.
    CitationQuality CohereChatRequestCitationQualityEnum `mandatory:"false" json:"citationQuality,omitempty"`
}

func (CohereChatRequest) MarshalJSON

func (m CohereChatRequest) MarshalJSON() (buff []byte, e error)

MarshalJSON marshals to json representation

func (CohereChatRequest) String

func (m CohereChatRequest) String() string

func (*CohereChatRequest) UnmarshalJSON

func (m *CohereChatRequest) UnmarshalJSON(data []byte) (e error)

UnmarshalJSON unmarshals from json

func (CohereChatRequest) ValidateEnumValue

func (m CohereChatRequest) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing the API request; calling it directly is not recommended.

type CohereChatRequestCitationQualityEnum

CohereChatRequestCitationQualityEnum Enum with underlying type: string

type CohereChatRequestCitationQualityEnum string

Set of constants representing the allowable values for CohereChatRequestCitationQualityEnum

const (
    CohereChatRequestCitationQualityAccurate CohereChatRequestCitationQualityEnum = "ACCURATE"
    CohereChatRequestCitationQualityFast     CohereChatRequestCitationQualityEnum = "FAST"
)

func GetCohereChatRequestCitationQualityEnumValues

func GetCohereChatRequestCitationQualityEnumValues() []CohereChatRequestCitationQualityEnum

GetCohereChatRequestCitationQualityEnumValues Enumerates the set of values for CohereChatRequestCitationQualityEnum

func GetMappingCohereChatRequestCitationQualityEnum

func GetMappingCohereChatRequestCitationQualityEnum(val string) (CohereChatRequestCitationQualityEnum, bool)

GetMappingCohereChatRequestCitationQualityEnum performs a case-insensitive comparison on the enum value and returns the desired enum.

type CohereChatRequestPromptTruncationEnum

CohereChatRequestPromptTruncationEnum Enum with underlying type: string

type CohereChatRequestPromptTruncationEnum string

Set of constants representing the allowable values for CohereChatRequestPromptTruncationEnum

const (
    CohereChatRequestPromptTruncationOff               CohereChatRequestPromptTruncationEnum = "OFF"
    CohereChatRequestPromptTruncationAutoPreserveOrder CohereChatRequestPromptTruncationEnum = "AUTO_PRESERVE_ORDER"
)

func GetCohereChatRequestPromptTruncationEnumValues

func GetCohereChatRequestPromptTruncationEnumValues() []CohereChatRequestPromptTruncationEnum

GetCohereChatRequestPromptTruncationEnumValues Enumerates the set of values for CohereChatRequestPromptTruncationEnum

func GetMappingCohereChatRequestPromptTruncationEnum

func GetMappingCohereChatRequestPromptTruncationEnum(val string) (CohereChatRequestPromptTruncationEnum, bool)

GetMappingCohereChatRequestPromptTruncationEnum performs a case-insensitive comparison on the enum value and returns the desired enum.

type CohereChatResponse

CohereChatResponse The response to the chat conversation.

type CohereChatResponse struct {

    // Contents of the response that the model generates.
    Text *string `mandatory:"true" json:"text"`

    // The list of previous messages between the user and the model. The chat history gives the model context for responding to the user's inputs.
    ChatHistory []CohereMessage `mandatory:"false" json:"chatHistory"`

    // Inline citations for the generated response.
    Citations []Citation `mandatory:"false" json:"citations"`

    // If set to true, a search for documents is required.
    IsSearchRequired *bool `mandatory:"false" json:"isSearchRequired"`

    // If there is an error during the streaming scenario, then the `errorMessage` parameter contains details for the error.
    ErrorMessage *string `mandatory:"false" json:"errorMessage"`

    // The generated search queries.
    SearchQueries []SearchQuery `mandatory:"false" json:"searchQueries"`

    // The documents that the model can refer to when generating a response. Each document is a JSON string that represents the field and values of the document.
    // Example:
    // '[
    //   {
    //     "id": "doc_0",
    //     "snippet": "Emperor penguins are the tallest.",
    //     "title": "Tall penguins"
    //   },
    //   {
    //     "id": "doc_1",
    //     "snippet": "Emperor penguins only live in Antarctica.",
    //     "title": "Penguin habitats"
    //   }
    // ]'
    Documents []interface{} `mandatory:"false" json:"documents"`

    // A list of tool calls generated by the model.
    ToolCalls []CohereToolCall `mandatory:"false" json:"toolCalls"`

    // The full prompt that was sent to the model, returned when isEcho is set to true in the request.
    Prompt *string `mandatory:"false" json:"prompt"`

    // Why the generation stopped.
    FinishReason CohereChatResponseFinishReasonEnum `mandatory:"true" json:"finishReason"`
}

func (CohereChatResponse) MarshalJSON

func (m CohereChatResponse) MarshalJSON() (buff []byte, e error)

MarshalJSON marshals to json representation

func (CohereChatResponse) String

func (m CohereChatResponse) String() string

func (*CohereChatResponse) UnmarshalJSON

func (m *CohereChatResponse) UnmarshalJSON(data []byte) (e error)

UnmarshalJSON unmarshals from json

func (CohereChatResponse) ValidateEnumValue

func (m CohereChatResponse) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing the API request; calling it directly is not recommended.

type CohereChatResponseFinishReasonEnum

CohereChatResponseFinishReasonEnum Enum with underlying type: string

type CohereChatResponseFinishReasonEnum string

Set of constants representing the allowable values for CohereChatResponseFinishReasonEnum

const (
    CohereChatResponseFinishReasonComplete   CohereChatResponseFinishReasonEnum = "COMPLETE"
    CohereChatResponseFinishReasonErrorToxic CohereChatResponseFinishReasonEnum = "ERROR_TOXIC"
    CohereChatResponseFinishReasonErrorLimit CohereChatResponseFinishReasonEnum = "ERROR_LIMIT"
    CohereChatResponseFinishReasonError      CohereChatResponseFinishReasonEnum = "ERROR"
    CohereChatResponseFinishReasonUserCancel CohereChatResponseFinishReasonEnum = "USER_CANCEL"
    CohereChatResponseFinishReasonMaxTokens  CohereChatResponseFinishReasonEnum = "MAX_TOKENS"
)

func GetCohereChatResponseFinishReasonEnumValues

func GetCohereChatResponseFinishReasonEnumValues() []CohereChatResponseFinishReasonEnum

GetCohereChatResponseFinishReasonEnumValues Enumerates the set of values for CohereChatResponseFinishReasonEnum

func GetMappingCohereChatResponseFinishReasonEnum

func GetMappingCohereChatResponseFinishReasonEnum(val string) (CohereChatResponseFinishReasonEnum, bool)

GetMappingCohereChatResponseFinishReasonEnum performs a case-insensitive comparison on the enum value and returns the desired enum.

type CohereLlmInferenceRequest

CohereLlmInferenceRequest Details for the text generation request for Cohere models.

type CohereLlmInferenceRequest struct {

    // Represents the prompt to be completed. The trailing white spaces are trimmed before completion.
    Prompt *string `mandatory:"true" json:"prompt"`

    // Whether to stream back partial progress. If set, tokens are sent as data-only server-sent events as they become available.
    IsStream *bool `mandatory:"false" json:"isStream"`

    // The number of generated texts that will be returned.
    NumGenerations *int `mandatory:"false" json:"numGenerations"`

    // Whether or not to return the user prompt in the response. This option only applies to non-stream results.
    IsEcho *bool `mandatory:"false" json:"isEcho"`

    // The maximum number of tokens to predict for each response. Includes input plus output tokens.
    MaxTokens *int `mandatory:"false" json:"maxTokens"`

    // A number that sets the randomness of the generated output. A lower temperature means less random generations.
    // Use lower numbers for tasks with a correct answer such as question answering or summarizing. High temperatures can generate hallucinations or factually incorrect information. Start with temperatures lower than 1.0 and increase the temperature for more creative outputs, as you regenerate the prompts to refine the outputs.
    Temperature *float64 `mandatory:"false" json:"temperature"`

    // An integer that sets up the model to use only the top k most likely tokens in the generated output. A higher k introduces more randomness into the output making the output text sound more natural. Default value is 0 which disables this method and considers all tokens. To set a number for the likely tokens, choose an integer between 1 and 500.
    // If also using top p, then the model considers only the top tokens whose probabilities add up to p percent and ignores the rest of the k tokens. For example, if k is 20, but the probabilities of the top 10 add up to .75, then only the top 10 tokens are chosen.
    TopK *int `mandatory:"false" json:"topK"`

    // If set to a probability 0.0 < p < 1.0, it ensures that only the most likely tokens, with total probability mass of p, are considered for generation at each step.
    // To eliminate tokens with low likelihood, assign p a minimum percentage for the next token's likelihood. For example, when p is set to 0.75, the model eliminates the bottom 25 percent for the next token. Set to 1.0 to consider all tokens and set to 0 to disable. If both k and p are enabled, p acts after k.
    TopP *float64 `mandatory:"false" json:"topP"`

    // To reduce repetitiveness of generated tokens, this number penalizes new tokens based on their frequency in the generated text so far. Greater numbers encourage the model to use new tokens, while lower numbers encourage the model to repeat the tokens. Set to 0 to disable.
    FrequencyPenalty *float64 `mandatory:"false" json:"frequencyPenalty"`

    // To reduce repetitiveness of generated tokens, this number penalizes new tokens based on whether they've appeared in the generated text so far. Greater numbers encourage the model to use new tokens, while lower numbers encourage the model to repeat the tokens.
    // Similar to frequency penalty, a penalty is applied to previously present tokens, except that this penalty is applied equally to all tokens that have already appeared, regardless of how many times they've appeared. Set to 0 to disable.
    PresencePenalty *float64 `mandatory:"false" json:"presencePenalty"`

    // The generated text is cut at the end of the earliest occurrence of this stop sequence. The generated text will include this stop sequence.
    StopSequences []string `mandatory:"false" json:"stopSequences"`

    // Specifies how and if the token likelihoods are returned with the response.
    ReturnLikelihoods CohereLlmInferenceRequestReturnLikelihoodsEnum `mandatory:"false" json:"returnLikelihoods,omitempty"`

    // For an input that's longer than the maximum token length, specifies which part of the input text will be truncated.
    Truncate CohereLlmInferenceRequestTruncateEnum `mandatory:"false" json:"truncate,omitempty"`
}

func (CohereLlmInferenceRequest) MarshalJSON

func (m CohereLlmInferenceRequest) MarshalJSON() (buff []byte, e error)

MarshalJSON marshals to json representation

func (CohereLlmInferenceRequest) String

func (m CohereLlmInferenceRequest) String() string

func (CohereLlmInferenceRequest) ValidateEnumValue

func (m CohereLlmInferenceRequest) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing the API request; calling it directly is not recommended.

type CohereLlmInferenceRequestReturnLikelihoodsEnum

CohereLlmInferenceRequestReturnLikelihoodsEnum Enum with underlying type: string

type CohereLlmInferenceRequestReturnLikelihoodsEnum string

Set of constants representing the allowable values for CohereLlmInferenceRequestReturnLikelihoodsEnum

const (
    CohereLlmInferenceRequestReturnLikelihoodsNone       CohereLlmInferenceRequestReturnLikelihoodsEnum = "NONE"
    CohereLlmInferenceRequestReturnLikelihoodsAll        CohereLlmInferenceRequestReturnLikelihoodsEnum = "ALL"
    CohereLlmInferenceRequestReturnLikelihoodsGeneration CohereLlmInferenceRequestReturnLikelihoodsEnum = "GENERATION"
)

func GetCohereLlmInferenceRequestReturnLikelihoodsEnumValues

func GetCohereLlmInferenceRequestReturnLikelihoodsEnumValues() []CohereLlmInferenceRequestReturnLikelihoodsEnum

GetCohereLlmInferenceRequestReturnLikelihoodsEnumValues Enumerates the set of values for CohereLlmInferenceRequestReturnLikelihoodsEnum

func GetMappingCohereLlmInferenceRequestReturnLikelihoodsEnum

func GetMappingCohereLlmInferenceRequestReturnLikelihoodsEnum(val string) (CohereLlmInferenceRequestReturnLikelihoodsEnum, bool)

GetMappingCohereLlmInferenceRequestReturnLikelihoodsEnum performs a case-insensitive comparison on the enum value and returns the desired enum.

type CohereLlmInferenceRequestTruncateEnum

CohereLlmInferenceRequestTruncateEnum Enum with underlying type: string

type CohereLlmInferenceRequestTruncateEnum string

Set of constants representing the allowable values for CohereLlmInferenceRequestTruncateEnum

const (
    CohereLlmInferenceRequestTruncateNone  CohereLlmInferenceRequestTruncateEnum = "NONE"
    CohereLlmInferenceRequestTruncateStart CohereLlmInferenceRequestTruncateEnum = "START"
    CohereLlmInferenceRequestTruncateEnd   CohereLlmInferenceRequestTruncateEnum = "END"
)

func GetCohereLlmInferenceRequestTruncateEnumValues

func GetCohereLlmInferenceRequestTruncateEnumValues() []CohereLlmInferenceRequestTruncateEnum

GetCohereLlmInferenceRequestTruncateEnumValues Enumerates the set of values for CohereLlmInferenceRequestTruncateEnum

func GetMappingCohereLlmInferenceRequestTruncateEnum

func GetMappingCohereLlmInferenceRequestTruncateEnum(val string) (CohereLlmInferenceRequestTruncateEnum, bool)

GetMappingCohereLlmInferenceRequestTruncateEnum performs a case-insensitive comparison on the enum value and returns the desired enum.

type CohereLlmInferenceResponse

CohereLlmInferenceResponse The generated text result to return.

type CohereLlmInferenceResponse struct {

    // Each prompt in the input array has an array of GeneratedText, controlled by numGenerations parameter in the request.
    GeneratedTexts []GeneratedText `mandatory:"true" json:"generatedTexts"`

    // The date and time that the model was created in an RFC3339 formatted datetime string.
    TimeCreated *common.SDKTime `mandatory:"true" json:"timeCreated"`

    // Represents the original prompt. Applies only to non-stream responses.
    Prompt *string `mandatory:"false" json:"prompt"`
}

func (CohereLlmInferenceResponse) MarshalJSON

func (m CohereLlmInferenceResponse) MarshalJSON() (buff []byte, e error)

MarshalJSON marshals to json representation

func (CohereLlmInferenceResponse) String

func (m CohereLlmInferenceResponse) String() string

func (CohereLlmInferenceResponse) ValidateEnumValue

func (m CohereLlmInferenceResponse) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing the API request; calling it directly is not recommended.

type CohereMessage

CohereMessage A message that represents a single chat dialog.

type CohereMessage interface {
}

type CohereMessageRoleEnum

CohereMessageRoleEnum Enum with underlying type: string

type CohereMessageRoleEnum string

Set of constants representing the allowable values for CohereMessageRoleEnum

const (
    CohereMessageRoleChatbot CohereMessageRoleEnum = "CHATBOT"
    CohereMessageRoleUser    CohereMessageRoleEnum = "USER"
    CohereMessageRoleSystem  CohereMessageRoleEnum = "SYSTEM"
    CohereMessageRoleTool    CohereMessageRoleEnum = "TOOL"
)

func GetCohereMessageRoleEnumValues

func GetCohereMessageRoleEnumValues() []CohereMessageRoleEnum

GetCohereMessageRoleEnumValues Enumerates the set of values for CohereMessageRoleEnum

func GetMappingCohereMessageRoleEnum

func GetMappingCohereMessageRoleEnum(val string) (CohereMessageRoleEnum, bool)

GetMappingCohereMessageRoleEnum performs a case-insensitive comparison on the enum value and returns the desired enum.

type CohereParameterDefinition

CohereParameterDefinition A definition of tool parameter.

type CohereParameterDefinition struct {

    // The type of the parameter. Must be a valid Python type.
    Type *string `mandatory:"true" json:"type"`

    // The description of the parameter.
    Description *string `mandatory:"false" json:"description"`

    // Denotes whether the parameter is always present (required) or not. Defaults to not required.
    IsRequired *bool `mandatory:"false" json:"isRequired"`
}

func (CohereParameterDefinition) String

func (m CohereParameterDefinition) String() string

func (CohereParameterDefinition) ValidateEnumValue

func (m CohereParameterDefinition) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing the API request; calling it directly is not recommended.

type CohereSystemMessage

CohereSystemMessage A message that represents a single chat dialog as SYSTEM role.

type CohereSystemMessage struct {

    // Contents of the chat message.
    Message *string `mandatory:"true" json:"message"`
}

func (CohereSystemMessage) MarshalJSON

func (m CohereSystemMessage) MarshalJSON() (buff []byte, e error)

MarshalJSON marshals to json representation

func (CohereSystemMessage) String

func (m CohereSystemMessage) String() string

func (CohereSystemMessage) ValidateEnumValue

func (m CohereSystemMessage) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing the API request; calling it directly is not recommended.

type CohereTool

CohereTool A definition of tool (function).

type CohereTool struct {

    // The name of the tool to be called. Valid names contain only the characters a-z, A-Z, 0-9, _ and must not begin with a digit.
    Name *string `mandatory:"true" json:"name"`

    // The description of what the tool does, the model uses the description to choose when and how to call the function.
    Description *string `mandatory:"true" json:"description"`

    // The input parameters of the tool.
    ParameterDefinitions map[string]CohereParameterDefinition `mandatory:"false" json:"parameterDefinitions"`
}

func (CohereTool) String

func (m CohereTool) String() string

func (CohereTool) ValidateEnumValue

func (m CohereTool) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing the API request; calling it directly is not recommended.

type CohereToolCall

CohereToolCall A tool call generated by the model.

type CohereToolCall struct {

    // Name of the tool to call.
    Name *string `mandatory:"true" json:"name"`

    // The parameters to use when invoking a tool.
    Parameters *interface{} `mandatory:"true" json:"parameters"`
}

func (CohereToolCall) String

func (m CohereToolCall) String() string

func (CohereToolCall) ValidateEnumValue

func (m CohereToolCall) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing the API request; calling it directly is not recommended.

type CohereToolMessage

CohereToolMessage A message that represents a single chat dialog as TOOL role.

type CohereToolMessage struct {

    // A list of results from invoking tools recommended by the model in the previous chat turn.
    ToolResults []CohereToolResult `mandatory:"true" json:"toolResults"`
}

func (CohereToolMessage) MarshalJSON

func (m CohereToolMessage) MarshalJSON() (buff []byte, e error)

MarshalJSON marshals to json representation

func (CohereToolMessage) String

func (m CohereToolMessage) String() string

func (CohereToolMessage) ValidateEnumValue

func (m CohereToolMessage) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing the API request; calling it directly is not recommended.

type CohereToolResult

CohereToolResult The result from invoking tools recommended by the model in the previous chat turn.

type CohereToolResult struct {
    Call *CohereToolCall `mandatory:"true" json:"call"`

    // An array of objects returned by the tool.
    Outputs []interface{} `mandatory:"true" json:"outputs"`
}

func (CohereToolResult) String

func (m CohereToolResult) String() string

func (CohereToolResult) ValidateEnumValue

func (m CohereToolResult) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing the API request; calling it directly is not recommended.

type CohereUserMessage

CohereUserMessage A message that represents a single chat dialog as USER role.

type CohereUserMessage struct {

    // Contents of the chat message.
    Message *string `mandatory:"true" json:"message"`
}

func (CohereUserMessage) MarshalJSON

func (m CohereUserMessage) MarshalJSON() (buff []byte, e error)

MarshalJSON marshals to json representation

func (CohereUserMessage) String

func (m CohereUserMessage) String() string

func (CohereUserMessage) ValidateEnumValue

func (m CohereUserMessage) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing the API request; calling it directly is not recommended.

type DedicatedServingMode

DedicatedServingMode The model's serving mode is dedicated serving and has an endpoint on a dedicated AI cluster.

type DedicatedServingMode struct {

    // The OCID of the endpoint to use.
    EndpointId *string `mandatory:"true" json:"endpointId"`
}

func (DedicatedServingMode) MarshalJSON

func (m DedicatedServingMode) MarshalJSON() (buff []byte, e error)

MarshalJSON marshals to json representation

func (DedicatedServingMode) String

func (m DedicatedServingMode) String() string

func (DedicatedServingMode) ValidateEnumValue

func (m DedicatedServingMode) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing the API request; calling it directly is not recommended.

type EmbedTextDetails

EmbedTextDetails Details for the request to embed texts.

type EmbedTextDetails struct {

    // Provide a list of strings. Each string can be words, a phrase, or a paragraph. The maximum length of each string entry in the list is 512 tokens.
    Inputs []string `mandatory:"true" json:"inputs"`

    ServingMode ServingMode `mandatory:"true" json:"servingMode"`

    // The OCID of the compartment in which to call the Generative AI service to create text embeddings.
    CompartmentId *string `mandatory:"true" json:"compartmentId"`

    // Whether or not to include the original inputs in the response. Results are index-based.
    IsEcho *bool `mandatory:"false" json:"isEcho"`

    // For an input that's longer than the maximum token length, specifies which part of the input text will be truncated.
    Truncate EmbedTextDetailsTruncateEnum `mandatory:"false" json:"truncate,omitempty"`

    // Specifies the input type.
    InputType EmbedTextDetailsInputTypeEnum `mandatory:"false" json:"inputType,omitempty"`
}

func (EmbedTextDetails) String

func (m EmbedTextDetails) String() string

func (*EmbedTextDetails) UnmarshalJSON

func (m *EmbedTextDetails) UnmarshalJSON(data []byte) (e error)

UnmarshalJSON unmarshals from json

func (EmbedTextDetails) ValidateEnumValue

func (m EmbedTextDetails) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. Calling this function directly is not recommended.

type EmbedTextDetailsInputTypeEnum

EmbedTextDetailsInputTypeEnum Enum with underlying type: string

type EmbedTextDetailsInputTypeEnum string

Set of constants representing the allowable values for EmbedTextDetailsInputTypeEnum

const (
    EmbedTextDetailsInputTypeSearchDocument EmbedTextDetailsInputTypeEnum = "SEARCH_DOCUMENT"
    EmbedTextDetailsInputTypeSearchQuery    EmbedTextDetailsInputTypeEnum = "SEARCH_QUERY"
    EmbedTextDetailsInputTypeClassification EmbedTextDetailsInputTypeEnum = "CLASSIFICATION"
    EmbedTextDetailsInputTypeClustering     EmbedTextDetailsInputTypeEnum = "CLUSTERING"
)

func GetEmbedTextDetailsInputTypeEnumValues

func GetEmbedTextDetailsInputTypeEnumValues() []EmbedTextDetailsInputTypeEnum

GetEmbedTextDetailsInputTypeEnumValues Enumerates the set of values for EmbedTextDetailsInputTypeEnum

func GetMappingEmbedTextDetailsInputTypeEnum

func GetMappingEmbedTextDetailsInputTypeEnum(val string) (EmbedTextDetailsInputTypeEnum, bool)

GetMappingEmbedTextDetailsInputTypeEnum performs a case-insensitive comparison on the enum value and returns the desired enum

type EmbedTextDetailsTruncateEnum

EmbedTextDetailsTruncateEnum Enum with underlying type: string

type EmbedTextDetailsTruncateEnum string

Set of constants representing the allowable values for EmbedTextDetailsTruncateEnum

const (
    EmbedTextDetailsTruncateNone  EmbedTextDetailsTruncateEnum = "NONE"
    EmbedTextDetailsTruncateStart EmbedTextDetailsTruncateEnum = "START"
    EmbedTextDetailsTruncateEnd   EmbedTextDetailsTruncateEnum = "END"
)

func GetEmbedTextDetailsTruncateEnumValues

func GetEmbedTextDetailsTruncateEnumValues() []EmbedTextDetailsTruncateEnum

GetEmbedTextDetailsTruncateEnumValues Enumerates the set of values for EmbedTextDetailsTruncateEnum

func GetMappingEmbedTextDetailsTruncateEnum

func GetMappingEmbedTextDetailsTruncateEnum(val string) (EmbedTextDetailsTruncateEnum, bool)

GetMappingEmbedTextDetailsTruncateEnum performs a case-insensitive comparison on the enum value and returns the desired enum

type EmbedTextRequest

EmbedTextRequest wrapper for the EmbedText operation

See also

Click https://docs.cloud.oracle.com/en-us/iaas/tools/go-sdk-examples/65.68.0/generativeaiinference/EmbedText.go.html to see an example of how to use EmbedTextRequest.

type EmbedTextRequest struct {

    // Details for generating the embed response.
    EmbedTextDetails `contributesTo:"body"`

    // A token that uniquely identifies a request so it can be retried in case of a timeout or
    // server error without risk of executing that same action again. Retry tokens expire after 24
    // hours, but can be invalidated before that, in case of conflicting operations. For example, if a resource is deleted and purged from the system, then a retry of the original creation request
    // is rejected.
    OpcRetryToken *string `mandatory:"false" contributesTo:"header" name:"opc-retry-token"`

    // The client request ID for tracing.
    OpcRequestId *string `mandatory:"false" contributesTo:"header" name:"opc-request-id"`

    // Metadata about the request. This information will not be transmitted to the service, but
    // represents information that the SDK will consume to drive retry behavior.
    RequestMetadata common.RequestMetadata
}

func (EmbedTextRequest) BinaryRequestBody

func (request EmbedTextRequest) BinaryRequestBody() (*common.OCIReadSeekCloser, bool)

BinaryRequestBody implements the OCIRequest interface

func (EmbedTextRequest) HTTPRequest

func (request EmbedTextRequest) HTTPRequest(method, path string, binaryRequestBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (http.Request, error)

HTTPRequest implements the OCIRequest interface

func (EmbedTextRequest) RetryPolicy

func (request EmbedTextRequest) RetryPolicy() *common.RetryPolicy

RetryPolicy implements the OCIRetryableRequest interface. This retrieves the specified retry policy.

func (EmbedTextRequest) String

func (request EmbedTextRequest) String() string

func (EmbedTextRequest) ValidateEnumValue

func (request EmbedTextRequest) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. Calling this function directly is not recommended.

type EmbedTextResponse

EmbedTextResponse wrapper for the EmbedText operation

type EmbedTextResponse struct {

    // The underlying http response
    RawResponse *http.Response

    // The EmbedTextResult instance
    EmbedTextResult `presentIn:"body"`

    // For optimistic concurrency control. See `if-match`.
    Etag *string `presentIn:"header" name:"etag"`

    // Unique Oracle-assigned identifier for the request. If you need to contact
    // Oracle about a particular request, please provide the request ID.
    OpcRequestId *string `presentIn:"header" name:"opc-request-id"`
}

func (EmbedTextResponse) HTTPResponse

func (response EmbedTextResponse) HTTPResponse() *http.Response

HTTPResponse implements the OCIResponse interface

func (EmbedTextResponse) String

func (response EmbedTextResponse) String() string

type EmbedTextResult

EmbedTextResult The generated embedded result to return.

type EmbedTextResult struct {

    // A unique identifier for the generated result.
    Id *string `mandatory:"true" json:"id"`

    // The embeddings corresponding to inputs.
    Embeddings [][]float32 `mandatory:"true" json:"embeddings"`

    // The original inputs. Only present if "isEcho" is set to true.
    Inputs []string `mandatory:"false" json:"inputs"`

    // The OCID of the model used in this inference request.
    ModelId *string `mandatory:"false" json:"modelId"`

    // The version of the model.
    ModelVersion *string `mandatory:"false" json:"modelVersion"`
}

func (EmbedTextResult) String

func (m EmbedTextResult) String() string

func (EmbedTextResult) ValidateEnumValue

func (m EmbedTextResult) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. Calling this function directly is not recommended.

type GenerateTextDetails

GenerateTextDetails Details for the request to generate text.

type GenerateTextDetails struct {

    // The OCID of the compartment in which to call the Generative AI service to generate text.
    CompartmentId *string `mandatory:"true" json:"compartmentId"`

    ServingMode ServingMode `mandatory:"true" json:"servingMode"`

    InferenceRequest LlmInferenceRequest `mandatory:"true" json:"inferenceRequest"`
}

func (GenerateTextDetails) String

func (m GenerateTextDetails) String() string

func (*GenerateTextDetails) UnmarshalJSON

func (m *GenerateTextDetails) UnmarshalJSON(data []byte) (e error)

UnmarshalJSON unmarshals from json

func (GenerateTextDetails) ValidateEnumValue

func (m GenerateTextDetails) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. Calling this function directly is not recommended.

type GenerateTextRequest

GenerateTextRequest wrapper for the GenerateText operation

See also

Click https://docs.cloud.oracle.com/en-us/iaas/tools/go-sdk-examples/65.68.0/generativeaiinference/GenerateText.go.html to see an example of how to use GenerateTextRequest.

type GenerateTextRequest struct {

    // Details for generating the text response.
    GenerateTextDetails `contributesTo:"body"`

    // A token that uniquely identifies a request so it can be retried in case of a timeout or
    // server error without risk of executing that same action again. Retry tokens expire after 24
    // hours, but can be invalidated before that, in case of conflicting operations. For example, if a resource is deleted and purged from the system, then a retry of the original creation request
    // is rejected.
    OpcRetryToken *string `mandatory:"false" contributesTo:"header" name:"opc-retry-token"`

    // The client request ID for tracing.
    OpcRequestId *string `mandatory:"false" contributesTo:"header" name:"opc-request-id"`

    // Metadata about the request. This information will not be transmitted to the service, but
    // represents information that the SDK will consume to drive retry behavior.
    RequestMetadata common.RequestMetadata
}

func (GenerateTextRequest) BinaryRequestBody

func (request GenerateTextRequest) BinaryRequestBody() (*common.OCIReadSeekCloser, bool)

BinaryRequestBody implements the OCIRequest interface

func (GenerateTextRequest) HTTPRequest

func (request GenerateTextRequest) HTTPRequest(method, path string, binaryRequestBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (http.Request, error)

HTTPRequest implements the OCIRequest interface

func (GenerateTextRequest) RetryPolicy

func (request GenerateTextRequest) RetryPolicy() *common.RetryPolicy

RetryPolicy implements the OCIRetryableRequest interface. This retrieves the specified retry policy.

func (GenerateTextRequest) String

func (request GenerateTextRequest) String() string

func (GenerateTextRequest) ValidateEnumValue

func (request GenerateTextRequest) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. Calling this function directly is not recommended.

type GenerateTextResponse

GenerateTextResponse wrapper for the GenerateText operation

type GenerateTextResponse struct {

    // The underlying http response
    RawResponse *http.Response

    // The GenerateTextResult instance
    GenerateTextResult `presentIn:"body"`

    // For optimistic concurrency control. See `if-match`.
    Etag *string `presentIn:"header" name:"etag"`

    // Unique Oracle-assigned identifier for the request. If you need to contact
    // Oracle about a particular request, please provide the request ID.
    OpcRequestId *string `presentIn:"header" name:"opc-request-id"`
}

func (GenerateTextResponse) HTTPResponse

func (response GenerateTextResponse) HTTPResponse() *http.Response

HTTPResponse implements the OCIResponse interface

func (GenerateTextResponse) String

func (response GenerateTextResponse) String() string

type GenerateTextResult

GenerateTextResult The generated text result to return.

type GenerateTextResult struct {

    // The OCID of the model used in this inference request.
    ModelId *string `mandatory:"true" json:"modelId"`

    // The version of the model.
    ModelVersion *string `mandatory:"true" json:"modelVersion"`

    InferenceResponse LlmInferenceResponse `mandatory:"true" json:"inferenceResponse"`
}

func (GenerateTextResult) String

func (m GenerateTextResult) String() string

func (*GenerateTextResult) UnmarshalJSON

func (m *GenerateTextResult) UnmarshalJSON(data []byte) (e error)

UnmarshalJSON unmarshals from json

func (GenerateTextResult) ValidateEnumValue

func (m GenerateTextResult) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. Calling this function directly is not recommended.

type GeneratedText

GeneratedText The text generated during each run.

type GeneratedText struct {

    // A unique identifier for this text generation.
    Id *string `mandatory:"true" json:"id"`

    // The generated text.
    Text *string `mandatory:"true" json:"text"`

    // The overall likelihood of the generated text.
    // When a large language model generates a new token for the output text, a likelihood is assigned to all tokens, where tokens with higher likelihoods are more likely to follow the current token. For example, it's more likely that the word favorite is followed by the word food or book rather than the word zebra. A lower likelihood means that it's less likely that token follows the current token.
    Likelihood *float64 `mandatory:"true" json:"likelihood"`

    // The reason why the model stopped generating tokens.
    // A model stops generating tokens if the model hits a natural stop point or reaches a provided stop sequence.
    FinishReason *string `mandatory:"false" json:"finishReason"`

    // A collection of generated tokens and their corresponding likelihoods.
    TokenLikelihoods []TokenLikelihood `mandatory:"false" json:"tokenLikelihoods"`
}

func (GeneratedText) String

func (m GeneratedText) String() string

func (GeneratedText) ValidateEnumValue

func (m GeneratedText) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. Calling this function directly is not recommended.

type GenerativeAiInferenceClient

GenerativeAiInferenceClient a client for GenerativeAiInference

type GenerativeAiInferenceClient struct {
    common.BaseClient
    // contains filtered or unexported fields
}

func NewGenerativeAiInferenceClientWithConfigurationProvider

func NewGenerativeAiInferenceClientWithConfigurationProvider(configProvider common.ConfigurationProvider) (client GenerativeAiInferenceClient, err error)

NewGenerativeAiInferenceClientWithConfigurationProvider creates a new default GenerativeAiInference client with the given configuration provider. The configuration provider will be used for the default signer as well as reading the region.

func NewGenerativeAiInferenceClientWithOboToken

func NewGenerativeAiInferenceClientWithOboToken(configProvider common.ConfigurationProvider, oboToken string) (client GenerativeAiInferenceClient, err error)

NewGenerativeAiInferenceClientWithOboToken creates a new default GenerativeAiInference client with the given configuration provider. The OBO token will be added to default headers and signed; the configuration provider will be used for the signer as well as reading the region.

func (GenerativeAiInferenceClient) Chat

func (client GenerativeAiInferenceClient) Chat(ctx context.Context, request ChatRequest) (response ChatResponse, err error)

Chat Creates a response for the given conversation.

See also

Click https://docs.cloud.oracle.com/en-us/iaas/tools/go-sdk-examples/65.68.0/generativeaiinference/Chat.go.html to see an example of how to use Chat API. A default retry strategy applies to this operation Chat()

func (*GenerativeAiInferenceClient) ConfigurationProvider

func (client *GenerativeAiInferenceClient) ConfigurationProvider() *common.ConfigurationProvider

ConfigurationProvider the ConfigurationProvider used in this client, or null if none set

func (GenerativeAiInferenceClient) EmbedText

func (client GenerativeAiInferenceClient) EmbedText(ctx context.Context, request EmbedTextRequest) (response EmbedTextResponse, err error)

EmbedText Produces embeddings for the inputs. An embedding is a numeric representation of a piece of text. This text can be a phrase, a sentence, or one or more paragraphs. The Generative AI embedding model transforms each phrase, sentence, or paragraph that you input, into an array with 1024 numbers. You can use these embeddings for finding similarity in your input text such as finding phrases that are similar in context or category. Embeddings are mostly used for semantic searches where the search function focuses on the meaning of the text that it's searching through rather than finding results based on keywords.

See also

Click https://docs.cloud.oracle.com/en-us/iaas/tools/go-sdk-examples/65.68.0/generativeaiinference/EmbedText.go.html to see an example of how to use EmbedText API. A default retry strategy applies to this operation EmbedText()

func (GenerativeAiInferenceClient) GenerateText

func (client GenerativeAiInferenceClient) GenerateText(ctx context.Context, request GenerateTextRequest) (response GenerateTextResponse, err error)

GenerateText Generates a text response based on the user prompt.

See also

Click https://docs.cloud.oracle.com/en-us/iaas/tools/go-sdk-examples/65.68.0/generativeaiinference/GenerateText.go.html to see an example of how to use GenerateText API. A default retry strategy applies to this operation GenerateText()

func (*GenerativeAiInferenceClient) SetRegion

func (client *GenerativeAiInferenceClient) SetRegion(region string)

SetRegion overrides the region of this client.

func (GenerativeAiInferenceClient) SummarizeText

func (client GenerativeAiInferenceClient) SummarizeText(ctx context.Context, request SummarizeTextRequest) (response SummarizeTextResponse, err error)

SummarizeText Summarizes the input text.

See also

Click https://docs.cloud.oracle.com/en-us/iaas/tools/go-sdk-examples/65.68.0/generativeaiinference/SummarizeText.go.html to see an example of how to use SummarizeText API. A default retry strategy applies to this operation SummarizeText()

type GenericChatRequest

GenericChatRequest Details for the chat request.

type GenericChatRequest struct {

    // The series of messages in a chat request. Includes the previous messages in a conversation. Each message includes a role (`USER` or `CHATBOT`) and content.
    Messages []Message `mandatory:"false" json:"messages"`

    // Whether to stream back partial progress. If set to true, as tokens become available, they are sent as data-only server-sent events.
    IsStream *bool `mandatory:"false" json:"isStream"`

    // The number of generated texts that will be returned.
    NumGenerations *int `mandatory:"false" json:"numGenerations"`

    // Whether to include the user prompt in the response. Applies only to non-stream results.
    IsEcho *bool `mandatory:"false" json:"isEcho"`

    // An integer that sets up the model to use only the top k most likely tokens in the generated output. A higher k introduces more randomness into the output making the output text sound more natural. Default value is -1 which means to consider all tokens. Setting to 0 disables this method and considers all tokens.
    // If also using top p, then the model considers only the top tokens whose probabilities add up to p percent and ignores the rest of the k tokens. For example, if k is 20, but the probabilities of the top 10 add up to .75, then only the top 10 tokens are chosen.
    TopK *int `mandatory:"false" json:"topK"`

    // If set to a probability 0.0 < p < 1.0, it ensures that only the most likely tokens, with total probability mass of p, are considered for generation at each step.
    // To eliminate tokens with low likelihood, assign p a minimum percentage for the next token's likelihood. For example, when p is set to 0.75, the model eliminates the bottom 25 percent for the next token. Set to 1 to consider all tokens and set to 0 to disable. If both k and p are enabled, p acts after k.
    TopP *float64 `mandatory:"false" json:"topP"`

    // A number that sets the randomness of the generated output. A lower temperature means less random generations.
    // Use lower numbers for tasks with a correct answer such as question answering or summarizing. High temperatures can generate hallucinations or factually incorrect information. Start with temperatures lower than 1.0 and increase the temperature for more creative outputs, as you regenerate the prompts to refine the outputs.
    Temperature *float64 `mandatory:"false" json:"temperature"`

    // To reduce repetitiveness of generated tokens, this number penalizes new tokens based on their frequency in the generated text so far. Values > 0 encourage the model to use new tokens and values < 0 encourage the model to repeat tokens. Set to 0 to disable.
    FrequencyPenalty *float64 `mandatory:"false" json:"frequencyPenalty"`

    // To reduce repetitiveness of generated tokens, this number penalizes new tokens based on whether they've appeared in the generated text so far. Values > 0 encourage the model to use new tokens and values < 0 encourage the model to repeat tokens.
    // Similar to frequency penalty, a penalty is applied to previously present tokens, except that this penalty is applied equally to all tokens that have already appeared, regardless of how many times they've appeared. Set to 0 to disable.
    PresencePenalty *float64 `mandatory:"false" json:"presencePenalty"`

    // List of strings that stop the generation if they are generated for the response text. The returned output will not contain the stop strings.
    Stop []string `mandatory:"false" json:"stop"`

    // Includes the logarithmic probabilities for the most likely output tokens and the chosen tokens.
    // For example, if the log probability is 5, the API returns a list of the 5 most likely tokens. The API returns the log probability of the sampled token, so there might be up to logprobs+1 elements in the response.
    LogProbs *int `mandatory:"false" json:"logProbs"`

    // The maximum number of tokens that can be generated per output sequence. The token count of your prompt plus `maxTokens` must not exceed the model's context length.
    // Not setting a value for maxTokens results in the possible use of model's full context length.
    MaxTokens *int `mandatory:"false" json:"maxTokens"`

    // Modifies the likelihood of specified tokens that appear in the completion.
    // Example: '{"6395": 2, "8134": 1, "21943": 0.5, "5923": -100}'
    LogitBias *interface{} `mandatory:"false" json:"logitBias"`
}

func (GenericChatRequest) MarshalJSON

func (m GenericChatRequest) MarshalJSON() (buff []byte, e error)

MarshalJSON marshals to json representation

func (GenericChatRequest) String

func (m GenericChatRequest) String() string

func (*GenericChatRequest) UnmarshalJSON

func (m *GenericChatRequest) UnmarshalJSON(data []byte) (e error)

UnmarshalJSON unmarshals from json

func (GenericChatRequest) ValidateEnumValue

func (m GenericChatRequest) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. Calling this function directly is not recommended.

type GenericChatResponse

GenericChatResponse The response for a chat conversation.

type GenericChatResponse struct {

    // The Unix timestamp (in seconds) of when the response text was generated.
    TimeCreated *common.SDKTime `mandatory:"true" json:"timeCreated"`

    // A list of generated texts. Can be more than one if n is greater than 1.
    Choices []ChatChoice `mandatory:"true" json:"choices"`
}

func (GenericChatResponse) MarshalJSON

func (m GenericChatResponse) MarshalJSON() (buff []byte, e error)

MarshalJSON marshals to json representation

func (GenericChatResponse) String

func (m GenericChatResponse) String() string

func (GenericChatResponse) ValidateEnumValue

func (m GenericChatResponse) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. Calling this function directly is not recommended.

type LlamaLlmInferenceRequest

LlamaLlmInferenceRequest Details for the text generation request for Llama models.

type LlamaLlmInferenceRequest struct {

    // Represents the prompt to be completed. The trailing white spaces are trimmed before completion.
    Prompt *string `mandatory:"false" json:"prompt"`

    // Whether to stream back partial progress. If set, tokens are sent as data-only server-sent events as they become available.
    IsStream *bool `mandatory:"false" json:"isStream"`

    // The number of generated texts that will be returned.
    NumGenerations *int `mandatory:"false" json:"numGenerations"`

    // Whether or not to return the user prompt in the response. Applies only to non-stream results.
    IsEcho *bool `mandatory:"false" json:"isEcho"`

    // An integer that sets up the model to use only the top k most likely tokens in the generated output. A higher k introduces more randomness into the output making the output text sound more natural. Default value is -1 which means to consider all tokens. Setting to 0 disables this method and considers all tokens.
    // If also using top p, then the model considers only the top tokens whose probabilities add up to p percent and ignores the rest of the k tokens. For example, if k is 20, but the probabilities of the top 10 add up to .75, then only the top 10 tokens are chosen.
    TopK *int `mandatory:"false" json:"topK"`

    // If set to a probability 0.0 < p < 1.0, it ensures that only the most likely tokens, with total probability mass of p, are considered for generation at each step.
    // To eliminate tokens with low likelihood, assign p a minimum percentage for the next token's likelihood. For example, when p is set to 0.75, the model eliminates the bottom 25 percent for the next token. Set to 1 to consider all tokens and set to 0 to disable. If both k and p are enabled, p acts after k.
    TopP *float64 `mandatory:"false" json:"topP"`

    // A number that sets the randomness of the generated output. A lower temperature means less random generations.
    // Use lower numbers for tasks with a correct answer such as question answering or summarizing. High temperatures can generate hallucinations or factually incorrect information. Start with temperatures lower than 1.0 and increase the temperature for more creative outputs, as you regenerate the prompts to refine the outputs.
    Temperature *float64 `mandatory:"false" json:"temperature"`

    // To reduce repetitiveness of generated tokens, this number penalizes new tokens based on their frequency in the generated text so far. Values > 0 encourage the model to use new tokens and values < 0 encourage the model to repeat tokens. Set to 0 to disable.
    FrequencyPenalty *float64 `mandatory:"false" json:"frequencyPenalty"`

    // To reduce repetitiveness of generated tokens, this number penalizes new tokens based on whether they've appeared in the generated text so far. Values > 0 encourage the model to use new tokens and values < 0 encourage the model to repeat tokens.
    // Similar to frequency penalty, a penalty is applied to previously present tokens, except that this penalty is applied equally to all tokens that have already appeared, regardless of how many times they've appeared. Set to 0 to disable.
    PresencePenalty *float64 `mandatory:"false" json:"presencePenalty"`

    // List of strings that stop the generation if they are generated for the response text. The returned output will not contain the stop strings.
    Stop []string `mandatory:"false" json:"stop"`

    // Includes the logarithmic probabilities for the most likely output tokens and the chosen tokens.
    // For example, if the log probability is 5, the API returns a list of the 5 most likely tokens. The API returns the log probability of the sampled token, so there might be up to logprobs+1 elements in the response.
    LogProbs *int `mandatory:"false" json:"logProbs"`

    // The maximum number of tokens that can be generated per output sequence. The token count of the prompt plus `maxTokens` cannot exceed the model's context length.
    MaxTokens *int `mandatory:"false" json:"maxTokens"`
}

func (LlamaLlmInferenceRequest) MarshalJSON

func (m LlamaLlmInferenceRequest) MarshalJSON() (buff []byte, e error)

MarshalJSON marshals to json representation

func (LlamaLlmInferenceRequest) String

func (m LlamaLlmInferenceRequest) String() string

func (LlamaLlmInferenceRequest) ValidateEnumValue

func (m LlamaLlmInferenceRequest) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. Calling this function directly is not recommended.

type LlamaLlmInferenceResponse

LlamaLlmInferenceResponse The generated text result to return.

type LlamaLlmInferenceResponse struct {

    // The Unix timestamp (in seconds) of when the generation was created.
    Created *common.SDKTime `mandatory:"true" json:"created"`

    // A list of generated texts. Can be more than one if n is greater than 1.
    Choices []Choice `mandatory:"true" json:"choices"`
}

func (LlamaLlmInferenceResponse) MarshalJSON

func (m LlamaLlmInferenceResponse) MarshalJSON() (buff []byte, e error)

MarshalJSON marshals to json representation

func (LlamaLlmInferenceResponse) String

func (m LlamaLlmInferenceResponse) String() string

func (LlamaLlmInferenceResponse) ValidateEnumValue

func (m LlamaLlmInferenceResponse) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. Calling this function directly is not recommended.

type LlmInferenceRequest

LlmInferenceRequest The base class for the inference requests.

type LlmInferenceRequest interface {
}

type LlmInferenceRequestRuntimeTypeEnum

LlmInferenceRequestRuntimeTypeEnum Enum with underlying type: string

type LlmInferenceRequestRuntimeTypeEnum string

Set of constants representing the allowable values for LlmInferenceRequestRuntimeTypeEnum

const (
    LlmInferenceRequestRuntimeTypeCohere LlmInferenceRequestRuntimeTypeEnum = "COHERE"
    LlmInferenceRequestRuntimeTypeLlama  LlmInferenceRequestRuntimeTypeEnum = "LLAMA"
)

func GetLlmInferenceRequestRuntimeTypeEnumValues

func GetLlmInferenceRequestRuntimeTypeEnumValues() []LlmInferenceRequestRuntimeTypeEnum

GetLlmInferenceRequestRuntimeTypeEnumValues Enumerates the set of values for LlmInferenceRequestRuntimeTypeEnum

func GetMappingLlmInferenceRequestRuntimeTypeEnum

func GetMappingLlmInferenceRequestRuntimeTypeEnum(val string) (LlmInferenceRequestRuntimeTypeEnum, bool)

GetMappingLlmInferenceRequestRuntimeTypeEnum performs a case-insensitive comparison on the enum value and returns the desired enum

type LlmInferenceResponse

LlmInferenceResponse The base class for inference responses.

type LlmInferenceResponse interface {
}

type LlmInferenceResponseRuntimeTypeEnum

LlmInferenceResponseRuntimeTypeEnum Enum with underlying type: string

type LlmInferenceResponseRuntimeTypeEnum string

Set of constants representing the allowable values for LlmInferenceResponseRuntimeTypeEnum

const (
    LlmInferenceResponseRuntimeTypeCohere LlmInferenceResponseRuntimeTypeEnum = "COHERE"
    LlmInferenceResponseRuntimeTypeLlama  LlmInferenceResponseRuntimeTypeEnum = "LLAMA"
)

func GetLlmInferenceResponseRuntimeTypeEnumValues

func GetLlmInferenceResponseRuntimeTypeEnumValues() []LlmInferenceResponseRuntimeTypeEnum

GetLlmInferenceResponseRuntimeTypeEnumValues Enumerates the set of values for LlmInferenceResponseRuntimeTypeEnum

func GetMappingLlmInferenceResponseRuntimeTypeEnum

func GetMappingLlmInferenceResponseRuntimeTypeEnum(val string) (LlmInferenceResponseRuntimeTypeEnum, bool)

GetMappingLlmInferenceResponseRuntimeTypeEnum performs a case-insensitive comparison on the enum value and returns the desired enum

type Logprobs

Logprobs Includes the logarithmic probabilities for the most likely output tokens and the chosen tokens. For example, if logprobs is set to 5, the API returns a list of the 5 most likely tokens. The API returns the log probability of the sampled token, so there might be up to logprobs+1 elements in the response.

type Logprobs struct {

    // The text offset.
    TextOffset []int `mandatory:"false" json:"textOffset"`

    // The logarithmic probabilities of the output token.
    TokenLogprobs []float64 `mandatory:"false" json:"tokenLogprobs"`

    // The list of output tokens.
    Tokens []string `mandatory:"false" json:"tokens"`

    // The logarithmic probabilities of each of the top k tokens.
    TopLogprobs []map[string]string `mandatory:"false" json:"topLogprobs"`
}

func (Logprobs) String

func (m Logprobs) String() string

func (Logprobs) ValidateEnumValue

func (m Logprobs) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. Calling this function directly is not recommended.

type Message

Message A message that represents a single chat dialog.

type Message interface {

    // Contents of the chat message.
    GetContent() []ChatContent
}

type MessageRoleEnum

MessageRoleEnum Enum with underlying type: string

type MessageRoleEnum string

Set of constants representing the allowable values for MessageRoleEnum

const (
    MessageRoleSystem    MessageRoleEnum = "SYSTEM"
    MessageRoleUser      MessageRoleEnum = "USER"
    MessageRoleAssistant MessageRoleEnum = "ASSISTANT"
)

func GetMappingMessageRoleEnum

func GetMappingMessageRoleEnum(val string) (MessageRoleEnum, bool)

GetMappingMessageRoleEnum performs a case-insensitive comparison on the enum value and returns the desired enum

func GetMessageRoleEnumValues

func GetMessageRoleEnumValues() []MessageRoleEnum

GetMessageRoleEnumValues Enumerates the set of values for MessageRoleEnum

type OnDemandServingMode

OnDemandServingMode The model's serving mode is on-demand serving on a shared infrastructure.

type OnDemandServingMode struct {

    // The unique ID of a model to use. You can use the ListModels API to list the available models.
    ModelId *string `mandatory:"true" json:"modelId"`
}

func (OnDemandServingMode) MarshalJSON

func (m OnDemandServingMode) MarshalJSON() (buff []byte, e error)

MarshalJSON marshals to json representation

func (OnDemandServingMode) String

func (m OnDemandServingMode) String() string

func (OnDemandServingMode) ValidateEnumValue

func (m OnDemandServingMode) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. Calling this function directly is not recommended.

type SearchQuery

SearchQuery The generated search query.

type SearchQuery struct {

    // The text of the search query.
    Text *string `mandatory:"true" json:"text"`
}

func (SearchQuery) String

func (m SearchQuery) String() string

func (SearchQuery) ValidateEnumValue

func (m SearchQuery) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. Calling this function directly is not recommended.

type ServingMode

ServingMode The model's serving mode, which is either on-demand serving or dedicated serving.

type ServingMode interface {
}

type ServingModeServingTypeEnum

ServingModeServingTypeEnum Enum with underlying type: string

type ServingModeServingTypeEnum string

Set of constants representing the allowable values for ServingModeServingTypeEnum

const (
    ServingModeServingTypeOnDemand  ServingModeServingTypeEnum = "ON_DEMAND"
    ServingModeServingTypeDedicated ServingModeServingTypeEnum = "DEDICATED"
)

func GetMappingServingModeServingTypeEnum

func GetMappingServingModeServingTypeEnum(val string) (ServingModeServingTypeEnum, bool)

GetMappingServingModeServingTypeEnum performs a case-insensitive comparison on the enum value and returns the desired enum

func GetServingModeServingTypeEnumValues

func GetServingModeServingTypeEnumValues() []ServingModeServingTypeEnum

GetServingModeServingTypeEnumValues Enumerates the set of values for ServingModeServingTypeEnum

type SummarizeTextDetails

SummarizeTextDetails Details for the request to summarize text.

type SummarizeTextDetails struct {

    // The input string to be summarized.
    Input *string `mandatory:"true" json:"input"`

    ServingMode ServingMode `mandatory:"true" json:"servingMode"`

    // The OCID of the compartment in which to call the Generative AI service to summarize text.
    CompartmentId *string `mandatory:"true" json:"compartmentId"`

    // Whether or not to include the original inputs in the response.
    IsEcho *bool `mandatory:"false" json:"isEcho"`

    // A number that sets the randomness of the generated output. Lower temperatures mean less random generations.
    // Use lower numbers for tasks with a correct answer such as question answering or summarizing. High temperatures can generate hallucinations or factually incorrect information. Start with temperatures lower than 1.0, and increase the temperature for more creative outputs, as you regenerate the prompts to refine the outputs.
    Temperature *float64 `mandatory:"false" json:"temperature"`

    // A free-form instruction for modifying how the summaries get generated. Should complete the sentence "Generate a summary _". For example, "focusing on the next steps" or "written by Yoda".
    AdditionalCommand *string `mandatory:"false" json:"additionalCommand"`

    // Indicates the approximate length of the summary. If "AUTO" is selected, the best option will be picked based on the input text.
    Length SummarizeTextDetailsLengthEnum `mandatory:"false" json:"length,omitempty"`

    // Indicates the style in which the summary will be delivered - in a free form paragraph or in bullet points. If "AUTO" is selected, the best option will be picked based on the input text.
    Format SummarizeTextDetailsFormatEnum `mandatory:"false" json:"format,omitempty"`

    // Controls how close to the original text the summary is. High extractiveness summaries will lean towards reusing sentences verbatim, while low extractiveness summaries will tend to paraphrase more.
    Extractiveness SummarizeTextDetailsExtractivenessEnum `mandatory:"false" json:"extractiveness,omitempty"`
}

func (SummarizeTextDetails) String

func (m SummarizeTextDetails) String() string

func (*SummarizeTextDetails) UnmarshalJSON

func (m *SummarizeTextDetails) UnmarshalJSON(data []byte) (e error)

UnmarshalJSON unmarshals from json

func (SummarizeTextDetails) ValidateEnumValue

func (m SummarizeTextDetails) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. Calling this function directly is not recommended.

type SummarizeTextDetailsExtractivenessEnum

SummarizeTextDetailsExtractivenessEnum Enum with underlying type: string

type SummarizeTextDetailsExtractivenessEnum string

Set of constants representing the allowable values for SummarizeTextDetailsExtractivenessEnum

const (
    SummarizeTextDetailsExtractivenessLow    SummarizeTextDetailsExtractivenessEnum = "LOW"
    SummarizeTextDetailsExtractivenessMedium SummarizeTextDetailsExtractivenessEnum = "MEDIUM"
    SummarizeTextDetailsExtractivenessHigh   SummarizeTextDetailsExtractivenessEnum = "HIGH"
    SummarizeTextDetailsExtractivenessAuto   SummarizeTextDetailsExtractivenessEnum = "AUTO"
)

func GetMappingSummarizeTextDetailsExtractivenessEnum

func GetMappingSummarizeTextDetailsExtractivenessEnum(val string) (SummarizeTextDetailsExtractivenessEnum, bool)

GetMappingSummarizeTextDetailsExtractivenessEnum performs a case-insensitive comparison on the enum value and returns the desired enum

func GetSummarizeTextDetailsExtractivenessEnumValues

func GetSummarizeTextDetailsExtractivenessEnumValues() []SummarizeTextDetailsExtractivenessEnum

GetSummarizeTextDetailsExtractivenessEnumValues Enumerates the set of values for SummarizeTextDetailsExtractivenessEnum

type SummarizeTextDetailsFormatEnum

SummarizeTextDetailsFormatEnum Enum with underlying type: string

type SummarizeTextDetailsFormatEnum string

Set of constants representing the allowable values for SummarizeTextDetailsFormatEnum

const (
    SummarizeTextDetailsFormatParagraph SummarizeTextDetailsFormatEnum = "PARAGRAPH"
    SummarizeTextDetailsFormatBullets   SummarizeTextDetailsFormatEnum = "BULLETS"
    SummarizeTextDetailsFormatAuto      SummarizeTextDetailsFormatEnum = "AUTO"
)

func GetMappingSummarizeTextDetailsFormatEnum

func GetMappingSummarizeTextDetailsFormatEnum(val string) (SummarizeTextDetailsFormatEnum, bool)

GetMappingSummarizeTextDetailsFormatEnum performs a case-insensitive comparison on the enum value and returns the desired enum

func GetSummarizeTextDetailsFormatEnumValues

func GetSummarizeTextDetailsFormatEnumValues() []SummarizeTextDetailsFormatEnum

GetSummarizeTextDetailsFormatEnumValues Enumerates the set of values for SummarizeTextDetailsFormatEnum

type SummarizeTextDetailsLengthEnum

SummarizeTextDetailsLengthEnum Enum with underlying type: string

type SummarizeTextDetailsLengthEnum string

Set of constants representing the allowable values for SummarizeTextDetailsLengthEnum

const (
    SummarizeTextDetailsLengthShort  SummarizeTextDetailsLengthEnum = "SHORT"
    SummarizeTextDetailsLengthMedium SummarizeTextDetailsLengthEnum = "MEDIUM"
    SummarizeTextDetailsLengthLong   SummarizeTextDetailsLengthEnum = "LONG"
    SummarizeTextDetailsLengthAuto   SummarizeTextDetailsLengthEnum = "AUTO"
)

func GetMappingSummarizeTextDetailsLengthEnum

func GetMappingSummarizeTextDetailsLengthEnum(val string) (SummarizeTextDetailsLengthEnum, bool)

GetMappingSummarizeTextDetailsLengthEnum performs a case-insensitive comparison on the enum value and returns the desired enum

func GetSummarizeTextDetailsLengthEnumValues

func GetSummarizeTextDetailsLengthEnumValues() []SummarizeTextDetailsLengthEnum

GetSummarizeTextDetailsLengthEnumValues Enumerates the set of values for SummarizeTextDetailsLengthEnum

type SummarizeTextRequest

SummarizeTextRequest wrapper for the SummarizeText operation

See also

Click https://docs.cloud.oracle.com/en-us/iaas/tools/go-sdk-examples/65.68.0/generativeaiinference/SummarizeText.go.html to see an example of how to use SummarizeTextRequest.

type SummarizeTextRequest struct {

    // Details for summarizing the text.
    SummarizeTextDetails `contributesTo:"body"`

    // A token that uniquely identifies a request so it can be retried in case of a timeout or
    // server error without risk of executing that same action again. Retry tokens expire after 24
    // hours, but can be invalidated before that, in case of conflicting operations. For example, if a resource is deleted and purged from the system, then a retry of the original creation request
    // is rejected.
    OpcRetryToken *string `mandatory:"false" contributesTo:"header" name:"opc-retry-token"`

    // The client request ID for tracing.
    OpcRequestId *string `mandatory:"false" contributesTo:"header" name:"opc-request-id"`

    // Metadata about the request. This information will not be transmitted to the service, but
    // represents information that the SDK will consume to drive retry behavior.
    RequestMetadata common.RequestMetadata
}

func (SummarizeTextRequest) BinaryRequestBody

func (request SummarizeTextRequest) BinaryRequestBody() (*common.OCIReadSeekCloser, bool)

BinaryRequestBody implements the OCIRequest interface

func (SummarizeTextRequest) HTTPRequest

func (request SummarizeTextRequest) HTTPRequest(method, path string, binaryRequestBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (http.Request, error)

HTTPRequest implements the OCIRequest interface

func (SummarizeTextRequest) RetryPolicy

func (request SummarizeTextRequest) RetryPolicy() *common.RetryPolicy

RetryPolicy implements the OCIRetryableRequest interface. This retrieves the specified retry policy.

func (SummarizeTextRequest) String

func (request SummarizeTextRequest) String() string

func (SummarizeTextRequest) ValidateEnumValue

func (request SummarizeTextRequest) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. Calling this function directly is not recommended.

type SummarizeTextResponse

SummarizeTextResponse wrapper for the SummarizeText operation

type SummarizeTextResponse struct {

    // The underlying http response
    RawResponse *http.Response

    // The SummarizeTextResult instance
    SummarizeTextResult `presentIn:"body"`

    // For optimistic concurrency control. See `if-match`.
    Etag *string `presentIn:"header" name:"etag"`

    // Unique Oracle-assigned identifier for the request. If you need to contact
    // Oracle about a particular request, please provide the request ID.
    OpcRequestId *string `presentIn:"header" name:"opc-request-id"`
}

func (SummarizeTextResponse) HTTPResponse

func (response SummarizeTextResponse) HTTPResponse() *http.Response

HTTPResponse implements the OCIResponse interface

func (SummarizeTextResponse) String

func (response SummarizeTextResponse) String() string

type SummarizeTextResult

SummarizeTextResult Summarize text result to return to caller.

type SummarizeTextResult struct {

    // A unique identifier for this SummarizeTextResult.
    Id *string `mandatory:"true" json:"id"`

    // Summary result corresponding to input.
    Summary *string `mandatory:"true" json:"summary"`

    // The original input. Only included if "isEcho" is set to true.
    Input *string `mandatory:"false" json:"input"`

    // The OCID of the model used in this inference request.
    ModelId *string `mandatory:"false" json:"modelId"`

    // The version of the model.
    ModelVersion *string `mandatory:"false" json:"modelVersion"`
}

func (SummarizeTextResult) String

func (m SummarizeTextResult) String() string

func (SummarizeTextResult) ValidateEnumValue

func (m SummarizeTextResult) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. Calling this function directly is not recommended.

type SystemMessage

SystemMessage Represents a single instance of system message.

type SystemMessage struct {

    // Contents of the chat message.
    Content []ChatContent `mandatory:"false" json:"content"`

    // An optional name for the participant. Provides the model information to differentiate between participants of the same role.
    Name *string `mandatory:"false" json:"name"`
}

func (SystemMessage) GetContent

func (m SystemMessage) GetContent() []ChatContent

GetContent returns Content

func (SystemMessage) MarshalJSON

func (m SystemMessage) MarshalJSON() (buff []byte, e error)

MarshalJSON marshals to json representation

func (SystemMessage) String

func (m SystemMessage) String() string

func (*SystemMessage) UnmarshalJSON

func (m *SystemMessage) UnmarshalJSON(data []byte) (e error)

UnmarshalJSON unmarshals from json

func (SystemMessage) ValidateEnumValue

func (m SystemMessage) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. Calling this function directly is not recommended.

type TextContent

TextContent Represents a single instance of text in the chat content.

type TextContent struct {

    // The text content.
    Text *string `mandatory:"false" json:"text"`
}

func (TextContent) MarshalJSON

func (m TextContent) MarshalJSON() (buff []byte, e error)

MarshalJSON marshals to json representation

func (TextContent) String

func (m TextContent) String() string

func (TextContent) ValidateEnumValue

func (m TextContent) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. Calling this function directly is not recommended.

type TokenLikelihood

TokenLikelihood An object that contains the returned token and its corresponding likelihood.

type TokenLikelihood struct {

    // A word, part of a word, or a punctuation.
    // For example, apple is a token and friendship is made up of two tokens, friend and ship. When you run a model, you can set the maximum number of output tokens. Estimate three tokens per word.
    Token *string `mandatory:"false" json:"token"`

    // The likelihood of this token during generation.
    Likelihood *float64 `mandatory:"false" json:"likelihood"`
}

func (TokenLikelihood) String

func (m TokenLikelihood) String() string

func (TokenLikelihood) ValidateEnumValue

func (m TokenLikelihood) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. Calling this function directly is not recommended.

type UserMessage

UserMessage Represents a single instance of user message.

type UserMessage struct {

    // Contents of the chat message.
    Content []ChatContent `mandatory:"false" json:"content"`

    // An optional name for the participant. Provides the model information to differentiate between participants of the same role.
    Name *string `mandatory:"false" json:"name"`
}

func (UserMessage) GetContent

func (m UserMessage) GetContent() []ChatContent

GetContent returns Content

func (UserMessage) MarshalJSON

func (m UserMessage) MarshalJSON() (buff []byte, e error)

MarshalJSON marshals to json representation

func (UserMessage) String

func (m UserMessage) String() string

func (*UserMessage) UnmarshalJSON

func (m *UserMessage) UnmarshalJSON(data []byte) (e error)

UnmarshalJSON unmarshals from json

func (UserMessage) ValidateEnumValue

func (m UserMessage) ValidateEnumValue() (bool, error)

ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. Calling this function directly is not recommended.