// This is an automatically generated code sample. 
// To make this code sample work in your Oracle Cloud tenancy, 
// please replace the values for any parameters whose current values do not fit
// your use case (such as resource IDs, strings containing ‘EXAMPLE’ or ‘unique_id’, and 
// your use case (such as resource IDs, strings containing 'EXAMPLE' or 'unique_id', and
// boolean, number, and enum parameters with values not fitting your use case).

import * as generativeaiinference from "oci-generativeaiinference";
import common = require("oci-common");

// Create a default authentication provider that uses the DEFAULT
// profile in the configuration file.
// Refer to <see href="https://docs.cloud.oracle.com/en-us/iaas/Content/API/Concepts/sdkconfig.htm#SDK_and_CLI_Configuration_File">the public documentation</see> on how to prepare a configuration file.

// Authenticate with the DEFAULT profile from the local OCI configuration file.
const provider: common.ConfigFileAuthenticationDetailsProvider = new common.ConfigFileAuthenticationDetailsProvider();

(async () => {
  try {
    // Build the service client from the config-file credentials.
    const client = new generativeaiinference.GenerativeAiInferenceClient({
      authenticationDetailsProvider: provider
    });

    // Assemble the chat payload. Every value below is a generated placeholder;
    // substitute real OCIDs and parameter values for your tenancy before running.
    const chatDetails = {
      compartmentId: "ocid1.test.oc1..<unique_ID>EXAMPLE-compartmentId-Value",
      servingMode: {
        servingType: "ON_DEMAND",
        modelId: "ocid1.test.oc1..<unique_ID>EXAMPLE-modelId-Value"
      },
      chatRequest: {
        apiFormat: "COHEREV2",
        messages: [
          {
            role: "TOOL",
            toolCallId: "ocid1.test.oc1..<unique_ID>EXAMPLE-toolCallId-Value",
            content: [
              {
                type: "DOCUMENT",
                document: "EXAMPLE-document-Value"
              }
            ]
          }
        ],
        documents: ["EXAMPLE-documents-Value"],
        citationOptions: {
          mode: generativeaiinference.models.CitationOptionsV2.Mode.Fast
        },
        toolsChoice: generativeaiinference.models.CohereChatRequestV2.ToolsChoice.Required,
        tools: [
          {
            type: generativeaiinference.models.CohereToolV2.Type.Function,
            function: {
              name: "EXAMPLE-name-Value",
              description: "EXAMPLE-description-Value",
              parameters: "EXAMPLE-parameters-Value"
            }
          }
        ],
        isStrictToolsEnabled: true,
        isLogProbsEnabled: false,
        thinking: {
          type: generativeaiinference.models.CohereThinkingV2.Type.Disabled,
          tokenBudget: 203
        },
        responseFormat: {
          type: "TEXT"
        },
        isSearchQueriesOnly: false,
        streamOptions: {
          isIncludeUsage: false
        },
        isStream: true,
        maxTokens: 887,
        temperature: 0.13406122,
        topK: 36,
        topP: 0.30801296,
        frequencyPenalty: 0.90215766,
        presencePenalty: 0.7918264,
        seed: 937,
        stopSequences: ["EXAMPLE--Value"],
        priority: 404,
        isRawPrompting: true,
        safetyMode: generativeaiinference.models.CohereChatRequestV2.SafetyMode.Off
      }
    };

    // Wrap the payload in the request envelope together with the retry/tracing headers.
    const chatRequest: generativeaiinference.requests.ChatRequest = {
      chatDetails: chatDetails,
      opcRetryToken: "EXAMPLE-opcRetryToken-Value",
      opcRequestId: "NOYUHSDU8YXMQ1XEONMW<unique_ID>"
    };

    // Send the request to the service.
    const chatResponse = await client.chat(chatRequest);
  } catch (error) {
    console.log("chat Failed with error  " + error);
  }
})();