from typing import Optional, List, Dict, Any

from oci.addons.adk import Toolkit, tool
import oci

from get_config import get_config

# Tenancy OCID and region come from the project-level config helper.
config = get_config()
compartment_id = config["tenancy"]
region = config["region"]

# NOTE(review): `config` is immediately rebound here to the SDK config file
# (~/.oci/config), so only `compartment_id` and `region` survive from
# get_config() above. Confirm the double assignment is intentional.
config = oci.config.from_file()

# NOTE(review): this client is never used in this module; kept at module
# level in case other modules import it from here.
generative_ai_client = oci.generative_ai.GenerativeAiClient(config)


class chat(Toolkit):  # noqa: N801 - lowercase name kept for existing callers
    """ADK toolkit exposing a single text-generation tool backed by OCI
    Generative AI inference."""

    @tool
    def chat_response(self, user_input: str, model_id: str) -> Optional[Any]:
        """
        Generate a text response using the OCI Generative AI service.

        Args:
            user_input: The user's input text to generate a response for.
            model_id: The OCID of the model to use for text generation.

        Returns:
            The generated text of the first choice returned by the model.
        """
        # Inference endpoint is region-specific; region is resolved at import
        # time from the project config.
        endpoint = f"https://inference.generativeai.{region}.oci.oraclecloud.com"
        inference_client = oci.generative_ai_inference.GenerativeAiInferenceClient(
            config=config,
            service_endpoint=endpoint,
            retry_strategy=oci.retry.NoneRetryStrategy(),  # no automatic retries
            timeout=(10, 240),  # (connect, read) seconds — long read for generation
        )

        # Single-turn conversation: one USER message with one text part.
        content = oci.generative_ai_inference.models.TextContent()
        content.text = user_input

        message = oci.generative_ai_inference.models.Message()
        message.role = "USER"
        message.content = [content]

        chat_request = oci.generative_ai_inference.models.GenericChatRequest()
        chat_request.api_format = (
            oci.generative_ai_inference.models.BaseChatRequest.API_FORMAT_GENERIC
        )
        chat_request.messages = [message]
        chat_request.max_tokens = 4000
        chat_request.temperature = 0.75
        chat_request.frequency_penalty = 0
        chat_request.presence_penalty = 0
        chat_request.top_p = 0.75

        chat_detail = oci.generative_ai_inference.models.ChatDetails()
        chat_detail.serving_mode = (
            oci.generative_ai_inference.models.OnDemandServingMode(model_id=model_id)
        )
        chat_detail.chat_request = chat_request
        chat_detail.compartment_id = compartment_id

        response = inference_client.chat(chat_detail)
        # Fix: use the supported Response.data attribute instead of the
        # original vars(response)["data"] __dict__ poke.
        return response.data.chat_response.choices[0].message.content[0].text