Semantic Kernel using Google Gemini and an API key

If you're thinking about using Google Gemini as your model in Semantic Kernel, then you've come to the right place. Here's how to set it up.

Run the following command to setup and install the required dependencies

uv add "semantic-kernel[google]"

Then all you need is your Gemini API key.

import asyncio

from semantic_kernel import Kernel
from semantic_kernel.connectors.ai.chat_completion_client_base import ChatCompletionClientBase
from semantic_kernel.connectors.ai.function_choice_behavior import FunctionChoiceBehavior
from semantic_kernel.connectors.ai.google.google_ai import (
    GoogleAIChatCompletion,
    GoogleAIChatPromptExecutionSettings,
)
from semantic_kernel.connectors.ai.open_ai.prompt_execution_settings.azure_chat_prompt_execution_settings import (
    AzureChatPromptExecutionSettings,
)
from semantic_kernel.contents.chat_history import ChatHistory
from semantic_kernel.functions import kernel_function
from semantic_kernel.functions.kernel_arguments import KernelArguments
from semantic_kernel.utils.logging import setup_logging

async def main():
    """Send one user message to Google Gemini via Semantic Kernel and stream the reply to stdout.

    Requires a valid Gemini API key in place of ``"YOUR-API-KEY"``.
    """
    # Create the Google Gemini chat-completion connector (not Azure OpenAI).
    chat_completion = GoogleAIChatCompletion(
        gemini_model_id="gemini-2.5-flash-lite",
        api_key="YOUR-API-KEY",
        service_id="my-service-id",  # Optional; for targeting specific services within Semantic Kernel
    )

    # Initialize the kernel and register the Gemini service with it.
    kernel = Kernel()
    kernel.add_service(chat_completion)

    # Retrieve the registered chat-completion service by its id.
    # (Equivalently: kernel.get_service(type=ChatCompletionClientBase), or
    # kernel.get_prompt_execution_settings_from_service_id("my-service-id")
    # to fetch the service's default inference settings.)
    chat_completion_service = kernel.get_service(service_id="my-service-id")

    # Use Gemini-specific execution settings; tune generation parameters
    # (temperature, max tokens, etc.) on this object as needed.
    execution_settings = GoogleAIChatPromptExecutionSettings()

    # Build the conversation history with a single user turn.
    chat_history = ChatHistory()
    chat_history.add_user_message("Hello, how are you?")

    # get_streaming_chat_message_content returns an async generator,
    # so no await here — consume it with `async for` below.
    # (For a single non-streaming reply, use
    # `await chat_completion_service.get_chat_message_content(...)` instead.)
    response = chat_completion_service.get_streaming_chat_message_content(
        chat_history=chat_history,
        settings=execution_settings,
    )

    # Print the reply chunk by chunk as it arrives.
    async for chunk in response:
        print(chunk, end="")


if __name__ == "__main__":
    asyncio.run(main())

Comments

Popular posts from this blog

vllm : Failed to infer device type

NodeJS: Error: spawn EINVAL in window for node version 20.20 and 18.20

android studio kotlin source is null error