Installation

Install the Prompt Foundry SDK

pip install prompt_foundry_python_sdk

Integration

The full API of this library can be found in the API Reference page by selecting Python in the interactive examples.

Option 1 - Completion Proxy

This option requires you to add a provider API key in your organization settings.

Initiates a completion request to the configured LLM provider using specified parameters and provided variables. This endpoint abstracts the integration with different model providers, enabling seamless switching between models while maintaining a consistent data model for your application.

import os
from prompt_foundry_python_sdk import PromptFoundry

# Create the Prompt Foundry client. The api_key argument defaults to the
# PROMPT_FOUNDRY_API_KEY environment variable, so passing it explicitly
# here is optional.
pf_client = PromptFoundry(
    api_key=os.environ.get("PROMPT_FOUNDRY_API_KEY"),
)

# Run prompt "1212121" through the completion proxy, appending a single
# user message on top of the prompt's configured messages.
response = pf_client.completion.create(
    id="1212121",
    append_messages=[
        {
            "role": "user",
            "content": [
                {"type": "TEXT", "text": "What is the weather in Seattle, WA?"},
            ],
        },
    ],
)

print(response.message)

Option 2 - Direct Provider Integration

Fetches the configured model parameters and messages rendered with the provided variables mapped to the set LLM provider. This endpoint abstracts the need to handle mapping between different providers, while still allowing direct calls to the providers.

OpenAI Integration

Install the OpenAI SDK

pip install openai

Import the OpenAI and Prompt Foundry SDKs

import os
from prompt_foundry_python_sdk import PromptFoundry

from openai import OpenAI

# Initialize Prompt Foundry SDK with your API key
pf = PromptFoundry(
    api_key=os.environ.get("PROMPT_FOUNDRY_API_KEY"),
)

# Initialize the OpenAI client with your API key. Named `openai_client`
# rather than `openai` so it does not shadow the `openai` package name.
openai_client = OpenAI(
    api_key=os.environ.get("OPENAI_API_KEY"),
)


def main():
    """Fetch rendered prompt parameters from Prompt Foundry, then call OpenAI directly."""
    try:
        # Retrieve model parameters for the prompt, rendered with the
        # given variables and the appended user message.
        model_parameters = pf.prompts.get_parameters(
            "1212121",
            variables={"hello": "world"},
            append_messages=[{
                "role": "user",
                "content": [{
                    "type": "TEXT",
                    "text": "What is the weather in Seattle, WA?",
                }],
            }],
        )

        # Only proceed if this prompt is configured for OpenAI.
        if model_parameters.provider == "openai":
            # Use the retrieved parameters to create a chat completion request
            model_response = openai_client.chat.completions.create(
                **model_parameters.parameters
            )

            # Fix: ChatCompletion responses have no `.data` attribute;
            # the assistant's reply is at choices[0].message.content.
            print(model_response.choices[0].message.content)

    except Exception as e:
        print(f"Error: {e}")


if __name__ == "__main__":
    main()

Anthropic Integration

Install the Anthropic SDK

pip install anthropic

Import the Anthropic and Prompt Foundry SDKs

import os
from prompt_foundry_python_sdk import PromptFoundry

from anthropic import Anthropic

# Initialize Prompt Foundry SDK with your API key
pf = PromptFoundry(
    api_key=os.environ.get("PROMPT_FOUNDRY_API_KEY"),
)

# Initialize the Anthropic client with your API key.
# Fix: the original bound the same object to both `anthropic` and `client`;
# `anthropic` was never used and shadowed the `anthropic` package name.
client = Anthropic(
    api_key=os.environ.get("ANTHROPIC_API_KEY"),
)


def main():
    """Fetch rendered prompt parameters from Prompt Foundry, then call Anthropic directly."""
    try:
        # Retrieve model parameters for the prompt, rendered with the
        # given variables and the appended user message.
        model_parameters = pf.prompts.get_parameters(
            "1212121",
            variables={"hello": "world"},
            append_messages=[{
                "role": "user",
                "content": [{
                    "type": "TEXT",
                    "text": "What is the weather in Seattle, WA?",
                }],
            }],
        )

        # Only proceed if this prompt is configured for Anthropic.
        if model_parameters.provider == "anthropic":
            # Use the retrieved parameters to create a chat request
            message = client.messages.create(
                **model_parameters.parameters
            )
            print(message.content)

    except Exception as e:
        print(f"Error: {e}")


if __name__ == "__main__":
    main()

Additional Information

For more details, see the project's GitHub repository.

Was this page helpful?