Prompt Foundry is a comprehensive tool for prompt engineering, management, and evaluation. It is designed to simplify the development and integration process for teams working on Python AI applications utilizing large language models (LLMs).
Initiates a completion request to the configured LLM provider using specified parameters and provided variables. This endpoint abstracts the integration with different model providers, enabling seamless switching between models while maintaining a consistent data model for your application.
Copy
import os

from prompt_foundry_python_sdk import PromptFoundry

# The API key falls back to the PROMPT_FOUNDRY_API_KEY environment
# variable, so passing it explicitly can be omitted.
client = PromptFoundry(
    api_key=os.environ.get("PROMPT_FOUNDRY_API_KEY"),
)

# Execute the stored prompt against its configured LLM provider,
# appending one extra user message on top of the prompt template.
response = client.completion.create(
    id="1212121",
    append_messages=[
        {
            "role": "user",
            "content": [
                {
                    "type": "TEXT",
                    "text": "What is the weather in Seattle, WA?",
                }
            ],
        }
    ],
)

print(response.message)
Fetches the prompt's configured model parameters, along with its messages rendered using the provided variables, all mapped to the format expected by the configured LLM provider. This endpoint removes the need to handle the mapping between different providers yourself, while still allowing you to call the providers directly.
import os

from openai import OpenAI
from prompt_foundry_python_sdk import PromptFoundry

# Initialize the Prompt Foundry SDK with your API key.
pf = PromptFoundry(
    api_key=os.environ.get("PROMPT_FOUNDRY_API_KEY"),
)

# Initialize the OpenAI SDK with your API key.
openai = OpenAI(
    api_key=os.environ.get("OPENAI_API_KEY"),
)


def main():
    """Render the stored prompt's parameters and call OpenAI directly."""
    try:
        # Retrieve the provider-mapped model parameters for the prompt,
        # substituting template variables and appending one user message.
        model_parameters = pf.prompts.get_parameters(
            "1212121",
            variables={"hello": "world"},
            append_messages=[
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "TEXT",
                            "text": "What is the weather in Seattle, WA?",
                        }
                    ],
                }
            ],
        )

        # Only forward the request when the prompt is configured for OpenAI.
        if model_parameters.provider == "openai":
            # The parameters are already shaped for the OpenAI chat API,
            # so they can be splatted straight into the request.
            model_response = openai.chat.completions.create(
                **model_parameters.parameters
            )
            # Fix: ChatCompletion responses have no `.data` attribute; the
            # assistant's reply lives in `choices[0].message.content`.
            print(model_response.choices[0].message.content)
    except Exception as e:  # top-level script boundary: report and exit
        print(f"Error: {e}")


if __name__ == "__main__":
    main()
import os

from anthropic import Anthropic
from prompt_foundry_python_sdk import PromptFoundry

# Initialize the Prompt Foundry SDK with your API key.
pf = PromptFoundry(
    api_key=os.environ.get("PROMPT_FOUNDRY_API_KEY"),
)

# Initialize the Anthropic SDK with your API key.
# Fix: the original bound the client to two names at once
# (`anthropic = client = Anthropic(...)`) and then used only `client`;
# a single name keeps the example consistent with its comment.
anthropic = Anthropic(
    api_key=os.environ.get("ANTHROPIC_API_KEY"),
)


def main():
    """Render the stored prompt's parameters and call Anthropic directly."""
    try:
        # Retrieve the provider-mapped model parameters for the prompt,
        # substituting template variables and appending one user message.
        model_parameters = pf.prompts.get_parameters(
            "1212121",
            variables={"hello": "world"},
            append_messages=[
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "TEXT",
                            "text": "What is the weather in Seattle, WA?",
                        }
                    ],
                }
            ],
        )

        # Only forward the request when the prompt is configured for Anthropic.
        if model_parameters.provider == "anthropic":
            # The parameters are already shaped for the Anthropic Messages
            # API, so they can be splatted straight into the request.
            message = anthropic.messages.create(
                **model_parameters.parameters
            )
            print(message.content)
    except Exception as e:  # top-level script boundary: report and exit
        print(f"Error: {e}")


if __name__ == "__main__":
    main()