Add OPENAI_BASE_URL support for custom OpenAI-compatible endpoints (#283)
Enables GPT models to use custom API endpoints through the OPENAI_BASE_URL environment variable. This addresses the limitation where only Azure OpenAI supported custom endpoints while standard GPT models were hardcoded to api.openai.com.

- Add intelligent URL handling to avoid duplicate /v1 paths
- Maintain backward compatibility with the default OpenAI API
- Update README with configuration instructions
- Non-breaking change preserving existing functionality

Fixes API integration issues for users with custom OpenAI-compatible services.
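As a rough illustration of the "intelligent URL handling" described above, the sketch below mirrors the logic added in the diff that follows; it is standalone example code, not part of the commit itself, and the example endpoints are hypothetical.

import os

def resolve_chat_completions_url() -> str:
    # Default to the official OpenAI API when OPENAI_BASE_URL is unset.
    base_url = os.environ.get('OPENAI_BASE_URL', 'https://api.openai.com')
    # Avoid a duplicate /v1 segment if the base URL already ends with /v1.
    if base_url.endswith('/v1'):
        return f"{base_url}/chat/completions"
    return f"{base_url}/v1/chat/completions"

# Hypothetical resolutions:
#   OPENAI_BASE_URL unset                        -> https://api.openai.com/v1/chat/completions
#   OPENAI_BASE_URL=https://proxy.example.com    -> https://proxy.example.com/v1/chat/completions
#   OPENAI_BASE_URL=https://proxy.example.com/v1 -> https://proxy.example.com/v1/chat/completions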
@@ -615,13 +615,17 @@ class PromptAgent:
             else:
                 return response.json()['choices'][0]['message']['content']
         elif self.model.startswith("gpt"):
+            # Support custom OpenAI base URL via environment variable
+            base_url = os.environ.get('OPENAI_BASE_URL', 'https://api.openai.com')
+            # Smart handling: avoid duplicate /v1 if base_url already ends with /v1
+            api_url = f"{base_url}/chat/completions" if base_url.endswith('/v1') else f"{base_url}/v1/chat/completions"
             headers = {
                 "Content-Type": "application/json",
                 "Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}"
             }
             logger.info("Generating content with GPT model: %s", self.model)
             response = requests.post(
-                "https://api.openai.com/v1/chat/completions",
+                api_url,
                 headers=headers,
                 json=payload
             )
@@ -631,7 +635,7 @@ class PromptAgent:
                     logger.error("Context length exceeded. Retrying with a smaller context.")
                     payload["messages"] = [payload["messages"][0]] + payload["messages"][-1:]
                     retry_response = requests.post(
-                        "https://api.openai.com/v1/chat/completions",
+                        api_url,
                         headers=headers,
                         json=payload
                     )
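As a usage sketch (the gateway URL below is a hypothetical placeholder), the change is driven purely by the environment, so callers need no code changes:

import os

# Point the agent at a custom OpenAI-compatible service; leave unset to keep using api.openai.com.
os.environ['OPENAI_BASE_URL'] = 'https://my-gateway.example.com/v1'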