Skip to content

Commit

Permalink
feat: script to test providers and generate md table
Browse files Browse the repository at this point in the history
  • Loading branch information
nagarajpandith committed Dec 9, 2023
1 parent 01df7e9 commit d9f8c08
Show file tree
Hide file tree
Showing 4 changed files with 73 additions and 4 deletions.
3 changes: 2 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
/venv/**
.flaskenv
.vercel
__pycache__
__pycache__
.env
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -46,5 +46,5 @@ curl -X POST -H "Content-Type: application/json" -d '{"content": "YOUR_PROMPT",
}
```

> **Note**
> [!NOTE]
> If you want to use the Hosted API, it's live at [https://gpt-flask.onrender.com](https://gpt-flask.onrender.com/). For the API key, email me at [[email protected]](mailto:[email protected]).
7 changes: 5 additions & 2 deletions app.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,12 +4,13 @@
import os
from dotenv import load_dotenv
load_dotenv()
API_KEY = os.environ.get('API_KEY')

from flask_cors import CORS

app = Flask(__name__)
CORS(app)

API_KEY = os.environ.get('API_KEY')

def generate_response(model, llm, content):
response_generator = g4f.ChatCompletion.create(
Expand Down Expand Up @@ -46,6 +47,8 @@ def chat_completion():
return jsonify({"error": f"Invalid provider: {pname}"}), 400

model = "gpt-3.5-turbo"
if not llm.supports_gpt_35_turbo:
model = "gpt-4"

return Response(generate_response(model, llm, content), content_type='text/event-stream', mimetype='text/event-stream')

Expand All @@ -60,4 +63,4 @@ def working_providers():
return jsonify({"working_providers": working_providers_list})

if __name__ == '__main__':
app.run(debug=True)
app.run(threaded=True)
65 changes: 65 additions & 0 deletions test.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,65 @@
import requests
import time
import os
from dotenv import load_dotenv

# Load .env so API_KEY (the key accepted by the /chat_completion endpoints)
# is available before any request body is built.
load_dotenv()
API_KEY = os.environ.get('API_KEY')

# Ask the hosted instance which g4f providers are currently working; the
# rest of the script benchmarks exactly that list.
hosted_providers_url = "https://gpt-flask.onrender.com/working_providers"
# requests has no default timeout — without one, a hung server stalls the
# whole script forever before any provider is even tested.
response = requests.get(hosted_providers_url, timeout=30)
providers = response.json().get("working_providers", [])

# BUG FIX: the Flask development server speaks plain HTTP, not HTTPS.
# "https://127.0.0.1:5000" fails the TLS handshake on every request, so the
# original script could never mark a provider as "Local" or "Both".
local_endpoint = "http://127.0.0.1:5000/chat_completion"
hosted_endpoint = "https://gpt-flask.onrender.com/chat_completion"

# Template request body; test_provider() copies this and adds a "provider" key.
example_body_base = {
    "content": "Hello!",
    "api_key": API_KEY
}

def test_provider(provider, endpoint):
    """Benchmark one provider against one endpoint.

    Sends the template prompt (``example_body_base``) with ``provider``
    injected, and returns ``(status_code, elapsed_seconds)``.

    - Timeout after 60s -> (408, 60)
    - HTTP error response (4xx/5xx) -> (real status code, elapsed time)
    - Connection/other transport failure -> (500, 0)
    """
    example_body = example_body_base.copy()
    example_body["provider"] = provider

    # monotonic() is immune to wall-clock adjustments while a request is in
    # flight, unlike time.time().
    start_time = time.monotonic()

    try:
        response = requests.post(endpoint, json=example_body, timeout=60)
        response.raise_for_status()
    except requests.exceptions.Timeout:
        print(f"{provider} - Request timed out after 60 seconds")
        return 408, 60
    except requests.exceptions.HTTPError:
        # BUG FIX: raise_for_status() turns every 4xx/5xx into an HTTPError,
        # which the original code caught via the generic RequestException
        # handler and reported as (500, 0) — losing both the real status
        # code and the measured time. Report what the server actually said.
        elapsed_time = time.monotonic() - start_time
        print(f"{provider} - Response Time: {elapsed_time:.4f} seconds - Status Code: {response.status_code}")
        return response.status_code, elapsed_time
    except requests.exceptions.RequestException as e:
        # Transport-level failure (DNS, refused connection, ...): no HTTP
        # status exists, so report a synthetic 500 with no timing.
        print(f"{provider} - Request failed with error: {e}")
        return 500, 0

    elapsed_time = time.monotonic() - start_time

    print(f"{provider} - Response Time: {elapsed_time:.4f} seconds - Status Code: {response.status_code}")

    return response.status_code, elapsed_time

# Results table in GitHub-flavoured markdown; rows are appended per provider.
markdown_table = (
    "| Provider | Local/Hosted/Both | Average Response Time |\n"
    "| -------- | ----------------- | ------------------------------------- |\n"
)

for provider in providers:
    local_status, local_time = test_provider(provider, local_endpoint)
    hosted_status, hosted_time = test_provider(provider, hosted_endpoint)

    local_ok = local_status == 200
    hosted_ok = hosted_status == 200

    # Classify where the provider works and average only the successful runs.
    if local_ok and hosted_ok:
        status, average_time = "Both", (local_time + hosted_time) / 2
    elif local_ok:
        status, average_time = "Local", local_time
    elif hosted_ok:
        status, average_time = "Hosted", hosted_time
    else:
        status, average_time = "None", 0.0

    markdown_table += f"| {provider} | {status} | {average_time:.4f} | \n"

print(markdown_table)

0 comments on commit d9f8c08

Please sign in to comment.