diff --git a/README.md b/README.md index fb5b92a..1679a32 100644 --- a/README.md +++ b/README.md @@ -64,12 +64,14 @@ curl -X POST -H "Content-Type: application/json" -d '{"content": "YOUR_PROMPT", > (Last Tested on 9-12-2023) -> To test it yourself, run `python3 test.py` script. +> [!IMPORTANT] +> If a Provider isn't working, it is likely because it requires special arguments such as auth='cookie' or 'jwt', the WebDriver fails to connect (web scraping is required for most of the providers here), or the IP address is blocked. Hence, do not treat the results below as the final source of truth. To test it yourself, run the `python3 test.py` script. > [!NOTE] > To know the Providers and their Models refer [this](https://github.com/xtekky/gpt4free?tab=readme-ov-file#-providers-and-models). **Status Values** + - **Both:** The provider works successfully on both the local and hosted API. - **Local:** The provider works successfully only on the local API. - **Hosted:** The provider works successfully only on the hosted API. 
diff --git a/app.py b/app.py index 8e1c183..4c164b8 100644 --- a/app.py +++ b/app.py @@ -27,7 +27,7 @@ def generate_response(model, llm, content): def chat_completion(): data = request.get_json() - required_params = {'content', 'provider', 'api_key'} + required_params = {'content', 'api_key'} missing_params = required_params - set(data.keys()) if missing_params: @@ -35,8 +35,9 @@ def chat_completion(): return jsonify({"error": error_msg}), 400 content = data['content'] - pname = data['provider'] + pname = data.get('provider', 'You') # Fall back to the 'You' provider when none is given api_key = data['api_key'] + stream = data.get('stream', True) # Stream responses by default when not specified if not api_key == API_KEY: return jsonify({"error": "Invalid API key"}), 401 @@ -50,7 +51,16 @@ def chat_completion(): if not llm.supports_gpt_35_turbo: model = "gpt-4" - return Response(generate_response(model, llm, content), content_type='text/event-stream', mimetype='text/event-stream') + if stream: + return Response(generate_response(model, llm, content), content_type='text/event-stream', mimetype='text/event-stream') + else: + response = g4f.ChatCompletion.create( + model=model, + provider=llm, + messages=[{"role": "user", "content": content}], + ) + + return jsonify(response) @app.route('/working_providers', methods=['GET']) def working_providers():