Skip to content

Commit

Permalink
Merge pull request #1958 from hlohaus/leech
Browse files Browse the repository at this point in the history
Add needs_auth to providers, Add PerplexityApi provider
  • Loading branch information
hlohaus committed May 16, 2024
2 parents d1c16ce + 24c5502 commit 0332c0c
Show file tree
Hide file tree
Showing 10 changed files with 72 additions and 43 deletions.
3 changes: 1 addition & 2 deletions g4f/Provider/DeepInfra.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,7 @@ class DeepInfra(Openai):
label = "DeepInfra"
url = "https://deepinfra.com"
working = True
needs_auth = False
has_auth = True
needs_auth = True
supports_stream = True
supports_message_history = True
default_model = "meta-llama/Meta-Llama-3-70b-instruct"
Expand Down
26 changes: 13 additions & 13 deletions g4f/Provider/Reka.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@
class Reka(AbstractProvider):
url = "https://chat.reka.ai/"
working = True
needs_auth = True
supports_stream = True
default_vision_model = "reka"
cookies = {}
Expand All @@ -20,33 +21,32 @@ def create_completion(
messages: Messages,
stream: bool,
proxy: str = None,
timeout: int = 180,
api_key: str = None,
image: ImageType = None,
**kwargs
) -> CreateResult:
cls.proxy = proxy

if not api_key:
cls.cookies = get_cookies("chat.reka.ai")
if not cls.cookies:
raise ValueError("No cookies found for chat.reka.ai")
elif "appSession" not in cls.cookies:
raise ValueError("No appSession found in cookies for chat.reka.ai, log in or provide bearer_auth")
api_key = cls.get_access_token(cls)

conversation = []
for message in messages:
conversation.append({
"type": "human",
"text": message["content"],
})

if image:
image_url = cls.upload_image(cls, api_key, image)
conversation[-1]["image_url"] = image_url
conversation[-1]["media_type"] = "image"

headers = {
'accept': '*/*',
'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
Expand All @@ -64,7 +64,7 @@ def create_completion(
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36',
}

json_data = {
'conversation_history': conversation,
'stream': True,
Expand All @@ -73,7 +73,7 @@ def create_completion(
'model_name': 'reka-core',
'random_seed': int(time.time() * 1000),
}

tokens = ''

response = requests.post('https://chat.reka.ai/api/chat',
Expand All @@ -82,11 +82,11 @@ def create_completion(
for completion in response.iter_lines():
if b'data' in completion:
token_data = json.loads(completion.decode('utf-8')[5:])['text']

yield (token_data.replace(tokens, ''))

tokens = token_data

def upload_image(cls, access_token, image: ImageType) -> str:
boundary_token = os.urandom(8).hex()

Expand Down Expand Up @@ -120,7 +120,7 @@ def upload_image(cls, access_token, image: ImageType) -> str:
cookies=cls.cookies, headers=headers, proxies=cls.proxy, data=data.encode('latin-1'))

return response.json()['media_url']

def get_access_token(cls):
headers = {
'accept': '*/*',
Expand All @@ -141,8 +141,8 @@ def get_access_token(cls):
try:
response = requests.get('https://chat.reka.ai/bff/auth/access_token',
cookies=cls.cookies, headers=headers, proxies=cls.proxy)

return response.json()['accessToken']

except Exception as e:
raise ValueError(f"Failed to get access token: {e}, refresh your cookies / log in into chat.reka.ai")
1 change: 1 addition & 0 deletions g4f/Provider/Replicate.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
class Replicate(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://replicate.com"
working = True
needs_auth = True
default_model = "meta/meta-llama-3-70b-instruct"
model_aliases = {
"meta-llama/Meta-Llama-3-70B-Instruct": default_model
Expand Down
13 changes: 3 additions & 10 deletions g4f/Provider/You.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
from __future__ import annotations

import re
import json
import base64
Expand Down Expand Up @@ -42,7 +44,6 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
]
model_aliases = {
"claude-v2": "claude-2",
"gpt-4o": "gpt-4o",
}
_cookies = None
_cookies_used = 0
Expand Down Expand Up @@ -185,15 +186,7 @@ def get_auth() -> str:
@classmethod
async def create_cookies(cls, client: StreamSession) -> Cookies:
if not cls._telemetry_ids:
try:
cls._telemetry_ids = await get_telemetry_ids()
except RuntimeError as e:
if str(e) == "Event loop is closed":
if debug.logging:
print("Event loop is closed error occurred in create_cookies.")
else:
raise

cls._telemetry_ids = await get_telemetry_ids()
user_uuid = str(uuid.uuid4())
telemetry_id = cls._telemetry_ids.pop()
if debug.logging:
Expand Down
9 changes: 6 additions & 3 deletions g4f/Provider/needs_auth/Gemini.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@ class Gemini(AsyncGeneratorProvider):
_cookies: Cookies = None

@classmethod
async def nodriver_login(cls) -> AsyncIterator[str]:
async def nodriver_login(cls, proxy: str = None) -> AsyncIterator[str]:
try:
import nodriver as uc
except ImportError:
Expand All @@ -71,7 +71,10 @@ async def nodriver_login(cls) -> AsyncIterator[str]:
user_data_dir = None
if debug.logging:
print(f"Open nodriver with user_dir: {user_data_dir}")
browser = await uc.start(user_data_dir=user_data_dir)
browser = await uc.start(
user_data_dir=user_data_dir,
browser_args=None if proxy is None else [f"--proxy-server={proxy}"],
)
login_url = os.environ.get("G4F_LOGIN_URL")
if login_url:
yield f"Please login: [Google Gemini]({login_url})\n\n"
Expand Down Expand Up @@ -134,7 +137,7 @@ async def create_async_generator(
) as session:
snlm0e = await cls.fetch_snlm0e(session, cls._cookies) if cls._cookies else None
if not snlm0e:
async for chunk in cls.nodriver_login():
async for chunk in cls.nodriver_login(proxy):
yield chunk
if cls._cookies is None:
async for chunk in cls.webdriver_login(proxy):
Expand Down
9 changes: 6 additions & 3 deletions g4f/Provider/needs_auth/OpenaiChat.py
Original file line number Diff line number Diff line change
Expand Up @@ -403,7 +403,7 @@ async def create_async_generator(
except NoValidHarFileError as e:
error = e
if cls._api_key is None:
await cls.nodriver_access_token()
await cls.nodriver_access_token(proxy)
if cls._api_key is None and cls.needs_auth:
raise error
cls.default_model = cls.get_model(await cls.get_default_model(session, cls._headers))
Expand Down Expand Up @@ -625,7 +625,7 @@ async def webview_access_token(cls) -> str:
cls._update_cookie_header()

@classmethod
async def nodriver_access_token(cls):
async def nodriver_access_token(cls, proxy: str = None):
try:
import nodriver as uc
except ImportError:
Expand All @@ -637,7 +637,10 @@ async def nodriver_access_token(cls):
user_data_dir = None
if debug.logging:
print(f"Open nodriver with user_dir: {user_data_dir}")
browser = await uc.start(user_data_dir=user_data_dir)
browser = await uc.start(
user_data_dir=user_data_dir,
browser_args=None if proxy is None else [f"--proxy-server={proxy}"],
)
page = await browser.get("https://chatgpt.com/")
await page.select("[id^=headlessui-menu-button-]", 240)
api_key = await page.evaluate(
Expand Down
31 changes: 31 additions & 0 deletions g4f/Provider/needs_auth/PerplexityApi.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
from __future__ import annotations

from .Openai import Openai
from ...typing import AsyncResult, Messages

class PerplexityApi(Openai):
    """OpenAI-compatible client for the Perplexity AI chat completions API.

    Inherits all request/streaming machinery from ``Openai`` and only
    overrides the endpoint base URL plus the catalogue of Perplexity models.
    """
    label = "Perplexity API"
    url = "https://www.perplexity.ai"
    working = True
    default_model = "llama-3-sonar-large-32k-online"
    # Sonar models come in "chat" and search-grounded "online" variants,
    # alongside plain Llama-3 and Mixtral instruct models.
    models = [
        "llama-3-sonar-small-32k-chat",
        "llama-3-sonar-small-32k-online",
        "llama-3-sonar-large-32k-chat",
        default_model,
        "llama-3-8b-instruct",
        "llama-3-70b-instruct",
        "mixtral-8x7b-instruct",
    ]

    @classmethod
    def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        api_base: str = "https://api.perplexity.ai",
        **kwargs
    ) -> AsyncResult:
        """Delegate to the OpenAI-compatible generator, pointed at Perplexity's API base."""
        generator = super().create_async_generator(
            model,
            messages,
            api_base=api_base,
            **kwargs,
        )
        return generator
3 changes: 2 additions & 1 deletion g4f/Provider/needs_auth/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,4 +7,5 @@
from .Openai import Openai
from .Groq import Groq
from .OpenRouter import OpenRouter
from .OpenaiAccount import OpenaiAccount
from .OpenaiAccount import OpenaiAccount
from .PerplexityApi import PerplexityApi
18 changes: 8 additions & 10 deletions g4f/Provider/you/har_file.py
Original file line number Diff line number Diff line change
Expand Up @@ -88,36 +88,34 @@ async def get_telemetry_ids(proxy: str = None) -> list:
except NoValidHarFileError as e:
if debug.logging:
logging.error(e)
if debug.logging:
logging.error('Getting telemetry_id for you.com with nodriver')

try:
from nodriver import start
except ImportError:
raise MissingRequirementsError('Add .har file from you.com or install "nodriver" package | pip install -U nodriver')
page = None
if debug.logging:
logging.error('Getting telemetry_id for you.com with nodriver')

browser = page = None
try:
browser = await start()
browser = await start(
browser_args=None if proxy is None else [f"--proxy-server={proxy}"],
)
page = await browser.get("https://you.com")

while not await page.evaluate('"GetTelemetryID" in this'):
await page.sleep(1)

async def get_telemetry_id():
return await page.evaluate(
f'this.GetTelemetryID("{public_token}", "{telemetry_url}");',
await_promise=True
)

return [await get_telemetry_id()]

finally:
try:
if page is not None:
await page.close()

if browser is not None:
await browser.close()

except Exception as e:
if debug.logging:
logging.error(e)
2 changes: 1 addition & 1 deletion g4f/providers/retry_provider.py
Original file line number Diff line number Diff line change
Expand Up @@ -133,7 +133,7 @@ async def create_async_generator(
if not stream:
yield await provider.create_async(model, messages, **kwargs)
elif hasattr(provider, "create_async_generator"):
async for token in provider.create_async_generator(model, messages, stream, **kwargs):
async for token in provider.create_async_generator(model, messages, stream=stream, **kwargs):
yield token
else:
for token in provider.create_completion(model, messages, stream, **kwargs):
Expand Down

0 comments on commit 0332c0c

Please sign in to comment.