From 54dfaf02ac741d46edec1cd4cd85245011730ac8 Mon Sep 17 00:00:00 2001
From: smartwa
Date: Fri, 12 Apr 2024 06:26:25 +0300
Subject: [PATCH] patch : Discard is_conversation flag

---
 src/pytgpt/api/__main__.py |  4 ++--
 src/pytgpt/api/v1.py       | 16 +++++++---------
 2 files changed, 9 insertions(+), 11 deletions(-)

diff --git a/src/pytgpt/api/__main__.py b/src/pytgpt/api/__main__.py
index 8ced50f..153826b 100644
--- a/src/pytgpt/api/__main__.py
+++ b/src/pytgpt/api/__main__.py
@@ -1,3 +1,3 @@
-from pytgpt.console import API
+from pytgpt.console import API
 
-API.run()
\ No newline at end of file
+API.run()
diff --git a/src/pytgpt/api/v1.py b/src/pytgpt/api/v1.py
index 0d8ef2b..02de8f3 100644
--- a/src/pytgpt/api/v1.py
+++ b/src/pytgpt/api/v1.py
@@ -34,7 +34,7 @@ class UserPayload(BaseModel):
 
     prompt: str
     provider: str = "phind"
-    is_conversation: bool = False
+    # is_conversation: bool = False
     whole: bool = False
     max_tokens: PositiveInt = 600
     timeout: PositiveInt = 30
@@ -67,7 +67,7 @@ class ProviderResponse(BaseModel):
 
 def init_provider(payload: UserPayload) -> object:
     return provider_map.get(payload.provider, GPT4FREE)(
-        is_conversation=payload.is_conversation,
+        is_conversation=False,  # payload.is_conversation,
         max_tokens=payload.max_tokens,
         timeout=payload.timeout,
         proxies=(
@@ -86,11 +86,10 @@ async def non_stream(payload: UserPayload) -> ProviderResponse:
 
     - `prompt` : User query.
     - `provider` : LLM provider name.
-    - `is_conversation` : Flag for chatting conversationally.
-    - `whole` : Include whole json formatted response as receiced from LLM provider.
+    - `whole` : Return whole response body instead of text only.
     - `max_tokens` : Maximum number of tokens to be generated upon completion.
     - `timeout` : Http request timeout.
-    - `proxy` : Http request proxies.
+    - `proxy` : Http request proxy.
     """
     try:
         provider_obj: LEO = init_provider(payload)
@@ -133,12 +132,11 @@ async def stream(payload: UserPayload) -> Any:
     """Stream back response as received.
 
     - `prompt` : User query.
-    - `provider` : LLM provider name. from
-    - `is_conversation` : Flag for chatting conversationally.
-    - `whole` : Include whole json formatted response as receiced from LLM provider.
+    - `provider` : LLM provider name.
+    - `whole` : Return whole response body instead of text only.
     - `max_tokens` : Maximum number of tokens to be generated upon completion.
     - `timeout` : Http request timeout.
-    - `proxy` : Http request proxies.
+    - `proxy` : Http request proxy.
     """
     return StreamingResponse(
         generate_streaming_response(payload),
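
Note on the client-facing effect: a minimal sketch, assuming Pydantic's default behavior of ignoring unknown fields. The UserPayload below only mirrors the fields visible in the diff (it is not the full v1.py), and the sample prompt value is made up for illustration.

from pydantic import BaseModel, PositiveInt

class UserPayload(BaseModel):
    prompt: str
    provider: str = "phind"
    # is_conversation: bool = False  (field discarded by this patch)
    whole: bool = False
    max_tokens: PositiveInt = 600
    timeout: PositiveInt = 30

# Pydantic ignores unknown fields by default, so an older client that
# still sends "is_conversation" gets no validation error -- the flag is
# silently dropped, and init_provider() now hard-codes
# is_conversation=False regardless of what was sent.
payload = UserPayload(**{"prompt": "hi", "is_conversation": True})
print(payload)  # prompt='hi' provider='phind' whole=False ... (repr format assumes Pydantic v2)

So existing callers keep working unchanged, but every provider is initialized with is_conversation=False, presumably making each /chat request independent of prior prompts.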