patch : Discard is_conversation flag
Simatwa committed Apr 12, 2024
1 parent c466333 commit 54dfaf0
Showing 2 changed files with 9 additions and 11 deletions.
4 changes: 2 additions & 2 deletions src/pytgpt/api/__main__.py
@@ -1,3 +1,3 @@
-from pytgpt.console import API
+from pytgpt.console import API
 
-API.run()
+API.run()
16 changes: 7 additions & 9 deletions src/pytgpt/api/v1.py
@@ -34,7 +34,7 @@
 class UserPayload(BaseModel):
     prompt: str
     provider: str = "phind"
-    is_conversation: bool = False
+    # is_conversation: bool = False
     whole: bool = False
     max_tokens: PositiveInt = 600
     timeout: PositiveInt = 30
@@ -67,7 +67,7 @@ class ProviderResponse(BaseModel):
 
 def init_provider(payload: UserPayload) -> object:
     return provider_map.get(payload.provider, GPT4FREE)(
-        is_conversation=payload.is_conversation,
+        is_conversation=False, # payload.is_conversation,
         max_tokens=payload.max_tokens,
         timeout=payload.timeout,
         proxies=(
@@ -86,11 +86,10 @@ async def non_stream(payload: UserPayload) -> ProviderResponse:
     - `prompt` : User query.
     - `provider` : LLM provider name.
-    - `is_conversation` : Flag for chatting conversationally.
-    - `whole` : Include whole json formatted response as receiced from LLM provider.
+    - `whole` : Return whole response body instead of text only.
     - `max_tokens` : Maximum number of tokens to be generated upon completion.
     - `timeout` : Http request timeout.
-    - `proxy` : Http request proxies.
+    - `proxy` : Http request proxy.
     """
     try:
         provider_obj: LEO = init_provider(payload)
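
For reference, a request body matching the documented fields after this change might look like the sketch below. The route path and any proxy field lie outside the shown hunks, so neither is assumed here; `is_conversation` is deliberately absent because the endpoint now discards it.

# Hypothetical request body mirroring the UserPayload fields shown above.
payload = {
    "prompt": "Hello there",  # user query
    "provider": "phind",      # LLM provider name
    "whole": False,           # text only, not the whole response body
    "max_tokens": 600,        # completion token cap
    "timeout": 30,            # HTTP request timeout
}
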
@@ -133,12 +132,11 @@ async def stream(payload: UserPayload) -> Any:
     """Stream back response as received.
     - `prompt` : User query.
-    - `provider` : LLM provider name. from
-    - `is_conversation` : Flag for chatting conversationally.
-    - `whole` : Include whole json formatted response as receiced from LLM provider.
+    - `provider` : LLM provider name.
+    - `whole` : Return whole response body instead of text only.
     - `max_tokens` : Maximum number of tokens to be generated upon completion.
     - `timeout` : Http request timeout.
-    - `proxy` : Http request proxies.
+    - `proxy` : Http request proxy.
     """
     return StreamingResponse(
         generate_streaming_response(payload),
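Taken together, the two touched definitions read roughly as follows after this commit. This is a minimal runnable sketch, not the module itself: `provider_map` and `GPT4FREE` are stand-ins for names defined elsewhere in v1.py, and the `proxies` argument is trimmed because its construction falls outside the shown hunk.

from pydantic import BaseModel, PositiveInt


class GPT4FREE:
    # Stand-in for the real fallback provider class; only the parameters
    # that init_provider passes are modelled here.
    def __init__(self, is_conversation=False, max_tokens=600, timeout=30):
        self.is_conversation = is_conversation
        self.max_tokens = max_tokens
        self.timeout = timeout


provider_map = {}  # the real module maps provider names to provider classes


class UserPayload(BaseModel):
    prompt: str
    provider: str = "phind"
    # is_conversation: bool = False  <- discarded by this commit
    whole: bool = False
    max_tokens: PositiveInt = 600
    timeout: PositiveInt = 30


def init_provider(payload: UserPayload) -> object:
    # is_conversation is now hard-wired to False, whatever the client sent.
    return provider_map.get(payload.provider, GPT4FREE)(
        is_conversation=False,
        max_tokens=payload.max_tokens,
        timeout=payload.timeout,
    )


# A client that still sends the flag has it silently dropped (pydantic's
# default config ignores unknown fields), so the provider stays
# non-conversational:
provider = init_provider(UserPayload(prompt="Hello there", is_conversation=True))
assert provider.is_conversation is False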
