Heaven-GPT: a free GPT by Heaven, using a free GPT-4 API.
import os
import logging

from litellm import completion

# Set ENV variables. The key is a placeholder; it is not used when requests go through the proxy.
os.environ["OPENAI_API_KEY"] = "set anything here - key is not used for proxy"

logging.basicConfig(filename='app.log', filemode='w', format='%(name)s - %(levelname)s - %(message)s', level=logging.INFO)
def get_chat_completion(messages, model, api_base, custom_llm_provider, temperature, max_tokens, stream):
    """
    Gets the response from the completion API.
    """
    try:
        logging.info(f"Chat completion started with arguments: Messages '{messages}', Model '{model}', API Base '{api_base}', Custom LLM Provider '{custom_llm_provider}', Temperature '{temperature}', Max Tokens '{max_tokens}', Stream '{stream}'.")
        chat_completion = completion(
            model=model,
            messages=messages,
            api_base=api_base,
            custom_llm_provider=custom_llm_provider,
            temperature=temperature,
            max_tokens=max_tokens,
            stream=stream
        )
        logging.info('API call successful.')
        return chat_completion
    except Exception as e:
        logging.error(f'Error occurred: {e}')
        return None
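# A minimal usage sketch (hypothetical values): the same wrapper can also be
# called without streaming, in which case it returns a single response object
# whose full text lives at choices[0]['message']['content'].
#
#     reply = get_chat_completion(
#         messages=[{"role": "user", "content": "Hello"}],
#         model="gpt-3.5-turbo",
#         api_base="https://chatgpt-4all.haseebmir.repl.co",
#         custom_llm_provider="openai",
#         temperature=0.1,
#         max_tokens=256,
#         stream=False,
#     )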
def process_chat_completion(chat_completion):
    """
    Processes the response from the completion API.
    Checks for the presence of each element in the JSON response.
    """
    try:
        if isinstance(chat_completion, dict):
            logging.info('Chat completion is a dictionary.')
            # Not streamed: the full message is available at once.
            print(chat_completion['choices'][0]['message']['content'])
        else:
            # Streamed: print each content delta as it arrives.
            for chunk in chat_completion:
                # Check if 'choices' is in the chunk
                if 'choices' in chunk:
                    # Check if the first element in 'choices' has 'delta'
                    if 'delta' in chunk['choices'][0]:
                        # Check if 'content' is in 'delta'
                        if 'content' in chunk['choices'][0]['delta']:
                            # end='' because each delta already carries its own spacing.
                            print(chunk['choices'][0]['delta']['content'], end="", flush=True)
                            logging.info('Chunk processed successfully.')
                        else:
                            logging.error('Error: "content" not found in "delta".')
                    else:
                        logging.error('Error: "delta" not found in choices[0].')
                else:
                    logging.error('Error: "choices" not found in chunk.')
    except Exception as e:
        logging.error(f'Error occurred: {e}')
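# For reference, each streamed chunk is expected to follow the OpenAI-style
# delta format (a sketch; exact fields can vary by provider), which is why the
# nested 'choices' -> 'delta' -> 'content' checks above are needed:
#
#     {"choices": [{"index": 0, "delta": {"content": "partial text"}}]}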
def main():
    """
    Main function to execute the program.
    """
    while True:
        prompt = input("Enter Prompt: ")
        messages = [
            {"role": "system", "content": "You are a senior software developer called HeavenHM, experienced in multiple programming languages and software architectures. You provide detailed, clear, and efficient solutions."},
            {"role": "user", "content": f"Generate the code for {prompt}. The output should be only code and nothing else; the code should be clean, modular, and handle errors and exceptions."}
        ]
        api_base = "https://chatgpt-4all.haseebmir.repl.co"
        response = get_chat_completion(model="gpt-3.5-turbo", messages=messages, api_base=api_base, custom_llm_provider="openai", temperature=0.1, max_tokens=2048, stream=True)
        logging.info(f"Response is {response}")
        if response is not None:
            process_chat_completion(response)
        print("\nESC to quit or any other key to continue...")
        if input() == '\x1b':  # '\x1b' is the ASCII escape character; press ESC then Enter.
            break
if __name__ == "__main__":
    try:
        main()
    except Exception as exception:
        logging.error(f'Error occurred in main function: {exception}')
    finally:
        logging.info('__main__ ended.')
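# To try it out (assumes Python 3 and `pip install litellm`; the file name is
# hypothetical):
#
#     $ python heaven_gpt.py
#     Enter Prompt: binary search in Python
#
# The generated code streams to stdout, and a run log is written to app.log.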