# test_connection.py

import os
import sys

from dotenv import load_dotenv
from supabase import create_client, Client

# Load environment variables
load_dotenv('environment.env')

# Get Supabase and OpenAI configuration from environment variables
SUPABASE_URL = os.getenv("SUPABASE_URL")
SUPABASE_KEY = os.getenv("SUPABASE_KEY")
SUPABASE_URI = os.getenv("SUPABASE_URI")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

# Check that the environment variables were loaded successfully
if not SUPABASE_URL or not SUPABASE_KEY or not OPENAI_API_KEY or not SUPABASE_URI:
    print("Please ensure SUPABASE_URL, SUPABASE_KEY, SUPABASE_URI, and OPENAI_API_KEY are correctly set in environment.env.")
    sys.exit(1)
else:
    print("Environment variables loaded successfully.")

# Create the Supabase client
try:
    supabase: Client = create_client(SUPABASE_URL, SUPABASE_KEY)
    print("Client created successfully.")
except Exception as e:
    print("Client creation failed:", e)
    sys.exit(1)
# List all table names in the public schema
try:
    response = supabase.table('information_schema.tables').select('table_name').eq('table_schema', 'public').execute()
    table_names = [table['table_name'] for table in response.data]
    print("All table names:")
    for name in table_names:
        print(name)
except Exception as e:
    print("Connection failed:", e)
    sys.exit(1)
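# Note: the Supabase REST API (PostgREST) generally does not expose
# `information_schema`, so the query above may fail even when the connection
# itself is fine. A minimal sketch of an alternative check, assuming
# SUPABASE_URI is a standard Postgres connection string and psycopg2 is
# installed (both assumptions, not part of the original test):
#
# import psycopg2
#
# conn = psycopg2.connect(SUPABASE_URI)
# with conn, conn.cursor() as cur:
#     cur.execute(
#         "SELECT table_name FROM information_schema.tables WHERE table_schema = 'public'"
#     )
#     for (name,) in cur.fetchall():
#         print(name)
# conn.close()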
### Test Hugging Face token for the TAIDE local model. ######################################################
from transformers import AutoTokenizer, AutoModelForCausalLM

token = os.getenv("HF_API_KEY_7B4BIT")

# Check if the token is loaded correctly
if token is None:
    raise ValueError("Hugging Face API token is not set. Please check your environment.env file.")

# Load the tokenizer and model with the token
try:
    tokenizer = AutoTokenizer.from_pretrained("../TAIDE-LX-7B-Chat-4bit", token=token)
    model = AutoModelForCausalLM.from_pretrained("../TAIDE-LX-7B-Chat-4bit", token=token)

    # Verify the model and tokenizer
    print(f"Loaded tokenizer: {tokenizer.name_or_path}")
    print(f"Loaded model: {model.name_or_path}")

    # Optional: print model and tokenizer configuration for more details
    print(f"Model configuration: {model.config}")
    print(f"Tokenizer configuration: {tokenizer}")
except Exception as e:
    print(f"Error loading model or tokenizer: {e}")
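# A quick generation smoke test could follow the load; this is only a sketch,
# and the prompt text and generation settings are illustrative assumptions,
# not part of the original script:
#
# inputs = tokenizer("Hello, please introduce yourself.", return_tensors="pt")
# outputs = model.generate(**inputs, max_new_tokens=50)
# print(tokenizer.decode(outputs[0], skip_special_tokens=True))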
#################################################################################################################
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

## Download the GGUF model
model_name = "TheBloke/Mixtral-8x7B-Instruct-v0.1-GGUF"
model_file = "mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf"  # the specific model file we'll use in this example; it's a 4-bit quant, but other levels of quantization are available in the model repo if preferred
model_path = hf_hub_download(model_name, filename=model_file)
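# The `Llama` import above suggests an optional local load of the downloaded
# GGUF file with llama-cpp-python; a minimal sketch, where the n_ctx,
# max_tokens, and prompt values are illustrative assumptions:
#
# llm = Llama(model_path=model_path, n_ctx=2048)
# result = llm("I believe the meaning of life is", max_tokens=128, temperature=0.5)
# print(result["choices"][0]["text"])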
import requests

def generate_response(input_text, max_tokens=512, temperature=0.7):
    # URL of Ollama's OpenAI-compatible chat endpoint; adjust based on how Ollama exposes the model
    url = "http://localhost:11434/v1/chat/completions"

    # Payload in the OpenAI chat-completions format expected by this endpoint
    payload = {
        "model": "mixtral",  # adjust to whatever model name Ollama is actually serving
        "messages": [{"role": "user", "content": input_text}],
        "max_tokens": max_tokens,
        "temperature": temperature,
    }

    # Make a request to the model and return the generated text
    response = requests.post(url, json=payload)
    response.raise_for_status()
    return response.json()["choices"][0]["message"]["content"]

if __name__ == "__main__":
    input_text = "I believe the meaning of life is"
    response = generate_response(input_text, max_tokens=128, temperature=0.5)
    print(f"Model: {response}")
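# Equivalently, the same endpoint can be exercised with the `openai` Python
# client pointed at Ollama's OpenAI-compatible server; a sketch, assuming the
# openai package is installed and the model name matches what Ollama serves:
#
# from openai import OpenAI
#
# client = OpenAI(base_url="http://localhost:11434/v1", api_key="ollama")
# completion = client.chat.completions.create(
#     model="mixtral",
#     messages=[{"role": "user", "content": "I believe the meaning of life is"}],
#     max_tokens=128,
#     temperature=0.5,
# )
# print(completion.choices[0].message.content)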