# import os
# import sys
# from supabase import create_client, Client
# from dotenv import load_dotenv

# # Load environment variables from environment.env
# load_dotenv('environment.env')

# # Get Supabase and OpenAI configuration from environment variables
# SUPABASE_URL = os.getenv("SUPABASE_URL")
# SUPABASE_KEY = os.getenv("SUPABASE_KEY")
# SUPABASE_URI = os.getenv("SUPABASE_URI")
# OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

# # Check that all required environment variables are set
# if not SUPABASE_URL or not SUPABASE_KEY or not SUPABASE_URI or not OPENAI_API_KEY:
#     print("Please ensure SUPABASE_URL, SUPABASE_KEY, SUPABASE_URI, and OPENAI_API_KEY are correctly set in environment.env.")
#     sys.exit(1)
# else:
#     print("Environment variables loaded successfully.")
# # Create the Supabase client
# try:
#     supabase: Client = create_client(SUPABASE_URL, SUPABASE_KEY)
#     print("Client created successfully.")
# except Exception as e:
#     print("Client creation failed:", e)
#     sys.exit(1)
# # List all table names in the public schema.
# # Note: PostgREST normally exposes only the configured API schemas (e.g. public),
# # so querying information_schema through supabase-py may be rejected by the API;
# # the direct-connection sketch below is a fallback.
# try:
#     response = supabase.table('information_schema.tables').select('table_name').eq('table_schema', 'public').execute()
#     table_names = [table['table_name'] for table in response.data]
#     print("All table names:")
#     for name in table_names:
#         print(name)
# except Exception as e:
#     print("Failed to list tables:", e)
#     sys.exit(1)
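# # Fallback sketch for listing tables: query Postgres directly over SUPABASE_URI.
# # This assumes SUPABASE_URI is a standard Postgres connection string and that
# # psycopg2 is installed; neither is confirmed by the original setup.
# import psycopg2

# def list_public_tables(uri):
#     # Read table names straight from information_schema over a direct connection
#     with psycopg2.connect(uri) as conn:
#         with conn.cursor() as cur:
#             cur.execute(
#                 "SELECT table_name FROM information_schema.tables "
#                 "WHERE table_schema = 'public'"
#             )
#             return [row[0] for row in cur.fetchall()]

# # Example usage: print(list_public_tables(SUPABASE_URI))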
# ### Test Hugging Face tokens for the TAIDE local model. ######################################################
# from transformers import AutoTokenizer, AutoModelForCausalLM

# token = os.getenv("HF_API_KEY_7B4BIT")
# # Check that the token is loaded correctly
# if token is None:
#     raise ValueError("Hugging Face API token is not set. Please check your environment.env file.")

# # Load the tokenizer and model with the token
# try:
#     tokenizer = AutoTokenizer.from_pretrained("../TAIDE-LX-7B-Chat-4bit", token=token)
#     model = AutoModelForCausalLM.from_pretrained("../TAIDE-LX-7B-Chat-4bit", token=token)

#     # Verify the model and tokenizer
#     print(f"Loaded tokenizer: {tokenizer.name_or_path}")
#     print(f"Loaded model: {model.name_or_path}")

#     # Optional: print the model and tokenizer configuration for more detail
#     print(f"Model configuration: {model.config}")
#     print(f"Tokenizer configuration: {tokenizer}")
# except Exception as e:
#     print(f"Error loading model or tokenizer: {e}")
#################################################################################################################
# ### Test a local GGUF model (Mixtral) via llama-cpp-python and Ollama. #######################################
# from huggingface_hub import hf_hub_download
# from llama_cpp import Llama

# # Download the GGUF model
# model_name = "TheBloke/Mixtral-8x7B-Instruct-v0.1-GGUF"
# # This is a 4-bit quant; other quantization levels are available in the model repo if preferred
# model_file = "mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf"
# model_path = hf_hub_download(model_name, filename=model_file)
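# # Minimal sketch of running the downloaded GGUF directly with llama-cpp-python
# # (the context size and sampling settings below are illustrative assumptions):
# llm = Llama(model_path=model_path, n_ctx=2048)
# completion = llm("I believe the meaning of life is", max_tokens=64, temperature=0.7)
# print(completion["choices"][0]["text"])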
# import requests

# def generate_response(input_text, max_tokens=512, temperature=0.7):
#     # Ollama's OpenAI-compatible chat completions endpoint
#     url = "http://localhost:11434/v1/chat/completions"
#     # Payload in the OpenAI chat completions format; the model name must match
#     # a model that has been pulled into Ollama (e.g. `ollama pull mixtral`)
#     payload = {
#         "model": "mixtral",
#         "messages": [{"role": "user", "content": input_text}],
#         "max_tokens": max_tokens,
#         "temperature": temperature
#     }
#     # Make a request to the model and extract the assistant's reply
#     response = requests.post(url, json=payload)
#     response.raise_for_status()
#     return response.json()["choices"][0]["message"]["content"]

# if __name__ == "__main__":
#     input_text = "I believe the meaning of life is"
#     response = generate_response(input_text, max_tokens=128, temperature=0.5)
#     print(f"Model: {response}")