Browse source code

Remove files from remote repo

SherryLiu 7 months ago
parent
commit
edb92ac50d
9 changed files with 0 additions and 357 deletions
  1. =40.0.0 (+0, -1)
  2. Modelfile (+0, -2)
  3. RAG_app.py (+0, -198)
  4. README_docker.md (+0, -20)
  5. docker-compose.yml (+0, -11)
  6. dockerfile (+0, -15)
  7. ollama_chat.py (+0, -74)
  8. taide_rag.py (+0, -30)
  9. test_data.txt (+0, -6)

+ 0 - 1
=40.0.0

@@ -1 +0,0 @@
-Requirement already satisfied: setuptools in /opt/anaconda3/envs/choozemo-carbon/lib/python3.9/site-packages (70.1.1)

+ 0 - 2
Modelfile

@@ -1,2 +0,0 @@
-FROM "path to the .gguf file"
-# TAIDE .gguf file can be downloaded here https://huggingface.co/taide/TAIDE-LX-7B-Chat-4bit/tree/main?show_file_info=taide-7b-a.2-q4_k_m.gguf

+ 0 - 198
RAG_app.py

@@ -1,198 +0,0 @@
-from dotenv import load_dotenv
-load_dotenv('environment.env')
-
-from fastapi import FastAPI, Request, HTTPException, status, Body
-# from fastapi.templating import Jinja2Templates
-from fastapi.middleware.cors import CORSMiddleware
-from fastapi.responses import FileResponse
-from fastapi import Depends
-from contextlib import asynccontextmanager
-from pydantic import BaseModel
-from typing import List, Optional
-import uvicorn
-
-from typing import List, Optional
-import sqlparse
-from sqlalchemy import create_engine
-import pandas as pd
-#from retrying import retry
-import datetime
-import json
-from json import loads
-import pandas as pd
-import time
-from langchain.callbacks import get_openai_callback
-
-from langchain_community.vectorstores import Chroma
-from langchain_openai import OpenAIEmbeddings
-from RAG_strategy import multi_query, naive_rag, naive_rag_for_qapairs
-from Indexing_Split import create_retriever as split_retriever
-from Indexing_Split import gen_doc_from_database, gen_doc_from_history
-
-import os
-from langchain_community.vectorstores import SupabaseVectorStore
-from langchain_openai import OpenAIEmbeddings
-from supabase.client import Client, create_client
-from add_vectordb import GetVectorStore
-from langchain_community.cache import RedisSemanticCache  # updated import path
-from langchain_core.prompts import PromptTemplate
-import openai
-
-openai_api_key = os.getenv("OPENAI_API_KEY")
-openai.api_key = openai_api_key
-URI = os.getenv("SUPABASE_URI")
-
-global_retriever = None
-
-@asynccontextmanager
-async def lifespan(app: FastAPI):
-    global global_retriever
-    global vector_store
-    
-    start = time.time()
-    # global_retriever = split_retriever(path='./Documents', extension="docx")
-    # global_retriever = raptor_retriever(path='../Documents', extension="txt")
-    # global_retriever = unstructured_retriever(path='../Documents')
-
-    supabase_url = os.getenv("SUPABASE_URL")
-    supabase_key = os.getenv("SUPABASE_KEY")
-    document_table = "documents"
-    supabase: Client = create_client(supabase_url, supabase_key)
-
-    embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key)
-    vector_store = GetVectorStore(embeddings, supabase, document_table)
-    global_retriever = vector_store.as_retriever(search_kwargs={"k": 4})
-
-    print(time.time() - start)
-    yield
-
-def get_retriever():
-    return global_retriever
-
-
-def get_vector_store():
-    return vector_store
-
-app = FastAPI(lifespan=lifespan)
-
-# templates = Jinja2Templates(directory="temp")
-app.add_middleware(
-    CORSMiddleware,
-    allow_origins=["*"],
-    allow_credentials=True,
-    allow_methods=["*"],
-    allow_headers=["*"],
-)
-
-@app.get("/answer2")
-def multi_query_answer(question, retriever=Depends(get_retriever)):
-    start = time.time()
-
-    with get_openai_callback() as cb:
-        # qa_doc = gen_doc_from_database()
-        # qa_history_doc = gen_doc_from_history()
-        # qa_doc.extend(qa_history_doc)
-        # vectorstore = Chroma.from_documents(documents=qa_doc, embedding=OpenAIEmbeddings(), collection_name="qa_pairs")
-        # retriever_qa = vectorstore.as_retriever(search_kwargs={"k": 3})
-        # final_answer, reference_docs = naive_rag_for_qapairs(question, retriever_qa)
-        final_answer = 'False'
-        if final_answer == 'False':
-            final_answer, reference_docs = multi_query(question, retriever, chat_history=[])
-
-    # print(CHAT_HISTORY)
-    
-    # with get_openai_callback() as cb:
-    #     final_answer, reference_docs = multi_query(question, retriever)
-    processing_time = time.time() - start
-    print(processing_time)
-    save_history(question, final_answer, reference_docs, cb, processing_time)
-
-    return {"Answer": final_answer}
-
-class ChatHistoryItem(BaseModel):
-    q: str
-    a: str
-
-@app.post("/answer_with_history")
-def multi_query_answer(question: Optional[str] = '', chat_history: List[ChatHistoryItem] = Body(...), retriever=Depends(get_retriever)):
-    start = time.time()
-    
-    chat_history = [(item.q, item.a) for item in chat_history if item.a != ""]
-    print(chat_history)
-
-    # TODO: similarity search
-    
-    with get_openai_callback() as cb:
-        final_answer, reference_docs = multi_query(question, retriever, chat_history)
-    processing_time = time.time() - start
-    print(processing_time)
-    save_history(question, final_answer, reference_docs, cb, processing_time)
-
-    return {"Answer": final_answer}
-
-
-@app.post("/answer_with_history2")
-def multi_query_answer(question: Optional[str] = '', extension: Optional[str] = 'pdf', chat_history: List[ChatHistoryItem] = Body(...), retriever=Depends(get_retriever)):
-    start = time.time()
-
-    retriever = vector_store.as_retriever(search_kwargs={"k": 4,
-                                                         'filter': {'extension':extension}})
-    
-    chat_history = [(item.q, item.a) for item in chat_history if item.a != ""]
-    print(chat_history)
-
-    # TODO: similarity search
-    
-    with get_openai_callback() as cb:
-        final_answer, reference_docs = multi_query(question, retriever, chat_history)
-    processing_time = time.time() - start
-    print(processing_time)
-    save_history(question, final_answer, reference_docs, cb, processing_time)
-
-    return {"Answer": final_answer}
-
-def save_history(question, answer, reference, cb, processing_time):
-    # reference = [doc.dict() for doc in reference]
-    record = {
-        'Question': [question],
-        'Answer': [answer],
-        'Total_Tokens': [cb.total_tokens],
-        'Total_Cost': [cb.total_cost],
-        'Processing_time': [processing_time],
-        'Contexts': [str(reference)]
-    }
-    df = pd.DataFrame(record)
-    engine = create_engine(URI)
-    df.to_sql(name='systex_records', con=engine, index=False, if_exists='append')
-
-class history_output(BaseModel):
-    Question: str
-    Answer: str
-    Contexts: str
-    Total_Tokens: int
-    Total_Cost: float
-    Processing_time: float
-    Time: datetime.datetime
-    
-@app.get('/history', response_model=List[history_output])
-async def get_history():
-    engine = create_engine(URI, echo=True)
-
-    df = pd.read_sql_table("systex_records", engine.connect())  
-    df.fillna('', inplace=True)
-    result = df.to_json(orient='index', force_ascii=False)
-    result = loads(result)
-    return result.values()
-
-@app.get("/")
-def read_root():
-    return {"message": "Welcome to the SYSTEX API"}
-
-
-if __name__ == "__main__":
-    uvicorn.run("RAG_app:app", host='127.0.0.1', port=8081, reload=True)
-    
-# if __name__ == "__main__":
-#     uvicorn.run("RAG_app:app", host='cmm.ai', port=8081, reload=True, ssl_keyfile="/etc/letsencrypt/live/cmm.ai/privkey.pem", 
-#                 ssl_certfile="/etc/letsencrypt/live/cmm.ai/fullchain.pem")
-
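For orientation, the removed `RAG_app.py` exposed the RAG pipeline over HTTP. The sketch below is not part of the repository; it assumes the service is running locally on port 8081 (as in the `__main__` block above) and that the `requests` package is installed, and it posts a question plus prior chat turns to `/answer_with_history`. Entries whose `a` field is empty are dropped server-side before being passed to `multi_query`.

```python
# Client sketch (not part of the original repo): calls the removed
# /answer_with_history endpoint, assuming the app runs on 127.0.0.1:8081.
import requests

# question is a query parameter; chat_history is the JSON body
# (a list of ChatHistoryItem objects with "q"/"a" fields).
chat_history = [
    {"q": "previous question", "a": "previous answer"},
]

resp = requests.post(
    "http://127.0.0.1:8081/answer_with_history",
    params={"question": "什麼是碳排放獎勵辦法?"},  # sample question from taide_rag.py
    json=chat_history,
    timeout=120,
)
resp.raise_for_status()
print(resp.json()["Answer"])
```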

+ 0 - 20
README_docker.md

@@ -1,20 +0,0 @@
-# Run TAIDE RAG in VM
-
-## Prerequisites
-- Docker and Docker Compose
-- Ollama (for creating the taide-local model)
-- Download the TAIDE .gguf file from https://huggingface.co/taide/TAIDE-LX-7B-Chat-4bit/tree/main?show_file_info=taide-7b-a.2-q4_k_m.gguf and update the file path in `Modelfile`
-
-## Setting up taide-local
-
-1. Install Ollama on your VM 
-`curl -fsSL https://ollama.com/install.sh | sh`
-2. Create the taide-local model
-`ollama create taide-local -f Modelfile`
-
-## Running the Application
-1. Clone this repository.
-`git clone -b public https://github.com/yourusername/your-repo.git`
-2. Create a `.env` file in the project root with your API keys.
-3. Run `docker-compose up --build` 
-4. The application will be available at `http://localhost:8081`.
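Step 2 leaves the environment file contents open. Based on the variables referenced in the removed `RAG_app.py` and `docker-compose.yml`, a pre-flight check such as the following sketch (an assumption, not code from the repo) can confirm they are set; note that `RAG_app.py` loads `environment.env` rather than `.env`.

```python
# Pre-flight sketch (not part of the repo): verifies that the environment
# variables referenced by RAG_app.py and docker-compose.yml are set.
import os

from dotenv import load_dotenv

# RAG_app.py loads "environment.env"; adjust the filename if you use .env
load_dotenv("environment.env")

required = ["OPENAI_API_KEY", "SUPABASE_URL", "SUPABASE_KEY", "SUPABASE_URI"]
missing = [name for name in required if not os.getenv(name)]
if missing:
    raise SystemExit(f"Missing environment variables: {', '.join(missing)}")
print("All required environment variables are set.")
```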

+ 0 - 11
docker-compose.yml

@@ -1,11 +0,0 @@
-services:
-  app:
-    build: .
-    ports:
-      - "8081:8081"
-    environment:
-      - OPENAI_API_KEY=${OPENAI_API_KEY}
-      - SUPABASE_URL=${SUPABASE_URL}
-      - SUPABASE_KEY=${SUPABASE_KEY}
-    volumes:
-      - ./Documents:/app/Documents

+ 0 - 15
dockerfile

@@ -1,15 +0,0 @@
-FROM python:3.9
-
-WORKDIR /app
-
-COPY requirements.txt .
-RUN pip install --no-cache-dir -r requirements.txt
-
-# Copy application files
-COPY RAG_strategy.py .
-COPY RAG_app_copy.py .
-COPY add_vectordb.py .
-COPY Indexing_Split.py .
-COPY Documents/ ./Documents/
-
-CMD ["python", "RAG_app_copy.py"]

+ 0 - 74
ollama_chat.py

@@ -1,74 +0,0 @@
-import subprocess
-import json
-from typing import Any, List, Optional, Dict
-from langchain_core.callbacks import CallbackManagerForLLMRun
-from langchain_core.language_models import BaseChatModel
-from langchain_core.messages import BaseMessage, AIMessage, HumanMessage
-from langchain_core.outputs import ChatResult, ChatGeneration
-from pydantic import Field
-
-class OllamaChatModel(BaseChatModel):
-    model_name: str = Field(default="taide-local")
-
-    def _generate(
-        self,
-        messages: List[BaseMessage],
-        stop: Optional[List[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
-        **kwargs: Any,
-    ) -> ChatResult:
-        prompt = "\n".join([f"{msg.__class__.__name__}: {msg.content}" for msg in messages])
-        
-        command = ["ollama", "run", self.model_name, prompt]
-        result = subprocess.run(command, capture_output=True, text=True)
-        
-        if result.returncode != 0:
-            raise Exception(f"Ollama command failed: {result.stderr}")
-        
-        content = result.stdout.strip()
-        
-        message = AIMessage(content=content)
-        generation = ChatGeneration(message=message)
-        return ChatResult(generations=[generation])
-
-    @property
-    def _llm_type(self) -> str:
-        return "ollama-chat-model"
-
-def check_model_availability(model_name: str):
-    result = subprocess.run(["ollama", "list"], capture_output=True, text=True)
-    if result.returncode != 0:
-        print(f"Error checking model availability: {result.stderr}")
-        return False
-    
-    models = result.stdout.splitlines()
-    return any(model_name in model for model in models)
-
-# Usage example
-if __name__ == "__main__":
-    model_name = "taide-local"
-
-    print(f"Checking availability of model {model_name}...")
-    if not check_model_availability(model_name):
-        print(f"Model {model_name} is not available. Please check if it's correctly installed in Ollama.")
-        exit(1)
-
-    model = OllamaChatModel(model_name=model_name)
-    
-    print(f"Starting chat with {model_name} model. Type 'exit' to quit.")
-    
-    messages = []
-    while True:
-        user_input = input("You: ")
-        if user_input.lower() == 'exit':
-            break
-        
-        messages.append(HumanMessage(content=user_input))
-        try:
-            response = model.invoke(messages)
-            print("AI:", response.content)
-            messages.append(AIMessage(content=response.content))
-        except Exception as e:
-            print(f"Error communicating with Ollama: {e}")
-
-    print("Chat session ended. Goodbye!")
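Because `OllamaChatModel` subclasses `BaseChatModel`, it can also be dropped into a LangChain runnable chain. The following sketch is an assumption, not code from the repository; it presumes the class above is importable from `ollama_chat.py` and that the `taide-local` model has already been created in Ollama.

```python
# Sketch (not part of the repo): using OllamaChatModel inside a simple
# LangChain prompt | model | parser chain.
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate

from ollama_chat import OllamaChatModel  # the class defined above

prompt = ChatPromptTemplate.from_template("Answer briefly: {question}")
chain = prompt | OllamaChatModel(model_name="taide-local") | StrOutputParser()

print(chain.invoke({"question": "什麼是碳標籤?"}))
```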

+ 0 - 30
taide_rag.py

@@ -1,30 +0,0 @@
-from dotenv import load_dotenv
-from langchain.vectorstores import Chroma
-import os
-load_dotenv('environment.env')
-openai_api_key = os.getenv("OPENAI_API_KEY")
-from RAG_strategy import taide_llm, multi_query, naive_rag
-from langchain.vectorstores import FAISS
-from langchain.embeddings import OpenAIEmbeddings
-from langchain.document_loaders import TextLoader
-from langchain.text_splitter import CharacterTextSplitter
-
-
-
-# Load and prepare a sample document
-loader = TextLoader("test_data.txt")
-documents = loader.load()
-text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
-docs = text_splitter.split_documents(documents)
-
-# Create a vector store
-embeddings = OpenAIEmbeddings()
-vectorstore = Chroma.from_documents(docs, embeddings)
-retriever = vectorstore.as_retriever()
-
-# Test multi_query
-print("\nTesting multi_query:")
-question = "什麼是碳排放獎勵辦法?"
-answer, docs = multi_query(question, retriever, [])
-print(f"Question: {question}")
-print(f"Answer: {answer}")

+ 0 - 6
test_data.txt

@@ -1,6 +0,0 @@
-辦法所稱低碳產品,指符合下列條件之一者:
-一、取得中央主管機關核發碳足跡標籤(以下簡稱碳標籤)使用權,且碳足跡數值為同類型碳標籤產品中前百分之十。
-二、取得中央主管機關核發之碳足跡減量標籤(以下簡稱減碳標籤)使用權。
-三、經中央主管機關審查展期通過且具實際減碳成效之碳標籤使用權。
-前項第一款所稱同類型碳標籤產品,指適用相同之碳足跡產品類別規則文件,且中華民國輸出入貨品分類號列前六碼相同或歸屬於相同之中華民國行業標準分類細類產品。但
-碳標籤產品無法以中華民國輸出入貨品分類號列或中華民國行業標準分類進行分類者,得檢具主管機關或