12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061 |
import time
import pandas as pd
from config import (
    current_dir, CSV_FILE, system_prompt,
    EMBEDDINGS_FILE, FAISS_INDEX_FILE
)
from langchain.globals import set_llm_cache
from langchain_community.cache import SQLiteCache
from embeddings import load_embeddings
from rag_chain import get_context, simple_rag_prompt, calculate_similarity

# Cache LLM responses in a local SQLite database so that repeated
# identical prompts are answered from disk instead of re-querying the model.
set_llm_cache(SQLiteCache(database_path=".langchain.db"))
def main(n=8):
    """Run the first *n* question/answer pairs from the QA CSV through the
    RAG pipeline and write an evaluation report to ``rag_output.txt``.

    For each question the report records: the original answer, the RAG
    answer, wall-clock response time, the retrieval similarity score
    returned by ``simple_rag_prompt``, and the similarity between the
    original and RAG answers.

    Args:
        n: Number of leading CSV rows to evaluate (default 8). Clamped to
           the number of rows actually present in the CSV.
    """
    embeddings, docs, df, index = load_embeddings()

    # Retrieval closure: given a question, fetch supporting context
    # from the FAISS index over the embedded documents.
    retrieval_chain = lambda q: get_context(q, index, docs)

    csv_path = f"{current_dir}/{CSV_FILE}"
    qa_df = pd.read_csv(csv_path)

    output_file = 'rag_output.txt'

    # Guard against n exceeding the CSV row count: without this,
    # qa_df.iloc[i] would raise IndexError on short files.
    total = min(n, len(qa_df))

    with open(output_file, 'w', encoding='utf-8') as f:
        for i in range(total):
            try:
                row = qa_df.iloc[i]  # single positional lookup per question
                question = row['question']
                original_answer = row['answer']

                start_time = time.time()
                rag_answer, similarity_score = simple_rag_prompt(retrieval_chain, question)
                response_time = time.time() - start_time

                answer_similarity = calculate_similarity(original_answer, rag_answer)

                f.write(f"Question {i+1}: {question}\n")
                f.write(f"Original Answer: {original_answer}\n")
                f.write(f"RAG Answer: {rag_answer}\n")
                f.write(f"Response Time: {response_time:.2f} seconds\n")
                f.write(f"Retrieval Similarity Score: {similarity_score:.4f}\n")
                f.write(f"Answer Similarity Score: {answer_similarity:.4f}\n")
                f.write("-" * 50 + "\n")

                f.flush()  # persist progress immediately in case of a crash
                print(f"Processed question {i+1}")

                time.sleep(1)  # throttle to avoid hammering the LLM endpoint
            except Exception as e:
                # Best-effort batch run: record the failure in both the
                # console and the report, then continue with the next
                # question instead of aborting the whole batch.
                print(f"Error processing question {i+1}: {str(e)}")
                f.write(f"Error processing question {i+1}: {str(e)}\n")
                f.write("-" * 50 + "\n")
                f.flush()

    print(f"Output has been saved to {output_file}")


if __name__ == "__main__":
    main()
|