# ai_agent.py

from langchain_community.chat_models import ChatOllama
from langchain_core.output_parsers import JsonOutputParser, StrOutputParser
from langchain_core.prompts import PromptTemplate, ChatPromptTemplate

# graph usage
from pprint import pprint
from typing import List
from langchain_core.documents import Document
from typing_extensions import TypedDict
from langgraph.graph import END, StateGraph, START
from langgraph.pregel import RetryPolicy

# supabase db
from langchain_community.utilities import SQLDatabase
import os
from dotenv import load_dotenv

load_dotenv()
URI: str = os.environ.get('SUPABASE_URI')
db = SQLDatabase.from_uri(URI)

# LLM
# local_llm = "llama3.1:8b-instruct-fp16"
# local_llm = "llama3.1:8b-instruct-q2_K"
local_llm = "llama3-groq-tool-use:latest"
llm_json = ChatOllama(model=local_llm, format="json", temperature=0)
# llm = ChatOllama(model=local_llm, temperature=0)
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(model_name="gpt-4o-mini", temperature=0)

# RAG usage
# NOTE: the local faiss_query() defined below shadows the helper of the same name in
# faiss_index, so only the retriever factory and the multi-query helper are imported.
from faiss_index import create_faiss_retriever, faiss_multiquery
retriever = create_faiss_retriever()

# text-to-sql usage
from text_to_sql_private import run, get_query, query_to_nl, table_description
from post_processing_sqlparse import get_query_columns, parse_sql_where, get_table_name

progress_bar = []


def faiss_query(question: str, llm, docs=None, multi_query: bool = False):
    if multi_query:
        docs = faiss_multiquery(question, retriever, llm, k=4)
        # print(docs)
    elif docs:
        pass
    else:
        docs = retriever.get_relevant_documents(question, k=10)
        # print(docs)
    context = docs

    system_prompt: str = "你是一個來自台灣的AI助理,樂於以台灣人的立場幫助使用者,會用繁體中文回答問題。"
    template = """
    <|begin_of_text|>
    <|start_header_id|>system<|end_header_id|>
    你是一個來自台灣的ESG的AI助理,請用繁體中文回答問題 \n
    You should not mention anything about "根據提供的文件內容" or other similar terms.
    請盡可能的詳細回答問題。
    如果你不知道答案請回答:"很抱歉,目前我無法回答您的問題,請將您的詢問發送至 test@systex.com 以便獲得更進一步的幫助,謝謝。"
    勿回答無關資訊或任何與某特定公司相關的問題
    <|eot_id|>
    <|start_header_id|>user<|end_header_id|>
    Answer the following question based on this context:
    {context}
    Question: {question}
    用繁體中文回答問題,請用一段話詳細的回答。勿回答無關資訊或任何與某特定公司相關的問題。
    如果你不知道答案請回答:"很抱歉,目前我無法回答您的問題,請將您的詢問發送至 test@systex.com 以便獲得更進一步的幫助,謝謝。"
    <|eot_id|>
    <|start_header_id|>assistant<|end_header_id|>
    """
    prompt = ChatPromptTemplate.from_template(
        system_prompt + "\n\n" +
        template
    )

    rag_chain = prompt | llm | StrOutputParser()
    return docs, rag_chain.invoke({"context": context, "question": question})
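
# A minimal usage sketch (not executed here): this is how the RAG helper above is called
# by the graph nodes further down; the question string is only an illustrative placeholder.
#
#   docs, answer = faiss_query("什麼是碳盤查?", llm, multi_query=True)
#   print(answer)  # Traditional-Chinese answer generated from the retrieved documents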

### Hallucination Grader
def Hallucination_Grader():
    # Prompt
    prompt = PromptTemplate(
        template="""<|begin_of_text|><|start_header_id|>system<|end_header_id|>
        You are a grader assessing whether an answer is grounded in / supported by a set of facts.
        Give a 'yes' or 'no' score to indicate whether the answer is grounded in / supported by the set of facts.
        Provide the score as a JSON with a single key 'score' and no preamble or explanation.
        <|eot_id|><|start_header_id|>user<|end_header_id|>
        Here are the facts:
        \n ------- \n
        {documents}
        \n ------- \n
        Here is the answer: {generation}
        Provide the 'yes' or 'no' score as a JSON with a single key 'score' and no preamble or explanation.
        <|eot_id|><|start_header_id|>assistant<|end_header_id|>""",
        input_variables=["generation", "documents"],
    )

    hallucination_grader = prompt | llm_json | JsonOutputParser()
    return hallucination_grader
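
# A minimal usage sketch (not executed here): the grader chain returns the parsed JSON
# as a dict such as {"score": "yes"}; `docs` and `answer` are illustrative placeholders.
#
#   hallucination_grader = Hallucination_Grader()
#   verdict = hallucination_grader.invoke({"documents": docs, "generation": answer})
#   grounded = verdict.get("score") == "yes"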

### Answer Grader
def Answer_Grader():
    # Prompt
    prompt = PromptTemplate(
        template="""
        <|begin_of_text|><|start_header_id|>system<|end_header_id|> You are a grader assessing whether an
        answer is useful to resolve a question. Give a binary score 'yes' or 'no' to indicate whether the answer is
        useful to resolve a question. Provide the binary score as a JSON with a single key 'score' and no preamble or explanation.
        <|eot_id|><|start_header_id|>user<|end_header_id|> Here is the answer:
        \n ------- \n
        {generation}
        \n ------- \n
        Here is the question: {question}
        <|eot_id|><|start_header_id|>assistant<|end_header_id|>""",
        input_variables=["generation", "question"],
    )

    answer_grader = prompt | llm_json | JsonOutputParser()
    return answer_grader

# Text-to-SQL
def run_text_to_sql(question: str):
    selected_table = ['用水度數', '用水度數', '建準碳排放清冊數據new']
    # question = "建準去年的固定燃燒總排放量是多少?"
    query, result, answer = run(db, question, selected_table, llm)
    return answer, query


def _get_query(question: str):
    selected_table = ['用水度數', '用水度數', '建準碳排放清冊數據new']
    question = question.replace("美國", "美國 Inc")
    question = question.replace("法國", "法國 SAS")
    query, result = get_query(db, question, selected_table, llm)
    return query, result


def _query_to_nl(question: str, query: str, result):
    question = question.replace("美國", "美國 Inc")
    question = question.replace("法國", "法國 SAS")
    answer = query_to_nl(question, query, result, llm)
    return answer


def generate_additional_question(sql_query):
    terms = parse_sql_where(sql_query)
    question_list = []
    for term in terms:
        if term is None:
            continue
        question_format = [f"什麼是{term}?", f"{term}的用途是什麼"]
        question_list.extend(question_format)
    return question_list


def generate_additional_detail(sql_query):
    terms = parse_sql_where(sql_query)
    answer = ""
    all_documents = []
    for term in list(set(terms)):
        print(term)
        if term is None:
            continue
        question_format = [f"溫室氣體排放源中的{term}是什麼意思?", f"{term}是什麼意思?"]
        for question in question_format:
            documents = retriever.get_relevant_documents(question, k=5)
            all_documents.extend(documents)
        all_question = "".join(question_format)
        documents, generation = faiss_query(all_question, llm, docs=all_documents, multi_query=True)
        if "test@systex.com" in generation:
            generation = ""
        answer += generation
        # print(question)
        # print(generation)
    return answer


### SQL Grader
def SQL_Grader():
    prompt = PromptTemplate(
        template="""<|begin_of_text|><|start_header_id|>system<|end_header_id|>
        You are a SQL query grader assessing the correctness of a PostgreSQL query for a user question.
        Based on the following database description, you need to grade whether the PostgreSQL query exactly matches the user question.
        Here is the database description:
        {table_info}
        You need to check that each WHERE condition correctly filters what the user question asks for.
        For example, if the user question is "建準去年固定燃燒總排放量是多少?" and the PostgreSQL query is
        "SELECT SUM("排放量(公噸CO2e)") AS "下游租賃總排放量"
        FROM "建準碳排放清冊數據new"
        WHERE "事業名稱" like '%建準%'
        AND "排放源" = '下游租賃'
        AND "盤查標準" = 'GHG'
        AND "年度" = EXTRACT(YEAR FROM CURRENT_DATE)-1;"
        then the user asked for "固定燃燒", but the PostgreSQL query filters on "排放源" = '下游租賃' in the WHERE statement, so the PostgreSQL query is incorrect for the user question.
        Another example: the user question is "建準去年的固定燃燒總排放量是多少?" and the PostgreSQL query is
        "SELECT SUM("排放量(公噸CO2e)") AS "固定燃燒總排放量"
        FROM "建準碳排放清冊數據new"
        WHERE "事業名稱" like '%台積電%'
        AND "排放源" = '固定燃燒'
        AND "盤查標準" = 'GHG'
        AND "年度" = EXTRACT(YEAR FROM CURRENT_DATE)-1;"
        then the user asked for "建準", but the PostgreSQL query filters on "事業名稱" like '%台積電%' in the WHERE statement, so the PostgreSQL query is incorrect for the user question.
        You need to strictly examine whether the PostgreSQL query matches the user question;
        if the PostgreSQL query does not exactly match the user question, grade it as incorrect.
        Give a binary score 'yes' or 'no' to indicate whether the PostgreSQL query is correct for the question. \n
        Provide the binary score as a JSON with a single key 'score' and no preamble or explanation.
        <|eot_id|>
        <|start_header_id|>user<|end_header_id|>
        Here is the PostgreSQL query: \n\n {sql_query} \n\n
        Here is the user question: {question} \n <|eot_id|><|start_header_id|>assistant<|end_header_id|>
        """,
        input_variables=["table_info", "question", "sql_query"],
    )

    sql_query_grader = prompt | llm_json | JsonOutputParser()
    return sql_query_grader
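
# A minimal usage sketch (not executed here), mirroring the disabled grading block inside
# grade_sql_query below; `generated_query` is an illustrative placeholder.
#
#   sql_query_grader = SQL_Grader()
#   score = sql_query_grader.invoke({
#       "table_info": table_description(),
#       "question": "建準去年的固定燃燒總排放量是多少?",
#       "sql_query": generated_query,
#   })
#   is_correct = score.get("score") == "yes"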

### Router
def Router():
    prompt = PromptTemplate(
        template="""<|begin_of_text|><|start_header_id|>system<|end_header_id|>
        You are an expert at routing a user question to 專業知識 or 自有數據.
        你需要分辨使用者問題是否在詢問某個公司與其據點廠房的自有數據或是尋求專業的碳盤查或碳管理等等的 ESG 知識和相關新聞,
        如果問題是想了解某個公司與其據點廠房的碳排放源的排放量或用電、用水量等等,請使用"自有數據",
        若使用者的問題是想了解碳盤查、碳交易或碳管理等等的 ESG 知識和相關新聞,請使用"專業知識"。
        You do not need to be stringent with the keywords in the question related to these topics.
        Give a binary choice '自有數據' or '專業知識' based on the question.
        Return a JSON with a single key 'datasource' and no preamble or explanation.
        Question to route: {question}
        <|eot_id|><|start_header_id|>assistant<|end_header_id|>""",
        input_variables=["question"],
    )

    question_router = prompt | llm_json | JsonOutputParser()
    return question_router
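
# A minimal usage sketch (not executed here): the router is expected to return a dict
# like {"datasource": "自有數據"} or {"datasource": "專業知識"}; the question is illustrative.
#
#   question_router = Router()
#   source = question_router.invoke({"question": "建準去年的總排放量是多少?"})
#   print(source["datasource"])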

class GraphState(TypedDict):
    """
    Represents the state of our graph.

    Attributes:
        progress_bar: progress messages collected while the graph runs
        route: which branch handled the question ("SQL" or "RAG")
        question: user question
        question_list: follow-up questions generated from the SQL query
        generation: LLM generation
        documents: list of retrieved documents
        retry: number of text-to-SQL retries
        sql_query: generated PostgreSQL query
        sql_result: raw result of executing the query
    """
    progress_bar: List[str]
    route: str
    question: str
    question_list: List[str]
    generation: str
    documents: List[str]
    retry: int
    sql_query: str
    sql_result: str
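
# Sketch of the initial state (see main() below): the graph is seeded with only the
# question and a progress_bar placeholder; each node returns just the keys it updates.
#
#   initial_state = {"question": "建準北海廠去年的固定燃燒排放量是多少?", "progress_bar": None}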

# Node
def show_progress(state, progress: str):
    global progress_bar
    # progress_bar = state["progress_bar"] if state["progress_bar"] else []
    print(progress)
    progress_bar.append(progress)
    return progress_bar

def retrieve_and_generation(state):
    """
    Retrieve documents from the vectorstore and generate an answer.

    Args:
        state (dict): The current graph state
    Returns:
        state (dict): New keys added to state: documents (the retrieved documents) and generation (the answer generated by the LLM)
    """
    progress_bar = show_progress(state, "---RETRIEVE---")
    if not state.get("route"):
        route = "RAG"
    else:
        route = state["route"]
    question = state["question"]
    documents, generation = faiss_query(question, llm, multi_query=True)
    print(generation)

    return {"progress_bar": progress_bar, "route": route, "documents": documents, "question": question, "generation": generation}

def company_private_data_get_sql_query(state):
    """
    Generate the PostgreSQL query for the question.

    Args:
        state (dict): The current graph state
    Returns:
        state (dict): The generated PostgreSQL query, its raw result, and the retry count
    """
    # print("---SQL QUERY---")
    progress_bar = show_progress(state, "---SQL QUERY---")
    if not state.get("route"):
        route = "SQL"
    else:
        route = state["route"]
    question = state["question"]
    if state.get("retry"):
        retry = state["retry"]
        retry += 1
    else:
        retry = 0
    # print("RETRY: ", retry)
    sql_query, sql_result = _get_query(question)
    print(type(sql_result))

    return {"progress_bar": progress_bar, "route": route, "sql_query": sql_query, "sql_result": sql_result, "question": question, "retry": retry}

def company_private_data_search(state):
    """
    Convert the PostgreSQL query and its result into a natural-language answer.

    Args:
        state (dict): The current graph state
    Returns:
        state (dict): SQL results, converted to natural language, appended to state
    """
    # print("---SQL TO NL---")
    progress_bar = show_progress(state, "---SQL TO NL---")
    # print(state)
    question = state["question"]
    sql_query = state["sql_query"]
    sql_result = state["sql_result"]
    generation = _query_to_nl(question, sql_query, sql_result)
    # generation = [company_private_data_result]

    return {"progress_bar": progress_bar, "sql_query": sql_query, "question": question, "generation": generation}

def additional_explanation_question(state):
    """
    Append an explanation of the terms that appear in the SQL query.

    Args:
        state (dict): The current graph state
    Returns:
        state (dict): Additional explanation appended to the generation
    """
    # print("---ADDITIONAL EXPLANATION---")
    progress_bar = show_progress(state, "---ADDITIONAL EXPLANATION---")
    # print(state)
    question = state["question"]
    sql_query = state["sql_query"]
    # print(sql_query)
    generation = state["generation"]
    generation += "\n"
    generation += generate_additional_detail(sql_query)
    question_list = []
    # question_list = generate_additional_question(sql_query)
    # print(question_list)
    # generation = [company_private_data_result]

    return {"progress_bar": progress_bar, "sql_query": sql_query, "question": question, "generation": generation, "question_list": question_list}


def error(state):
    # print("---SOMETHING WENT WRONG---")
    progress_bar = show_progress(state, "---SOMETHING WENT WRONG---")
    generation = "很抱歉,目前我無法回答您的問題,請將您的詢問發送至 test@systex.com 以便獲得更進一步的幫助,謝謝。"

    return {"progress_bar": progress_bar, "generation": generation}

### Conditional edge
def route_question(state):
    """
    Route the question to text-to-SQL or RAG.

    Args:
        state (dict): The current graph state
    Returns:
        str: Next node to call
    """
    # If a route was pre-set (e.g. by rag_main), skip the router and go straight to RAG.
    if "route" in state.keys():
        return "專業知識"
    # print("---ROUTE QUESTION---")
    progress_bar = show_progress(state, "---ROUTE QUESTION---")
    question = state["question"]
    # print(question)
    question_router = Router()
    source = question_router.invoke({"question": question})
    print("Original:", source["datasource"])
    # Keyword overrides on top of the LLM router's decision
    private_kw = ["建準", "北海", "廣興", "崑山廣興", "Inc", "SAS", "立準"]
    public_kw = ["範例", "碳足跡"]
    if any(kw in question for kw in private_kw):
        source["datasource"] = "自有數據"
    elif any(kw in question for kw in public_kw):
        source["datasource"] = "專業知識"
    # print(source)
    print(source["datasource"])

    if source["datasource"] == "自有數據":
        # print("---ROUTE QUESTION TO TEXT-TO-SQL---")
        progress_bar = show_progress(state, "---ROUTE QUESTION TO TEXT-TO-SQL---")
        return "自有數據"
    elif source["datasource"] == "專業知識":
        # print("---ROUTE QUESTION TO RAG---")
        progress_bar = show_progress(state, "---ROUTE QUESTION TO RAG---")
        return "專業知識"

def grade_generation_v_documents_and_question(state):
    """
    Determines whether the generation addresses the question.
    (The retrieved documents are kept in state, but only answer usefulness is graded here.)

    Args:
        state (dict): The current graph state
    Returns:
        str: Decision for next node to call
    """
    # print("---CHECK HALLUCINATIONS---")
    question = state["question"]
    documents = state["documents"]
    generation = state["generation"]

    progress_bar = show_progress(state, "---GRADE GENERATION vs QUESTION---")
    answer_grader = Answer_Grader()
    score = answer_grader.invoke({"question": question, "generation": generation})
    grade = score["score"]
    if grade in ["yes", "true", 1, "1"]:
        # print("---DECISION: GENERATION ADDRESSES QUESTION---")
        progress_bar = show_progress(state, "---DECISION: GENERATION ADDRESSES QUESTION---")
        return "useful"
    else:
        # print("---DECISION: GENERATION DOES NOT ADDRESS QUESTION---")
        progress_bar = show_progress(state, "---DECISION: GENERATION DOES NOT ADDRESS QUESTION---")
        return "not useful"

def grade_sql_query(state):
    """
    Determines whether the PostgreSQL query and its result look correct for the question.

    Args:
        state (dict): The current graph state
    Returns:
        str: Decision to continue ("correct") or fall back to RAG ("incorrect")
    """
    # print("---CHECK SQL CORRECTNESS TO QUESTION---")
    progress_bar = show_progress(state, "---CHECK SQL CORRECTNESS TO QUESTION---")
    question = state["question"]
    sql_query = state["sql_query"]
    sql_result = state["sql_result"]
    if "None" in sql_result or sql_result.startswith("Error:"):
        progress_bar = show_progress(state, "---INCORRECT SQL QUERY---")
        return "incorrect"
    else:
        print(sql_result)
        progress_bar = show_progress(state, "---CORRECT SQL QUERY---")
        return "correct"
    # LLM-based grading with SQL_Grader is currently disabled:
    # retry = state["retry"]
    # sql_query_grader = SQL_Grader()
    # score = sql_query_grader.invoke({"table_info": table_description(), "question": question, "sql_query": sql_query})
    # grade = score["score"]
    # if grade in ["yes", "true", 1, "1"]:
    #     # print("---GRADE: CORRECT SQL QUERY---")
    #     progress_bar = show_progress(state, "---GRADE: CORRECT SQL QUERY---")
    #     return "correct"
    # elif retry >= 5:
    #     # print("---GRADE: INCORRECT SQL QUERY AND REACH RETRY LIMIT---")
    #     progress_bar = show_progress(state, "---GRADE: INCORRECT SQL QUERY AND REACH RETRY LIMIT---")
    #     return "failed"
    # else:
    #     # print("---GRADE: INCORRECT SQL QUERY---")
    #     progress_bar = show_progress(state, "---GRADE: INCORRECT SQL QUERY---")
    #     return "incorrect"


def check_sql_answer(state):
    progress_bar = show_progress(state, "---CHECK SQL ANSWER QUALITY---")
    generation = state["generation"]
    if "test@systex.com" in generation:
        progress_bar = show_progress(state, "---SQL CAN NOT GENERATE ANSWER---")
        return "bad"
    else:
        progress_bar = show_progress(state, "---SQL CAN GENERATE ANSWER---")
        return "good"

def build_graph():
    workflow = StateGraph(GraphState)

    # Define the nodes
    workflow.add_node("Text-to-SQL", company_private_data_get_sql_query, retry=RetryPolicy(max_attempts=5))  # generate the SQL query
    workflow.add_node("SQL Answer", company_private_data_search, retry=RetryPolicy(max_attempts=5))  # turn the SQL result into natural language
    workflow.add_node("Additional Explanation", additional_explanation_question, retry=RetryPolicy(max_attempts=5))  # explain terms used in the query
    workflow.add_node("RAG", retrieve_and_generation, retry=RetryPolicy(max_attempts=5))  # retrieve and generate
    workflow.add_node("ERROR", error)  # fallback answer

    # Define the edges
    workflow.add_conditional_edges(
        START,
        route_question,
        {
            "自有數據": "Text-to-SQL",
            "專業知識": "RAG",
        },
    )
    workflow.add_conditional_edges(
        "RAG",
        grade_generation_v_documents_and_question,
        {
            "useful": END,
            "not useful": "ERROR",
        },
    )
    workflow.add_conditional_edges(
        "Text-to-SQL",
        grade_sql_query,
        {
            "correct": "SQL Answer",
            "incorrect": "RAG",
        },
    )
    workflow.add_conditional_edges(
        "SQL Answer",
        check_sql_answer,
        {
            "good": "Additional Explanation",
            "bad": "RAG",
        },
    )
    # workflow.add_edge("SQL Answer", "Additional Explanation")
    workflow.add_edge("Additional Explanation", END)

    app = workflow.compile()
    return app


app = build_graph()
draw_mermaid = app.get_graph().draw_mermaid()
print(draw_mermaid)

def main(question: str):
    inputs = {"question": question, "progress_bar": None}
    for output in app.stream(inputs, {"recursion_limit": 10}):
        for key, value in output.items():
            pprint(f"Finished running: {key}:")
            # pprint(value["generation"])
            # pprint(value)
    value["progress_bar"] = progress_bar
    # pprint(value["progress_bar"])
    # return value["generation"]
    return value


def rag_main(question: str):
    inputs = {"question": question, "progress_bar": None, "route": "專業知識"}
    for output in app.stream(inputs, {"recursion_limit": 10}):
        for key, value in output.items():
            pprint(f"Finished running: {key}:")
            # pprint(value["generation"])
            # pprint(value)
    value["progress_bar"] = progress_bar
    # pprint(value["progress_bar"])
    # return value["generation"]
    return value


if __name__ == "__main__":
    # result = main("建準去年的逸散排放總排放量是多少?")
    # result = main("建準廣興廠去年的上游運輸總排放量是多少?")
    result = main("建準北海廠去年的固定燃燒排放量是多少?")
    # result = main("溫室氣體是什麼?")
    # result = main("什麼是外購電力(綠電)?")
    print("------------------------------------------------------")
    print(result)