Delete 9. Image Processing Agent.ipynb

This commit is contained in:
Masih Moafi 2025-03-03 07:52:48 +03:30 committed by GitHub
parent ac7ee5bdf1
commit 99b56f3cbf
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

View File

@ -1,234 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "c413571f-c824-49be-ba5a-97e9a8476a6d",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import httpx\n",
"import json\n",
"from crewai import Agent, Task, Crew\n",
"from langchain.agents import tool\n",
"from langchain_community.llms import Ollama\n",
"\n",
"# Directory for your domain-specific documents (assumed to be .txt files)\n",
"DOCS_DIR = \"docs\"\n",
"\n",
"# Load documents from the 'docs' directory\n",
"def load_documents():\n",
" \"\"\"Loads all .txt files from the 'docs' directory and returns a list of (filename, content) tuples.\"\"\"\n",
" if not os.path.exists(DOCS_DIR):\n",
" return []\n",
" documents = []\n",
" for filename in os.listdir(DOCS_DIR):\n",
" if filename.endswith(\".txt\"):\n",
" with open(os.path.join(DOCS_DIR, filename), \"r\", encoding=\"utf-8\") as f:\n",
" content = f.read()\n",
" documents.append((filename, content))\n",
" return documents\n",
"\n",
"DOCUMENTS = load_documents()\n",
"\n",
"@tool\n",
"def google_search(query: str) -> str:\n",
" \"\"\"Searches Google using Serper API for web results based on the query.\n",
"\n",
" Args:\n",
" query (str): The search query.\n",
"\n",
" Returns:\n",
" str: A summary of Google search results from Serper or a message if the search fails.\n",
" \"\"\"\n",
" url = \"https://google.serper.dev/search\"\n",
" headers = {\n",
    "        \"X-API-KEY\": os.environ.get(\"SERPER_API_KEY\", \"\"),  # never hard-code API keys\n",
" \"Content-Type\": \"application/json\"\n",
" }\n",
" response = httpx.post(url, json={\"q\": query}, headers=headers)\n",
" if response.status_code == 200:\n",
" data = response.json()\n",
" results = []\n",
" for result in data.get(\"organic\", []):\n",
" title = result.get(\"title\", \"No title\")\n",
" snippet = result.get(\"snippet\", \"\")\n",
" link = result.get(\"link\", \"\")\n",
" results.append(f\"{title}: {snippet} ({link})\")\n",
" if len(results) >= 3:\n",
" break\n",
" return \"\\n\".join(results) if results else \"No results found using Google Serper.\"\n",
" return \"Error performing Google Serper search.\"\n",
"\n",
"@tool\n",
"def search_docs(query: str) -> str:\n",
" \"\"\"Searches local documents for relevant information based on the query.\n",
" \n",
" Args:\n",
" query (str): The search query.\n",
" \n",
" Returns:\n",
" str: Snippets from documents matching the query or a message if no documents are found.\n",
" \"\"\"\n",
" if not DOCUMENTS:\n",
" return \"No documents found.\"\n",
" query_words = query.lower().split()\n",
" results = []\n",
" for filename, content in DOCUMENTS:\n",
" lines = content.split('\\n')\n",
" for line in lines:\n",
" if any(word in line.lower() for word in query_words):\n",
    "                results.append(f\"From {filename}: {line[:200]}\")\n",
" if len(results) >= 3:\n",
" break\n",
" if len(results) >= 3:\n",
" break\n",
" return \"\\n\".join(results) if results else \"No relevant information found in documents.\"\n",
"\n",
"@tool\n",
"def wikipedia(query: str) -> str:\n",
" \"\"\"Searches Persian Wikipedia for a summary based on the query.\n",
" \n",
" Args:\n",
" query (str): The search query.\n",
" \n",
" Returns:\n",
" str: The snippet from the first Wikipedia search result or a message if no results are found.\n",
" \"\"\"\n",
" response = httpx.get(\"https://fa.wikipedia.org/w/api.php\", params={\n",
" \"action\": \"query\",\n",
" \"list\": \"search\",\n",
" \"srsearch\": query,\n",
" \"format\": \"json\",\n",
" \"utf8\": 1\n",
" })\n",
" results = response.json().get(\"query\", {}).get(\"search\", [])\n",
" if results:\n",
" return results[0].get(\"snippet\", \"اطلاعاتی یافت نشد.\")\n",
" return \"اطلاعاتی یافت نشد در ویکی‌پدیا.\"\n",
"\n",
"@tool\n",
"def duckduckgo(query: str) -> str:\n",
" \"\"\"Searches DuckDuckGo for web results based on the query.\n",
" \n",
" Args:\n",
" query (str): The search query.\n",
" \n",
" Returns:\n",
" str: A placeholder message with search results or a message if the search fails.\n",
" \"\"\"\n",
" response = httpx.get(\"https://duckduckgo.com/html/\", params={\"q\": query})\n",
" if response.status_code == 200:\n",
" return \"نتایج جستجوی DuckDuckGo: [صفحه 1, صفحه 2, صفحه 3]\"\n",
" return \"نتایجی از DuckDuckGo یافت نشد.\"\n",
"\n",
"# Set up the language model (using Llama3.2 for now)\n",
"llm = Ollama(model=\"llama3.2\")\n",
"\n",
"# Create the agent with CrewAI prioritizing Google Serper search first\n",
"agent = Agent(\n",
" role=\"researcher\",\n",
" goal=\"Answer the user's question by primarily using Google search results from the Serper API, then using local documents, Persian Wikipedia, and DuckDuckGo if needed.\",\n",
" backstory=\"You are an AI assistant designed to efficiently gather and synthesize information from multiple sources, prioritizing Google search results from the Serper API.\",\n",
" tools=[google_search, search_docs, wikipedia, duckduckgo],\n",
" llm=llm,\n",
" verbose=True\n",
")\n",
"\n",
"def query(question):\n",
" \"\"\"Runs the query through the agent and saves the result.\n",
" \n",
" Args:\n",
" question (str): The user's question.\n",
" \"\"\"\n",
" task = Task(\n",
" description=question,\n",
" expected_output=\"A detailed answer to the user's question.\",\n",
" agent=agent\n",
" )\n",
" crew = Crew(\n",
" agents=[agent],\n",
" tasks=[task],\n",
" verbose=2\n",
" )\n",
" result = crew.kickoff()\n",
" print(result)\n",
" save_to_file(str(result))\n",
"\n",
"def save_to_file(text, path=\"C:/Users/ASUS/Downloads/Month 2/agents.txt\"):\n",
" \"\"\"Saves the result to a file.\n",
" \n",
" Args:\n",
" text (str): The text to save.\n",
" path (str): The file path to save the text.\n",
" \"\"\"\n",
" with open(path, 'a', encoding='utf-8') as f:\n",
" f.write(text + \"\\n\" + \"=\"*50 + \"\\n\")\n",
"\n",
"if __name__ == \"__main__\":\n",
" # Example queries to test the agent\n",
" q1 = \"شاه عباس صفوی آدم خوبی بوده؟ چرا؟\"\n",
" q2 = \"وقتی چراغ DNS مودم قطع میشه به چه معنیه؟\"\n",
" query(q1)\n",
" query(q2)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "052e7c32-65c4-40ce-8454-3c404075a80f",
"metadata": {},
"outputs": [],
"source": [
    "import os\n",
    "import requests\n",
    "\n",
    "url = \"https://api.va.landing.ai/v1/tools/agentic-document-analysis\"\n",
    "files = {\n",
    "    \"image\": open(\"C:/Users/ASUS/Desktop/benefits of RAG.png\", \"rb\")\n",
    "}\n",
    "headers = {\n",
    "    \"Authorization\": \"Basic \" + os.environ[\"LANDING_AI_API_KEY\"],  # never hard-code credentials\n",
"response = requests.post(url, files=files, headers=headers)\n",
"print(response.json())"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0b48cc6f-81be-48f0-be88-1de07a0f47f6",
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"\n",
"with open(\"C:/Users/ASUS/Desktop/today.txt\", \"r\") as f:\n",
" data = json.load(f)\n",
"\n",
"print(data[\"data\"][\"markdown\"])"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.7"
}
},
"nbformat": 4,
"nbformat_minor": 5
}