Mirror of https://github.com/fsecada01/Pygentic-AI.git, synced 2025-07-08 20:24:04 +00:00

Commit: initial building of routers and Agent management of SWOT analysis
core_requirements.in

@@ -12,6 +12,7 @@ httpx[socks]
httpx-html
html5lib
hypercorn; platform_system == 'Windows'
jinjax
loguru
lxml
lxml[html_clean]
@@ -19,6 +20,7 @@ python-dateutil
python-decouple
python-slugify
psycopg
pydantic-ai[examples]
pytz
redis
simplejson
core_requirements.txt

@@ -10,16 +10,29 @@ annotated-types==0.7.0
    # via
    #   pydantic
    #   sqlmodel-crud-utilities
anthropic==0.43.0
    # via pydantic-ai-slim
anyio==4.8.0
    # via
    #   anthropic
    #   groq
    #   httpx
    #   openai
    #   starlette
appdirs==1.4.4
    # via pyppeteer
asgiref==3.8.1
    # via opentelemetry-instrumentation-asgi
asttokens==2.4.1
    # via devtools
asyncpg==0.30.0
    # via pydantic-ai-examples
beautifulsoup4==4.12.3
    # via httpx-html
billiard==4.2.1
    # via celery
cachetools==5.5.0
    # via google-auth
celery==5.4.0
    # via
    #   -r core_requirements.in
@@ -29,6 +42,9 @@ certifi==2024.12.14
    #   httpcore
    #   httpx
    #   pyppeteer
    #   requests
charset-normalizer==3.4.1
    # via requests
click==8.1.8
    # via
    #   celery
@@ -45,11 +61,32 @@ click-repl==0.3.0
colorama==0.4.6
    # via
    #   click
    #   griffe
    #   loguru
    #   sqlmodel-crud-utilities
    #   tqdm
cssselect==1.2.0
    # via pyquery
deprecated==1.2.15
    # via
    #   opentelemetry-api
    #   opentelemetry-exporter-otlp-proto-http
    #   opentelemetry-semantic-conventions
devtools==0.12.2
    # via pydantic-ai-examples
distro==1.9.0
    # via
    #   anthropic
    #   groq
    #   openai
eval-type-backport==0.2.2
    # via
    #   mistralai
    #   pydantic-ai-slim
executing==2.1.0
    # via
    #   devtools
    #   logfire
fake-useragent==2.0.3
    # via httpx-html
fastapi==0.115.6
@@ -57,17 +94,26 @@ fastapi==0.115.6
    #   -r core_requirements.in
    #   fastapi-restful
    #   fastcrud
    #   pydantic-ai-examples
fastapi-restful==0.6.0
    # via -r core_requirements.in
fastcrud==0.15.5
    # via -r core_requirements.in
flower==2.0.1
    # via -r core_requirements.in
google-auth==2.37.0
    # via pydantic-ai-slim
googleapis-common-protos==1.66.0
    # via opentelemetry-exporter-otlp-proto-http
greenlet==3.1.1
    # via
    #   -r core_requirements.in
    #   sqlalchemy
    #   sqlmodel-crud-utilities
griffe==1.5.5
    # via pydantic-ai-slim
groq==0.15.0
    # via pydantic-ai-slim
h11==0.14.0
    # via
    #   httpcore
@@ -85,7 +131,12 @@ httpcore==1.0.7
httpx==0.28.1
    # via
    #   -r core_requirements.in
    #   anthropic
    #   groq
    #   httpx-html
    #   mistralai
    #   openai
    #   pydantic-ai-slim
httpx-html==0.11.0.dev0
    # via -r core_requirements.in
humanize==4.11.0
@@ -98,10 +149,27 @@ idna==3.10
    # via
    #   anyio
    #   httpx
    #   requests
importlib-metadata==8.5.0
-    # via pyppeteer
+    # via
+    #   opentelemetry-api
+    #   pyppeteer
jinja2==3.1.5
    # via jinjax
jinjax==0.48
    # via -r core_requirements.in
jiter==0.8.2
    # via
    #   anthropic
    #   openai
jsonpath-python==1.0.6
    # via mistralai
kombu==5.4.2
    # via celery
logfire==3.1.1
    # via pydantic-ai-examples
logfire-api==3.1.1
    # via pydantic-ai-slim
loguru==0.7.3
    # via
    #   -r core_requirements.in
@@ -113,8 +181,75 @@ lxml==5.3.0
    #   pyquery
lxml-html-clean==0.4.1
    # via lxml
markdown-it-py==3.0.0
    # via rich
markupsafe==3.0.2
    # via
    #   jinja2
    #   jinjax
mdurl==0.1.2
    # via markdown-it-py
mistralai==1.3.1
    # via pydantic-ai-slim
mypy-extensions==1.0.0
    # via typing-inspect
openai==1.59.7
    # via pydantic-ai-slim
opentelemetry-api==1.29.0
    # via
    #   opentelemetry-exporter-otlp-proto-http
    #   opentelemetry-instrumentation
    #   opentelemetry-instrumentation-asgi
    #   opentelemetry-instrumentation-asyncpg
    #   opentelemetry-instrumentation-dbapi
    #   opentelemetry-instrumentation-fastapi
    #   opentelemetry-instrumentation-sqlite3
    #   opentelemetry-sdk
    #   opentelemetry-semantic-conventions
opentelemetry-exporter-otlp-proto-common==1.29.0
    # via opentelemetry-exporter-otlp-proto-http
opentelemetry-exporter-otlp-proto-http==1.29.0
    # via logfire
opentelemetry-instrumentation==0.50b0
    # via
    #   logfire
    #   opentelemetry-instrumentation-asgi
    #   opentelemetry-instrumentation-asyncpg
    #   opentelemetry-instrumentation-dbapi
    #   opentelemetry-instrumentation-fastapi
    #   opentelemetry-instrumentation-sqlite3
opentelemetry-instrumentation-asgi==0.50b0
    # via opentelemetry-instrumentation-fastapi
opentelemetry-instrumentation-asyncpg==0.50b0
    # via logfire
opentelemetry-instrumentation-dbapi==0.50b0
    # via opentelemetry-instrumentation-sqlite3
opentelemetry-instrumentation-fastapi==0.50b0
    # via logfire
opentelemetry-instrumentation-sqlite3==0.50b0
    # via logfire
opentelemetry-proto==1.29.0
    # via
    #   opentelemetry-exporter-otlp-proto-common
    #   opentelemetry-exporter-otlp-proto-http
opentelemetry-sdk==1.29.0
    # via
    #   logfire
    #   opentelemetry-exporter-otlp-proto-http
opentelemetry-semantic-conventions==0.50b0
    # via
    #   opentelemetry-instrumentation
    #   opentelemetry-instrumentation-asgi
    #   opentelemetry-instrumentation-asyncpg
    #   opentelemetry-instrumentation-dbapi
    #   opentelemetry-instrumentation-fastapi
    #   opentelemetry-sdk
opentelemetry-util-http==0.50b0
    # via
    #   opentelemetry-instrumentation-asgi
    #   opentelemetry-instrumentation-fastapi
packaging==24.2
    # via opentelemetry-instrumentation
parse==1.20.2
    # via httpx-html
priority==2.0.0
@@ -123,23 +258,51 @@ prometheus-client==0.21.1
    # via flower
prompt-toolkit==3.0.48
    # via click-repl
protobuf==5.29.3
    # via
    #   googleapis-common-protos
    #   logfire
    #   opentelemetry-proto
psutil==5.9.8
    # via fastapi-restful
psycopg==3.2.4
    # via -r core_requirements.in
pyasn1==0.6.1
    # via
    #   pyasn1-modules
    #   rsa
pyasn1-modules==0.4.1
    # via google-auth
pydantic==2.10.5
    # via
    #   anthropic
    #   fastapi
    #   fastapi-restful
    #   fastcrud
    #   groq
    #   mistralai
    #   openai
    #   pydantic-ai-slim
    #   sqlmodel
    #   sqlmodel-crud-utilities
pydantic-ai==0.0.18
    # via -r core_requirements.in
pydantic-ai-examples==0.0.18
    # via pydantic-ai
pydantic-ai-slim==0.0.18
    # via
    #   pydantic-ai
    #   pydantic-ai-examples
pydantic-core==2.27.2
    # via
    #   pydantic
    #   sqlmodel-crud-utilities
pyee==11.1.1
    # via pyppeteer
pygments==2.19.1
    # via
    #   devtools
    #   rich
pymysql==1.1.1
    # via aiomysql
pyppeteer==2.0.0
@@ -150,11 +313,14 @@ python-dateutil==2.9.0.post0
    # via
    #   -r core_requirements.in
    #   celery
    #   mistralai
    #   sqlmodel-crud-utilities
python-decouple==3.8
    # via -r core_requirements.in
python-dotenv==1.0.1
    # via sqlmodel-crud-utilities
python-multipart==0.0.20
    # via pydantic-ai-examples
python-slugify==8.0.4
    # via -r core_requirements.in
pytz==2024.2
@@ -163,16 +329,31 @@ pytz==2024.2
    #   flower
redis==5.2.1
    # via -r core_requirements.in
requests==2.32.3
    # via
    #   opentelemetry-exporter-otlp-proto-http
    #   pydantic-ai-slim
rich==13.9.4
    # via
    #   logfire
    #   pydantic-ai-examples
rsa==4.9
    # via google-auth
simplejson==3.19.3
    # via -r core_requirements.in
six==1.17.0
    # via
    #   asttokens
    #   html5lib
    #   python-dateutil
    #   sqlalchemy-mixins
    #   sqlmodel-crud-utilities
sniffio==1.3.1
-    # via anyio
+    # via
+    #   anthropic
+    #   anyio
+    #   groq
+    #   openai
socksio==1.0.0
    # via httpx
soupsieve==2.6
@@ -201,12 +382,17 @@ text-unidecode==1.3
tornado==6.4.2
    # via flower
tqdm==4.67.1
-    # via pyppeteer
+    # via
+    #   openai
+    #   pyppeteer
typing-extensions==4.12.2
    # via
    #   anyio
    #   anthropic
    #   fastapi
    #   psycopg
    #   groq
    #   logfire
    #   openai
    #   opentelemetry-sdk
    #   pydantic
    #   pydantic-core
    #   pyee
@@ -214,16 +400,22 @@ typing-extensions==4.12.2
    #   sqlmodel-crud-utilities
    #   typing-inspect
typing-inspect==0.9.0
-    # via -r core_requirements.in
+    # via
+    #   -r core_requirements.in
+    #   mistralai
tzdata==2024.2
    # via
    #   celery
    #   kombu
    #   psycopg
urllib3==1.26.20
-    # via pyppeteer
+    # via
+    #   pyppeteer
+    #   requests
uvicorn==0.34.0
-    # via -r core_requirements.in
+    # via
+    #   -r core_requirements.in
+    #   pydantic-ai-examples
vine==5.1.0
    # via
    #   amqp
@@ -241,6 +433,11 @@ win32-setctime==1.2.0
    # via
    #   loguru
    #   sqlmodel-crud-utilities
wrapt==1.17.2
    # via
    #   deprecated
    #   opentelemetry-instrumentation
    #   opentelemetry-instrumentation-dbapi
wsproto==1.2.0
    # via hypercorn
xmljson==0.2.1
dev requirements (pip-compile lockfile, constrained by core_requirements.txt)

@@ -19,8 +19,10 @@ argon2-cffi-bindings==21.2.0
    # via argon2-cffi
arrow==1.3.0
    # via isoduration
-asttokens==3.0.0
-    # via stack-data
+asttokens==2.4.1
+    # via
+    #   -c core_requirements.txt
+    #   stack-data
async-lru==2.0.4
    # via jupyterlab
attrs==24.3.0
@@ -48,7 +50,9 @@ cffi==1.17.1
cfgv==3.4.0
    # via pre-commit
charset-normalizer==3.4.1
-    # via requests
+    # via
+    #   -c core_requirements.txt
+    #   requests
click==8.1.8
    # via
    #   -c core_requirements.txt
@@ -60,7 +64,7 @@ colorama==0.4.6
    #   ipython
comm==0.2.2
    # via ipykernel
-debugpy==1.8.11
+debugpy==1.8.12
    # via ipykernel
decorator==5.1.1
    # via ipython
@@ -69,7 +73,9 @@ defusedxml==0.7.1
distlib==0.3.9
    # via virtualenv
executing==2.1.0
-    # via stack-data
+    # via
+    #   -c core_requirements.txt
+    #   stack-data
fastapi==0.115.6
    # via
    #   -c core_requirements.txt
@@ -119,6 +125,7 @@ jedi==0.19.2
    # via ipython
jinja2==3.1.5
    # via
    #   -c core_requirements.txt
    #   fastapi-debug-toolbar
    #   jupyter-server
    #   jupyterlab
@@ -174,6 +181,7 @@ mako==1.3.8
    # via alembic
markupsafe==3.0.2
    # via
    #   -c core_requirements.txt
    #   jinja2
    #   mako
    #   nbconvert
@@ -206,6 +214,7 @@ overrides==7.7.0
    # via jupyter-server
packaging==24.2
    # via
    #   -c core_requirements.txt
    #   black
    #   ipykernel
    #   jupyter-server
@@ -259,6 +268,7 @@ pydantic-settings==2.7.1
    # via fastapi-debug-toolbar
pygments==2.19.1
    # via
    #   -c core_requirements.txt
    #   ipython
    #   nbconvert
pyinstrument==5.0.0
@@ -290,13 +300,15 @@ pyzmq==26.2.0
    #   ipykernel
    #   jupyter-client
    #   jupyter-server
-referencing==0.35.1
+referencing==0.36.0
    # via
    #   jsonschema
    #   jsonschema-specifications
    #   jupyter-events
requests==2.32.3
-    # via jupyterlab-server
+    # via
+    #   -c core_requirements.txt
+    #   jupyterlab-server
rfc3339-validator==0.1.4
    # via
    #   jsonschema
@@ -318,6 +330,7 @@ setuptools==75.8.0
six==1.17.0
    # via
    #   -c core_requirements.txt
    #   asttokens
    #   python-dateutil
    #   rfc3339-validator
sniffio==1.3.1
@@ -374,7 +387,6 @@ typing-extensions==4.12.2
    # via
    #   -c core_requirements.txt
    #   alembic
    #   anyio
    #   fastapi
    #   pydantic
    #   pydantic-core
pyproject.toml

@@ -79,24 +79,39 @@ dependencies = [
    "aiomysql==0.2.0",
    "amqp==5.3.1",
    "annotated-types==0.7.0",
    "anthropic==0.43.0",
    "anyio==4.8.0",
    "appdirs==1.4.4",
    "asgiref==3.8.1",
    "asttokens==2.4.1",
    "asyncpg==0.30.0",
    "beautifulsoup4==4.12.3",
    "billiard==4.2.1",
    "cachetools==5.5.0",
    "celery==5.4.0",
    "certifi==2024.12.14",
    "charset-normalizer==3.4.1",
    "click-didyoumean==0.3.1",
    "click-plugins==1.1.1",
    "click-repl==0.3.0",
    "click==8.1.8",
    "colorama==0.4.6",
    "cssselect==1.2.0",
    "deprecated==1.2.15",
    "devtools==0.12.2",
    "distro==1.9.0",
    "eval-type-backport==0.2.2",
    "executing==2.1.0",
    "fake-useragent==2.0.3",
    "fastapi-restful==0.6.0",
    "fastapi==0.115.6",
    "fastcrud==0.15.5",
    "flower==2.0.1",
    "google-auth==2.37.0",
    "googleapis-common-protos==1.66.0",
    "greenlet==3.1.1",
    "griffe==1.5.5",
    "groq==0.15.0",
    "h11==0.14.0",
    "h2==4.1.0",
    "hpack==4.0.0",
@@ -109,29 +124,65 @@ dependencies = [
    "hyperframe==6.0.1",
    "idna==3.10",
    "importlib-metadata==8.5.0",
    "jinja2==3.1.5",
    "jinjax==0.48",
    "jiter==0.8.2",
    "jsonpath-python==1.0.6",
    "kombu==5.4.2",
    "logfire-api==3.1.1",
    "logfire==3.1.1",
    "loguru==0.7.3",
    "lxml-html-clean==0.4.1",
    "lxml==5.3.0",
    "markdown-it-py==3.0.0",
    "markupsafe==3.0.2",
    "mdurl==0.1.2",
    "mistralai==1.3.1",
    "mypy-extensions==1.0.0",
    "openai==1.59.7",
    "opentelemetry-api==1.29.0",
    "opentelemetry-exporter-otlp-proto-common==1.29.0",
    "opentelemetry-exporter-otlp-proto-http==1.29.0",
    "opentelemetry-instrumentation-asgi==0.50b0",
    "opentelemetry-instrumentation-asyncpg==0.50b0",
    "opentelemetry-instrumentation-dbapi==0.50b0",
    "opentelemetry-instrumentation-fastapi==0.50b0",
    "opentelemetry-instrumentation-sqlite3==0.50b0",
    "opentelemetry-instrumentation==0.50b0",
    "opentelemetry-proto==1.29.0",
    "opentelemetry-sdk==1.29.0",
    "opentelemetry-semantic-conventions==0.50b0",
    "opentelemetry-util-http==0.50b0",
    "packaging==24.2",
    "parse==1.20.2",
    "priority==2.0.0",
    "prometheus-client==0.21.1",
    "prompt-toolkit==3.0.48",
    "protobuf==5.29.3",
    "psutil==5.9.8",
    "psycopg==3.2.4",
    "pyasn1-modules==0.4.1",
    "pyasn1==0.6.1",
    "pydantic-ai-examples==0.0.18",
    "pydantic-ai-slim==0.0.18",
    "pydantic-ai==0.0.18",
    "pydantic-core==2.27.2",
    "pydantic==2.10.5",
    "pyee==11.1.1",
    "pygments==2.19.1",
    "pymysql==1.1.1",
    "pyppeteer==2.0.0",
    "pyquery==2.0.1",
    "python-dateutil==2.9.0.post0",
    "python-decouple==3.8",
    "python-dotenv==1.0.1",
    "python-multipart==0.0.20",
    "python-slugify==8.0.4",
    "pytz==2024.2",
    "redis==5.2.1",
    "requests==2.32.3",
    "rich==13.9.4",
    "rsa==4.9",
    "simplejson==3.19.3",
    "six==1.17.0",
    "sniffio==1.3.1",
@@ -157,6 +208,7 @@ dependencies = [
    "webencodings==0.5.1",
    "websockets==10.4",
    "win32-setctime==1.2.0",
    "wrapt==1.17.2",
    "wsproto==1.2.0",
    "xmljson==0.2.1",
    "xmltodict==0.14.2",
@@ -171,7 +223,7 @@ dev = [
    "argon2-cffi-bindings==21.2.0",
    "argon2-cffi==23.1.0",
    "arrow==1.3.0",
-    "asttokens==3.0.0",
+    "asttokens==2.4.1",
    "async-lru==2.0.4",
    "attrs==24.3.0",
    "babel==2.16.0",
@@ -185,7 +237,7 @@ dev = [
    "click==8.1.8",
    "colorama==0.4.6",
    "comm==0.2.2",
-    "debugpy==1.8.11",
+    "debugpy==1.8.12",
    "decorator==5.1.1",
    "defusedxml==0.7.1",
    "distlib==0.3.9",
@@ -257,7 +309,7 @@ dev = [
    "pywinpty==2.0.14",
    "pyyaml==6.0.2",
    "pyzmq==26.2.0",
-    "referencing==0.35.1",
+    "referencing==0.36.0",
    "requests==2.32.3",
    "rfc3339-validator==0.1.4",
    "rfc3986-validator==0.1.1",
src/backend/core/consts.py (new file)

@@ -0,0 +1,21 @@
AI_MODEL = "gpt-4o"
default_system_prompt = """
You are an advanced and intelligent AI assistant specializing in generating
comprehensive and detailed SWOT analyses for a variety of scenarios, topics,
or businesses.

Your task is to analyze given entities, situations, or contexts (typically
provided as a URL value) and organize the information systematically into:
1. **Strengths**: Internal factors that give a competitive advantage or are beneficial.
2. **Weaknesses**: Internal factors that hinder performance or present challenges.
3. **Opportunities**: External factors that can be leveraged for growth or improvement.
4. **Threats**: External factors that pose risks or challenges.

Guidelines for your approach:
- Be concise yet descriptive in your explanation for each category.
- Consider internal factors for Strengths and Weaknesses (e.g., resources,
  skills, limitations).
- Focus on external or environmental factors for Opportunities and Threats
  (e.g., market trends, competition, external risks).
- Provide additional context or examples to support your points when relevant.
"""
src/backend/core/core.py (new file)

@@ -0,0 +1,36 @@
from collections.abc import Callable
from typing import Any

from pydantic_ai import Agent
from pydantic_ai.models.openai import OpenAIModel

from backend.core.consts import AI_MODEL, default_system_prompt
from backend.db.base import Base
from backend.utils import get_val


class SwotAnalysis(Base):
    """SQLModel for SWOT Analysis Response Object"""

    strengths: list[str]
    weaknesses: list[str]
    opportunities: list[str]
    threats: list[str]
    analysis: str


class SwotAgentDeps(Base):
    """Agent Dependencies for SWOT Analysis"""

    request: Any | None = None
    update_status_func: Callable | None = None
    tool_history: list[str]


swot_agent = Agent(
    OpenAIModel(model_name=AI_MODEL, api_key=get_val("OPENAI_API_KEY")),
    deps_type=SwotAgentDeps,
    result_type=SwotAnalysis,
    system_prompt=default_system_prompt,
    retries=5,
)
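A minimal usage sketch (not part of this commit), assuming pydantic-ai's Agent.run API and a SwotAgentDeps constructed with an empty tool history:

import asyncio

from backend.core.core import SwotAgentDeps, swot_agent


async def main() -> None:
    # Dependencies are supplied per run; request and update_status_func stay None here.
    deps = SwotAgentDeps(tool_history=[])

    # result.data is a SwotAnalysis instance because result_type=SwotAnalysis
    # was set on the agent above.
    result = await swot_agent.run(
        "Produce a SWOT analysis for https://example.com", deps=deps
    )
    print(result.data.strengths)


if __name__ == "__main__":
    asyncio.run(main())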
src/backend/core/tools.py (new file, 95 lines)

@@ -0,0 +1,95 @@
import httpx
from bs4 import BeautifulSoup as soup
from pydantic_ai import RunContext

from backend.core.consts import AI_MODEL
from backend.core.core import SwotAgentDeps, swot_agent
from backend.core.utils import report_tool_usage
from backend.logger import logger


@swot_agent.tool(prepare=report_tool_usage)
async def fetch_website_content(
    _ctx: RunContext[SwotAgentDeps], url: str
) -> str:
    """
    Fetches the HTML content of the given URL via httpx and BeautifulSoup.

    :param _ctx: RunContext[SwotAgentDeps]
    :param url: str
    :return: str
    """
    logger.info(f"Fetching website content for: {url}")
    async with httpx.AsyncClient(follow_redirects=True) as http_client:
        try:
            response = await http_client.get(url)
            response.raise_for_status()
            html_content = response.text
            content = soup(html_content, "html.parser")
            text_content = content.get_text(separator=" ", strip=True)
            return text_content
        except httpx.HTTPError as e:
            logger.info(f"Request failed: {e}")
            raise


@swot_agent.tool(prepare=report_tool_usage)
async def analyze_competition(
    ctx: RunContext[SwotAgentDeps],
    product_name: str,
    product_description: str,
) -> str:
    """Analyzes the competition for the given product using the configured AI model."""
    logger.info(f"Analyzing competition for: {product_name}")

    prompt = f"""
    You are a competitive analysis expert. Analyze the competition for the following product:
    Product Name: {product_name}
    Product Description: {product_description}

    Provide a detailed analysis of:
    1. Key competitors and their market position
    2. Competitive advantages and disadvantages
    3. Market trends and potential disruptions
    4. Entry barriers and competitive pressures
    """

    if not ctx.deps.client:
        logger.info("Error: AI client not initialized.")
        return ""
    try:
        response = await ctx.deps.client.aio.models.generate_content(
            model=AI_MODEL,
            contents=prompt,
        )
        return response.text
    except Exception as e:
        logger.error(f"Error analyzing competition: {e}")
        return f"Error analyzing competition: {e}"


@swot_agent.tool(prepare=report_tool_usage)
async def get_reddit_insights(
    ctx: RunContext[SwotAgentDeps], query: str, subreddit_name: str = "python"
) -> str:
    """
    A tool to gain insights from a subreddit. Data is returned as a string
    with newlines.

    :param ctx: RunContext[SwotAgentDeps]
    :param query: str
    :param subreddit_name: str
    :return: str
    """
    subreddit = ctx.deps.reddit.subreddit(subreddit_name)
    search_results = subreddit.search(query)

    insights = []
    for post in search_results:
        insights.append(
            f"Title: {post.title}\n"
            f"URL: {post.url}\n"
            f"Content: {post.selftext}\n"
        )

    return "\n".join(insights)
src/backend/core/utils.py (new file)

@@ -0,0 +1,25 @@
from pydantic_ai import RunContext
from pydantic_ai.tools import ToolDefinition

from backend.core.core import SwotAgentDeps


async def report_tool_usage(
    ctx: RunContext[SwotAgentDeps], tool_def: ToolDefinition
) -> ToolDefinition:
    """
    Reports tool usage + results to an update function.
    :param ctx: RunContext[SwotAgentDeps]
    :param tool_def: ToolDefinition
    :return: ToolDefinition
    """
    if tool_def.name in ctx.deps.tool_history:
        return tool_def

    if ctx.deps.update_status_func:
        await ctx.deps.update_status_func(
            ctx.deps.request, f"Using tool: {tool_def.name}..."
        )
        ctx.deps.tool_history.append(tool_def.name)

    return tool_def
src/backend/server/router.py (new, empty file)
src/backend/site/consts.py (new file)

@@ -0,0 +1,9 @@
from collections import defaultdict
from typing import Any, Final

ANALYZING_MESSAGE: Final = "Analyzing..."
ANALYSIS_COMPLETE_MESSAGE: Final = "Analysis complete!"

running_tasks = set()
status_store: dict[str, list] = defaultdict(list)
result_store: dict[str, Any] = {}
src/backend/site/router.py (new file, 157 lines)

@@ -0,0 +1,157 @@
import asyncio
import os
import random
import time
from typing import Any

from fastapi import APIRouter, Form, Request
from jinjax import Catalog, JinjaX
from starlette.responses import HTMLResponse
from starlette.staticfiles import StaticFiles
from starlette.templating import Jinja2Templates

from backend.core.core import SwotAnalysis
from backend.logger import logger
from backend.settings import app_settings
from backend.site.consts import (
    ANALYSIS_COMPLETE_MESSAGE,
    ANALYZING_MESSAGE,
    result_store,
    running_tasks,
    status_store,
)

user_frontend = APIRouter(prefix="", tags=["frontend"])
frontend = app_settings.frontend_dir

templates = Jinja2Templates(directory=os.path.join(frontend, "templates"))
templates.env.add_extension(JinjaX)
catalog = Catalog(jinja_env=templates.env)
for folder in ("main", "forms", "snippets"):
    catalog.add_folder(os.path.join(frontend, "components", folder))

user_frontend.mount(
    "/static",
    StaticFiles(directory=os.path.join(frontend, "static")),
    name="static",
)


async def run_agent_with_progress(session_id, url):
    pass


@user_frontend.post("/analyze", response_class=HTMLResponse)
async def analyze_url(request: Request, url: str = Form(...)) -> HTMLResponse:
    """
    Analyze a given URL using the SWOT analysis agent.

    :param request:
    :param url:
    :return:
    """
    session_id = str(id(request))
    request.session["analysis_id"] = session_id
    request.session["start_time"] = asyncio.get_event_loop().time()

    # Clearing out the status store for the analysis ID session
    status_store[session_id] = []

    status_store[session_id].append(ANALYZING_MESSAGE)

    logger.info(f"Starting new analysis with session ID: {session_id}")

    # Keep a strong reference to the background task in the shared set so it
    # is not garbage-collected before it finishes.
    task = asyncio.create_task(run_agent_with_progress(session_id, url))
    running_tasks.add(task)
    task.add_done_callback(running_tasks.discard)

    return templates.TemplateResponse(
        "status.html",
        context={
            "request": request,
            "messages": [ANALYZING_MESSAGE],
            "result": False,
        },
    )


@user_frontend.get("/status", response_class=HTMLResponse)
async def get_status(request: Request) -> HTMLResponse:
    """
    Returns the current status messages.

    :param request:
    :return:
    """
    context = {"request": request, "messages": [], "result": False}
    session_id = request.session.get("analysis_id")
    if session_id:
        messages = status_store.get(session_id, [])
        result = ANALYSIS_COMPLETE_MESSAGE in messages
        logger.info(
            f"Status check - Session ID: {session_id}, Messages: "
            f"{messages}"
        )

        context.update({"messages": messages, "result": result})

    return templates.TemplateResponse("status.html", context=context)


@user_frontend.get("/result", response_class=HTMLResponse)
async def get_result(request: Request) -> HTMLResponse:
    """
    Returns the SWOT analysis result from the existing session ID.

    :param request: Request
    :return: HTMLResponse
    """
    session_id = request.session.get("analysis_id")

    if session_id and session_id in result_store:
        result = result_store[session_id]
    else:
        result = None

    return templates.TemplateResponse(
        "result.html",
        {"request": request, "result": result},
    )


def emulate_tool_completion(session_id: str, message: str) -> None:
    """Pydantic AI doesn't provide a post-processing hook, so we need to emulate one."""

    # Sleep a random amount of time between 0 and 5 seconds
    time.sleep(random.randint(0, 5))
    status_store[session_id].append(message)


async def update_status(session_id: str, message: Any) -> None:
    """Updates status messages and handles SWOT analysis results."""
    logger.info(f"Updating status for session {session_id}: {message}")

    # Handle SWOT analysis result
    if isinstance(message, SwotAnalysis):
        result_store[session_id] = message.model_dump()
        status_store[session_id].append(ANALYSIS_COMPLETE_MESSAGE)
        return

    # Handle string messages
    if isinstance(message, str):
        # Store the completion message immediately; emulate a tool-completion
        # delay for all other status messages.
        if message == ANALYSIS_COMPLETE_MESSAGE:
            status_store[session_id].append(message)
        else:
            loop = asyncio.get_running_loop()
            await loop.run_in_executor(
                None, emulate_tool_completion, session_id, message
            )

    logger.info(
        f"Status messages for session {session_id}: {status_store[session_id]}"
    )
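run_agent_with_progress is committed as a stub; one possible wiring (a sketch under assumptions, not the author's implementation) would build SwotAgentDeps so that tool-usage messages flow through update_status and the final SwotAnalysis lands in result_store:

from backend.core.core import SwotAgentDeps, swot_agent


async def run_agent_with_progress(session_id: str, url: str) -> None:
    # Adapt update_status(session_id, message) to the (request, message)
    # signature that report_tool_usage calls on update_status_func.
    async def status_callback(_request, message):
        await update_status(session_id, message)  # update_status is defined in this router

    deps = SwotAgentDeps(update_status_func=status_callback, tool_history=[])

    # Assumes pydantic-ai's Agent.run API; result.data is a SwotAnalysis.
    result = await swot_agent.run(f"Perform a SWOT analysis of {url}", deps=deps)
    await update_status(session_id, result.data)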
|
Reference in New Issue
Block a user