218 lines
6.9 KiB
Python
218 lines
6.9 KiB
Python
from core.core import RequestManager, wer_nltk
|
||
from core.base_model import *
|
||
from router.bale.base_model import *
|
||
import requests
|
||
|
||
"""
|
||
روند هر مرحله در اینجا مشخص می شود و داده و خروجی و پردازش در اینجا انجام می شود
|
||
"""
|
||
__all__ = ["Operation"]
|
||
|
||
|
||
class Operation:
    """Pure business-logic operations over the backend HTTP API.

    Wraps a :class:`RequestManager` and exposes one coroutine per backend
    endpoint; per the original author's notes, nothing here depends on
    Bale or on any user object.
    """

    def __init__(self, request_manager: RequestManager):
        # Shared transport used by every operation to reach the backend.
        self.request_manager = request_manager
|
||
|
||
async def search_in_law(
|
||
self, query: str, limit: int, rerank_model: str, embed_model: str
|
||
) -> BMNewSemanticSearchOutput:
|
||
"""
|
||
فقط منطق – بدون هیچ وابستگی به Bale یا User
|
||
"""
|
||
|
||
result = await self.request_manager.get_result(
|
||
payload={
|
||
"query": query,
|
||
"limit": limit,
|
||
"rerank_model": rerank_model,
|
||
"embed_model": embed_model,
|
||
},
|
||
url="new/semantic_search",
|
||
)
|
||
|
||
return BMNewSemanticSearchOutput.parse_obj(result)
|
||
|
||
async def stream_search_in_law(
|
||
self, query: str, limit: int, rerank_model: str, embed_model: str
|
||
):
|
||
"""
|
||
فقط منطق – بدون هیچ وابستگی به Bale یا User
|
||
"""
|
||
async for data in self.request_manager.stream_result(
|
||
payload={
|
||
"query": query,
|
||
"limit": limit,
|
||
"rerank_model": rerank_model,
|
||
"embed_model": embed_model,
|
||
},
|
||
url="new/semantic_search",
|
||
):
|
||
yield data
|
||
|
||
async def stream_rule_making(self, query, llm_name, effort):
|
||
async for data in self.request_manager.stream_result(
|
||
payload={
|
||
"query": query,
|
||
"query_id": "qs12357498",
|
||
"llm_effort": effort,
|
||
"llm_model_name": llm_name,
|
||
# "llm_api_url"
|
||
# "llm_api_key"
|
||
},
|
||
url="/single/rule_making",
|
||
):
|
||
|
||
yield data
|
||
|
||
async def stream_chat_in_law(self, query, limit, effort, mode_type="bale"):
|
||
async for data in self.request_manager.stream_result(
|
||
payload={
|
||
"section_content": query,
|
||
"effort": effort,
|
||
"limit": limit,
|
||
"mode_type": mode_type,
|
||
},
|
||
url="/single/semantic_search/run_chat",
|
||
):
|
||
|
||
yield data
|
||
|
||
async def stream_rule_semantic_search(
|
||
self,
|
||
queries: List,
|
||
filter_qanon_ids: List,
|
||
limit_rerank: int,
|
||
embed_model="jinaai/jina-colbert-v2",
|
||
rerank_model="BAAI/bge-reranker-v2-m3",
|
||
metadata={},
|
||
limit_cos=100,
|
||
):
|
||
async for data in self.request_manager.stream_result(
|
||
payload={
|
||
"queries": queries,
|
||
"filter_qanon_ids": filter_qanon_ids,
|
||
"embed_model": embed_model,
|
||
"rerank_model": rerank_model,
|
||
"metadata": metadata,
|
||
"limit_rerank": limit_rerank,
|
||
"limit_cos": limit_cos,
|
||
},
|
||
url="/single/rule_semantic_search",
|
||
):
|
||
|
||
yield data
|
||
|
||
async def chat_in_law(self, query, effort, limit, mode_type="bale") -> ChatLaw:
|
||
result = await self.request_manager.get_result(
|
||
payload={
|
||
"section_content": query,
|
||
"effort": effort,
|
||
"limit": limit,
|
||
"mode_type": mode_type,
|
||
},
|
||
url="/single/semantic_search/run_chat",
|
||
)
|
||
print(f"chat_in_law {result}")
|
||
return ChatLaw.parse_obj(result)
|
||
|
||
async def title_repeated(
|
||
self,
|
||
qanontitle,
|
||
search_range: int = 10,
|
||
# url=f"http://localhost:8010/v1/indices/qaqanon/search",
|
||
url=f"http://localhost/api/elp/v1/indices/qaqanon/search",
|
||
) -> List[TitleRepeat]:
|
||
"""
|
||
- باید با سرویس از حاج آقا گرفته شود
|
||
Fetch similar titles from the custom Elasticsearch-like REST API.
|
||
"""
|
||
# "/majles/similar/title/qaqanon/0/10/none"
|
||
# headers["Authorization"]="GuestAccess"
|
||
headers = {"accept": "application/json", "Content-Type": "application/json"}
|
||
|
||
body = {
|
||
"query": qanontitle, #
|
||
"from_": 0,
|
||
"size": search_range + 10,
|
||
"track_total_hits": True,
|
||
}
|
||
|
||
response = requests.request("POST", url, headers=headers, json=body, timeout=20)
|
||
print(f"title_repeated -> {response}")
|
||
if response.status_code != 200:
|
||
print("ERROR:", response.status_code)
|
||
print(response.text)
|
||
else:
|
||
data = response.json()
|
||
ids = []
|
||
# print('---------------------------------------> max_score', max_score)
|
||
# print(data["hits"])
|
||
|
||
for i in data["hits"]["hits"]:
|
||
title = i["_source"]["title"]
|
||
ids.append(
|
||
TitleRepeat(
|
||
title=title,
|
||
id=i["_source"]["id"],
|
||
score=wer_nltk(exist=title, new=qanontitle),
|
||
)
|
||
)
|
||
|
||
return sorted(ids, key=lambda x: x.score, reverse=True)[:search_range]
|
||
|
||
async def talk(self, query) -> str:
|
||
result = await self.request_manager.get_result(
|
||
payload={
|
||
"user_input": query,
|
||
},
|
||
url="/talk",
|
||
)
|
||
return result
|
||
|
||
async def conflict_qanon_asasi_low(self, query, effort, limit, mode_type="bale"):
|
||
async for data in self.request_manager.stream_result(
|
||
payload={
|
||
"section_content": query,
|
||
"effort": effort,
|
||
"limit": limit,
|
||
"mode_type": mode_type,
|
||
},
|
||
url="/new/conflict/constitution_low",
|
||
):
|
||
yield data
|
||
|
||
async def conflict_qanon_asasi_steps(self, query, effort, limit, mode_type="bale"):
|
||
_result = await self.request_manager.get_result(
|
||
payload={
|
||
"section_content": query,
|
||
"effort": effort,
|
||
"limit": limit,
|
||
"mode_type": mode_type,
|
||
},
|
||
url="/new/conflict/constitution",
|
||
)
|
||
|
||
return _result
|
||
|
||
async def stream_logical_chat_in_law(self, query, effort, metadata, limit):
|
||
async for data in self.request_manager.stream_result(
|
||
payload={
|
||
"section_content": query,
|
||
"effort": effort,
|
||
"metadata": metadata,
|
||
"limit":limit
|
||
},
|
||
url="/new/stream/chat_logical",
|
||
):
|
||
yield data
|
||
|
||
async def conflict_law_writing_policy(self, query, effort):
|
||
_result = await self.request_manager.get_result(
|
||
payload={
|
||
"section_content": query,
|
||
"effort": effort,
|
||
},
|
||
url="/conflict/law_writing_policy",
|
||
)
|
||
return _result
|