from normalizer import Normalizer
from tokenizer import *
import jalali
import re
from re import sub
import textwrap
from html import escape
from bs4 import BeautifulSoup

# from lxml import etree
import datetime

_normalizer = Normalizer(date_normalizing_needed=True)

# Arabic/Urdu variants of "yeh" and "kaf" that must be normalized to the
# standard Persian letters.
yeAr = r"ﻱ|ې|ێ|ے|ى|ي|ئ"
yeFr = r"ی"
keAr = r"ڭ|ﻚ|ﮎ|ﻜ|ﮏ|ګ|ﻛ|ﮑ|ﮐ|ڪ|ك"
keFr = r"ک"

# Temporary markers used to protect selected HTML tags while the rest are stripped.
mark1 = r'#\[#'
mark2 = r'#\]#'
hTag1 = r'<'
hTag2 = r'>'

# Table-related tags are masked (not removed) when HTML is stripped.
tableTag = ["table", "tr", "th", "td", "TABLE", "TR", "TH", "TD"]
strTable = '|'.join('(' + tag + ')' for tag in tableTag)
regTable = r'<(?P<slash>\/)*(?P<tag>' + strTable + r')(?P<class>[^>]+)*>'
regTableReplace = r'#[#\g<slash>\g<tag>\g<class>#]#'

def isNeedHtml(html):
    return ("<TABLE" in html or "<table" in html
            or "</TR" in html or "</tr" in html)

def removeHtmlTags(html, exeptionTag=[]):
    # Strips HTML tags from the text. Tags listed in exeptionTag are first masked
    # with #[# ... #]# markers so BeautifulSoup keeps them, then restored as <...>.
    if len(exeptionTag) > 0:
        exceptTags = '|'.join('(' + tag + ')' for tag in exeptionTag)
        reg1 = r'<(?P<slash>/)*(?P<tag>' + exceptTags + r')(?P<class>[^>]+)*>'
        html = sub(reg1, regTableReplace, html)

    soup = BeautifulSoup(html, "html.parser")
    text = soup.get_text("\n", strip=True)

    if len(exeptionTag) > 0:
        text = sub(mark1, hTag1, text)
        text = sub(mark2, hTag2, text)

    return text

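# Minimal usage sketch for removeHtmlTags (illustrative only; the exact output
# depends on BeautifulSoup's get_text behaviour for the given markup):
def _removeHtmlTags_example():
    # With no exception tags, all markup is stripped; expected to yield
    # roughly "hello\nworld".
    return removeHtmlTags("<p>hello <b>world</b></p>")
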
def removeHtmlNoTableTag(html):
    # Removes all HTML tags except the table-related ones (masked via regTable).
    # NOTE: still has a bug and hangs in test2.py.
    html = sub(regTable, regTableReplace, html)
    soup = BeautifulSoup(html, "html.parser")
    text = soup.get_text("\n", strip=True)

    text = sub(mark1, hTag1, text)
    text = sub(mark2, hTag2, text)

    return text

def normalizerData(data):
    # Normalizes the text and converts every recognized Jalali date to a timestamp.
    global _normalizer
    normalTitle, dates = _normalizer.normalize(data, return_dates=True)
    tdates = []
    for d in dates:
        if not d.startswith("num"):
            try:
                tsd = jdate2timestamp(d)
                cdate = d
            except Exception:
                try:
                    d = d.replace("y", "").replace("m", "/").replace("d", "/")
                    m = re.match(r"^(\d{4})\D(\d{1,2})\D(\d{1,2})$", d)
                    if m:
                        year, month, day = int(m.group(1)), int(m.group(2)), int(m.group(3))
                        if 1200 < year < 1550:
                            # Clamp obviously invalid month/day values.
                            if month < 1 or month > 12:
                                month = 1
                            if day < 1 or day > 31:
                                day = 1
                            cdate = str(year) + "/" + str(month) + "/" + str(day)
                            tsd = jdate2timestamp(cdate)
                        else:
                            continue
                    else:
                        continue
                except Exception:
                    continue
            tdates.append({"date": cdate, "timestamp": tsd, "index": 0, "slice": ""})

    return normalTitle, tdates

def normalizerDate2(inputString):
    # Normalizes the text and returns, for every recognized date, both the
    # converted form and the original tokens it came from.
    global _normalizer
    normalizedString, dates, recognized_dates, recognized_numbers = _normalizer.normalize(inputString, return_dates=True)

    import tokenizer as t
    inputString_token = t.Tokenizer.tokenize_words(None, inputString)

    tdates = []
    for date_item in recognized_dates:
        date_part = date_item['date']
        date_token_index = date_item['date_token_index']
        start_date_token_index = date_item['start_date_token_index']
        end_date_token_index = date_item['end_date_token_index']
        if not date_part.startswith("num"):
            try:
                cdate = date_part
                tsd = jdate2timestamp(date_part)
            except Exception:
                try:
                    date_part = date_part.replace("y", "").replace("m", "/").replace("d", "/")
                    m = re.match(r"^(\d{4})\D(\d{1,2})\D(\d{1,2})$", date_part)
                    if m:
                        year, month, day = int(m.group(1)), int(m.group(2)), int(m.group(3))
                        if 1200 < year < 1550:
                            if month < 1 or month > 12:
                                month = 1
                            if day < 1 or day > 31:
                                day = 1
                            cdate = str(year) + "/" + str(month) + "/" + str(day)
                            tsd = jdate2timestamp(cdate)
                        else:
                            continue
                except Exception:
                    continue

        original_date_part = inputString_token[start_date_token_index:end_date_token_index + 1]
        original_date = ' '.join(original_date_part).strip()
        tdates.append({"converted_date": date_item['date'],
                       "date": cdate,
                       "original_date": original_date,
                       # "timestamp": tsd,
                       "date_token_index": date_token_index,
                       "start_date_token_index": start_date_token_index,
                       "end_date_token_index": end_date_token_index})

    return normalizedString, tdates, recognized_numbers

def OtherDateFormatNormalizer(inputString, pattern):
    # Handles composite date phrases whose year is spelled out as a sum
    # (e.g. "y0m4d4 ماه 1000 و 300 و 50 و 4"): the year parts are added up and
    # substituted back into the text as a single standard yYmMdD date.
    mainTextTemp = inputString
    regex_pattern_Mah = r"y(\d{1,4})m(\d{1,2})d(\d{1,2})\sماه\s(\d{1,4})\sو\s(\d{1,3})\sو\s(\d{1,2})\sو\s(\d{1})\s"  # y0m4d4 ماه 1000 و 300 و 50 و 4
    regex_pattern_MahSal = r"y(\d{1,4})m(\d{1,2})d(\d{1,2})\sماه\sسال\s(\d{1,4})\sو\s(\d{1,3})\sو\s(\d{1,2})\sو\s(\d{1})\s"  # y0m4d4 ماه سال 1000 و 300 و 50 و 4
    regex_pattern_MahSal2 = r"y(\d{1,4})m(\d{1,2})d(\d{1,2})\sماه\sسال\sy(\d{1,4})m(\d{1,2})d(\d{1,2})\sو\s(\d{1,3})\sو\s(\d{1,2})\sو\s(\d{1})\s"  # y0m4d4 ماه سال y1000m0d0 و 300 و 50 و 4
    regex_pattern_MahSal3 = r"y(\d{1,4})m(\d{1,2})d(\d{1,2})\sماه\sسال\sy(\d{1,4})m(\d{1,2})d(\d{1,2})"  # y0m3d1 ماه سال y1353m0d0

    if pattern == 1:
        regex = re.compile(regex_pattern_Mah)
    elif pattern == 2:
        regex = re.compile(regex_pattern_MahSal)
    elif pattern == 3:
        regex = re.compile(regex_pattern_MahSal2)
    elif pattern == 4:
        regex = re.compile(regex_pattern_MahSal3)

    matches = regex.finditer(inputString)
    for match in matches:
        foundedPattern = match.group()
        foundedPatternTemp = match.group()
        if pattern == 1:
            foundedPattern = foundedPattern.replace('ماه', '')
        else:
            foundedPattern = foundedPattern.replace('سال', '')
            foundedPattern = foundedPattern.replace('ماه', '')
        foundedPattern = foundedPattern.strip()

        # Remove the standard date part(s); whatever is left is the spelled-out year.
        tempString = foundedPattern
        standardDatePattern = r"y(\d{1,4})m(\d{1,2})d(\d{1,2})"
        matchItems = re.finditer(standardDatePattern, tempString)
        for item in matchItems:
            tempPattern = item.group()
            tempString = tempString.replace(tempPattern, '')

        tempString = tempString.strip()
        tempString = tempString.replace('و', '')
        tempString = tempString.strip()
        tempArray = tempString.split()

        # Sum the year components (plain numbers, or the year field of a standard date).
        year = 0
        for item in tempArray:
            dateMatch = re.finditer(standardDatePattern, item)
            regexFlag = True
            for dateItem in dateMatch:
                yearStr = dateItem.group()[1:5]
                year += int(yearStr)
                regexFlag = False
                break
            if item.isalnum() and regexFlag:
                year += int(item)

        tempPattern = tempPattern.replace('y0', 'y' + str(year))
        mainTextTemp = mainTextTemp.replace(foundedPatternTemp, tempPattern + ' ')

    return mainTextTemp

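# Illustrative sketch of the intended transformation for pattern 1 (values traced
# by hand from the code above, not from a test run):
def _OtherDateFormatNormalizer_example():
    text = "در y0m4d4 ماه 1000 و 300 و 50 و 4 تصویب شد"
    # 1000 + 300 + 50 + 4 = 1354, so the phrase should collapse to "y1354m4d4".
    return OtherDateFormatNormalizer(text, 1)
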
def normalizerLongData(data):
    # Very long texts are split into ~10000-character chunks before normalization,
    # and the recognized dates of all chunks are merged.
    dates = []
    if len(data) > 10000:
        textParts = textwrap.wrap(data, 10000, break_long_words=False)
        for part in textParts:
            _, part_dates = normalizerData(part)
            dates.extend(part_dates)
    else:
        _, dates = normalizerData(data)
    return dates

# ##################
# On Windows, datetime.timestamp() raised an error for negative values
# (dates before 1970), so the variant below was replaced by jdate2timestamp.
# rr = gdt.timestamp()
# #################
def jdate2timestamp_old(dt):
    ndt = dt.replace("y", "").replace("m", "/").replace("d", "/")
    gd = jalali.Persian(ndt).gregorian_datetime()
    # print(gd)
    ztime = datetime.time(0, 0, 0, 0)
    gdt = datetime.datetime.combine(gd, ztime)
    # print(gdt)
    rr = gdt.timestamp()
    tst = int(round(rr) * 1000)
    return tst

def jdate2timestamp(dt):
    # Converts a Jalali date ("yYYYYmMMdDD" or "YYYY/MM/DD") to a Unix timestamp
    # in milliseconds, computed as whole days since 1970-01-01.
    ndt = dt.replace("y", "").replace("m", "/").replace("d", "/")
    gd = jalali.Persian(ndt).gregorian_datetime()
    base = datetime.date(1970, 1, 1)
    rr = (gd - base).total_seconds()
    tst = int(round(rr) * 1000)
    return tst

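# Worked example for jdate2timestamp (hand-computed; assumes the jalali package
# maps 1403/03/03 to the Gregorian date 2024-05-23, i.e. 19866 days after
# 1970-01-01, so the result would be 19866 * 86400 * 1000 == 1716422400000):
def _jdate2timestamp_example():
    return jdate2timestamp("y1403m3d3")
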
def getSortTimestamp(ts_date):
    # Returns a timestamp usable as a sort key; empty or invalid dates get a
    # sentinel value far in the past.
    empty_date = -15000000000
    ts_ts = empty_date
    try:
        if ts_date != "":
            ts_ts = jdate2timestamp(ts_date)
    except Exception:
        ts_ts = empty_date

    return ts_ts

def normalize_content(content):
    text = normalYehKe(content)
    text = _normalizer.sub_alphabets(text)
    # Join words separated by a half-space (ZWNJ): the ZWNJ is removed rather than
    # replaced by a space, because a space would add an extra token to the text.
    text = sub('\u200c', '', text)
    pattern = r',|٬|٫|‚|,|؟|ʕ|_|ـ'
    text = sub(pattern, '', text)

    if '\u200c' in text:
        print(text)

    return text

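# Minimal sketch of the half-space handling (assumes _normalizer.sub_alphabets
# leaves the zero-width non-joiner untouched): "می" + "\u200c" + "شود" should
# come back joined as "میشود", i.e. one token instead of two.
def _normalize_content_example():
    return normalize_content("می\u200cشود")
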
def normalYehKe(text):
    if text is None:
        return ''

    c1 = sub(yeAr, yeFr, text)
    c2 = sub(keAr, keFr, c1)
    c2 = c2.replace('\u00A0', '')
    return c2.strip()

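# Example of the character-level normalization (codepoints follow the regexes above):
def _normalYehKe_example():
    # Arabic Yeh (U+064A) -> Persian Yeh (U+06CC); Arabic Kaf (U+0643) -> Keheh (U+06A9),
    # so the input below comes back as the Persian-normalized form "کتاب یک".
    return normalYehKe("\u0643\u062a\u0627\u0628 \u064a\u0643")
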
_term_list = []


def setTermList():
    # Builds the list of parliamentary terms (Majles) with their begin/end
    # timestamps, used to map an approval date onto a legislative term.
    global _term_list
    if len(_term_list) > 0:
        return
    _term_list = [
        {
            "begin": jdate2timestamp(begin),
            "end": jdate2timestamp(end),
            "term": "مجلس " + name + "-دوره" + str(number),
            "term_number": number,
            "majles_name": name,
        }
        for begin, end, number, name in [
            ("1285/07/14", "1287/04/2", 1, "شورای ملی"),
            ("1288/8/24", "1290/10/3", 2, "شورای ملی"),
            ("1293/9/14", "1294/8/21", 3, "شورای ملی"),
            ("1300/4/1", "1302/3/30", 4, "شورای ملی"),
            ("1302/11/22", "1304/11/22", 5, "شورای ملی"),
            ("1305/4/19", "1307/5/22", 6, "شورای ملی"),
            ("1307/7/19", "1309/8/14", 7, "شورای ملی"),
            ("1309/9/24", "1311/10/24", 8, "شورای ملی"),
            ("1311/12/24", "1314/1/24", 9, "شورای ملی"),
            ("1314/3/15", "1316/3/22", 10, "شورای ملی"),
            ("1316/6/20", "1318/6/27", 11, "شورای ملی"),
            ("1318/8/3", "1320/8/9", 12, "شورای ملی"),
            ("1320/8/22", "1322/9/1", 13, "شورای ملی"),
            ("1322/12/16", "1324/12/21", 14, "شورای ملی"),
            ("1326/4/25", "1328/5/6", 15, "شورای ملی"),
            ("1328/11/20", "1330/11/29", 16, "شورای ملی"),
            ("1331/2/7", "1332/8/28", 17, "شورای ملی"),
            ("1332/12/27", "1335/1/26", 18, "شورای ملی"),
            ("1335/3/10", "1339/3/29", 19, "شورای ملی"),
            ("1339/12/2", "1340/2/19", 20, "شورای ملی"),
            ("1342/7/14", "1346/7/13", 21, "شورای ملی"),
            ("1346/7/14", "1350/6/9", 22, "شورای ملی"),
            ("1350/6/9", "1354/6/16", 23, "شورای ملی"),
            ("1354/6/17", "1357/11/20", 24, "شورای ملی"),
            ("1359/3/7", "1363/3/6", 1, "شورای اسلامی"),
            ("1363/3/7", "1367/3/6", 2, "شورای اسلامی"),
            ("1367/3/7", "1371/3/6", 3, "شورای اسلامی"),
            ("1371/3/7", "1375/3/11", 4, "شورای اسلامی"),
            ("1375/3/12", "1379/3/6", 5, "شورای اسلامی"),
            ("1379/3/7", "1383/3/6", 6, "شورای اسلامی"),
            ("1383/3/7", "1387/3/6", 7, "شورای اسلامی"),
            ("1387/3/7", "1391/3/6", 8, "شورای اسلامی"),
            ("1391/3/7", "1395/3/7", 9, "شورای اسلامی"),
            ("1395/3/8", "1399/3/6", 10, "شورای اسلامی"),
            ("1399/3/7", "1403/3/6", 11, "شورای اسلامی"),
        ]
    ]

def getTermQanon(ts_date_timestamp, ts_ref):
    # Maps an approval timestamp (and, for special bodies, the approving
    # reference) onto the corresponding legislative term.
    global _term_list
    setTermList()
    term = ""
    term_number = 0
    majles_name = ""

    # Special approving bodies that are not a regular Majles term.
    if ts_ref in ("هيات وزيران (دوره فترت)",
                  "نخست وزير (مصدق)",
                  "وزير عدليه (داور)",
                  "شوراي انقلاب جمهوري اسلامي ايران"):
        term = ts_ref

    majles_name = term
    if term == "":
        for i in range(len(_term_list) - 1, -1, -1):
            begin = _term_list[i]["begin"]
            end = _term_list[i]["end"]
            if begin <= ts_date_timestamp <= end:
                term = _term_list[i]["term"]
                term_number = _term_list[i]["term_number"]
                majles_name = _term_list[i]["majles_name"]
                break

    error = ""
    if term == "":
        if ts_date_timestamp <= _term_list[len(_term_list) - 1]["end"]:
            # The date falls in a gap between two terms: report the next term.
            for i in range(0, len(_term_list) - 1, 1):
                end = _term_list[i]["end"]
                if ts_date_timestamp <= end:
                    term = _term_list[i]["term"]
                    term_number = _term_list[i]["term_number"]
                    majles_name = _term_list[i]["majles_name"]
                    error = "تاریخ بین دو دوره"
                    break
        else:
            term_number = -1
            error = "تاریخ خارج از محدوده"

    return term, term_number, majles_name, error

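# Usage sketch (values follow from the term table above; timestamps grow
# monotonically with the Jalali date, so 1400/1/1 falls in the 1399-1403 term):
def _getTermQanon_example():
    term, term_number, majles_name, error = getTermQanon(jdate2timestamp("1400/1/1"), "")
    # Expected: term_number == 11, majles_name == "شورای اسلامی", error == ""
    return term, term_number, majles_name, error
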
# Takes a text plus the start and end character index of a phrase inside it,
# and returns the start and end token numbers of that phrase within the text.
def token_state_finder(normalized_section_content, start_index, end_index):
    before_substring = normalized_section_content[0:start_index - 1].strip()
    pattern_substring = normalized_section_content[start_index - 1:end_index + 1].strip()
    before_substring_token_list = before_substring.strip().split()
    pattern_token_list = pattern_substring.strip().split()
    start_token_state = len(before_substring_token_list)
    end_token_state = len(before_substring_token_list) + (len(pattern_token_list) - 1)
    pattern_tokens_state = {
        "start_token_state": start_token_state,
        "end_token_state": end_token_state
    }
    return pattern_tokens_state

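# Worked example (hand-traced; the indices use the same +1/-1 convention as
# regex_patterns_finder below):
def _token_state_finder_example():
    # In "aa bb cc dd" the phrase "cc dd" spans characters 6..10, so the caller
    # passes start_index=7 and end_index=10.
    return token_state_finder("aa bb cc dd", 7, 10)
    # -> {"start_token_state": 2, "end_token_state": 3}
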
def find_number_indexes_in_string(normalized_string, recognized_numbers):
    # Enriches each recognized number with its token list and its character
    # start/end positions inside the normalized string.
    complete_recognized_numbers = []
    for item in recognized_numbers:
        number_start_index, number_end_index = find_token_indexes_in_string(
            normalized_string, item['start_token_index'], item['end_token_index'])
        content = normalized_string.split()
        number_token_list = content[item['start_token_index']:item['end_token_index'] + 1]
        complete_recognized_numbers.append(
            {
                'number_value': item['number_value'],
                'number_token_list': number_token_list,
                'start_token_index': item['start_token_index'],
                'end_token_index': item['end_token_index'],
                "start_number_state": number_start_index,
                "end_number_state": number_end_index
            }
        )
    return complete_recognized_numbers

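# Illustrative sketch (the input list mimics what the Normalizer is assumed to
# report for "سه هزار"; the commented values are hand-computed):
def _find_number_indexes_example():
    numbers = [{'number_value': 3000, 'start_token_index': 1, 'end_token_index': 2}]
    return find_number_indexes_in_string("قیمت سه هزار تومان", numbers)
    # -> [{'number_value': 3000, 'number_token_list': ['سه', 'هزار'],
    #      'start_token_index': 1, 'end_token_index': 2,
    #      'start_number_state': 6, 'end_number_state': 12}]
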
# Takes the text plus the start and end token numbers of a phrase, and returns
# the character start and end index of that phrase inside the text.
def find_token_indexes_in_string(normalized_string, start_token_state, end_token_state):
    before_tokens = normalized_string.split()[0:start_token_state]
    content_tokens = normalized_string.split()[start_token_state:end_token_state + 1]
    content_start_index = 0
    content_end_index = 0
    # Count the characters of every token that comes before the phrase.
    for token in before_tokens:
        content_start_index += len(token)
    # Add the separating spaces to get the start index of the phrase.
    content_start_index += len(before_tokens) + 1

    # Count the characters of the phrase tokens themselves.
    for token in content_tokens:
        content_end_index += len(token)
    # Add the preceding offset and the in-phrase spaces to get the end index.
    content_end_index += (content_start_index - 1) + (len(content_tokens) - 1)

    return content_start_index, content_end_index

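# Worked example (hand-traced from the arithmetic above):
def _find_token_indexes_in_string_example():
    # Tokens 2..3 of "aa bb cc dd" are "cc dd"; the arithmetic above yields
    # content_start_index == 7 and content_end_index == 11.
    return find_token_indexes_in_string("aa bb cc dd", 2, 3)
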
# Takes a text, searches it for the regex patterns defined below, and returns an
# array with, for every match: the matched phrase, its start/end character index,
# the pattern key and pattern value, and the start/end token numbers of the match.
def regex_patterns_finder(section_content):
    # NOTE: "qanone asasi" and "tabsare N" appear twice; in a dict literal the
    # later definition silently overrides the earlier one, so only the second
    # pattern of each pair is actually applied.
    regex_patterns = {
        "asle N asasi": r"اصل\s*شماره\s*(\d+)\s*قانون\s*اساسی\s*جمهوری\s*اسلامی\s*ایران",  # article No. N of the Constitution of the Islamic Republic of Iran
        "qanone asasi": r"(?<!^)\b\sقانون\sاساسی\sجمهوری\sاسلامی\sایران",  # "Constitution of the Islamic Republic of Iran", not at the start of the paragraph
        "qanone asasi": r"(?<!^)\b\sقانون\sاساسی",  # "Constitution", not at the start of the paragraph
        "qanon * mosavvab tarikh": r"\bقانون[\s\w/]*مصوب\s((y\d{2,4}m\d{1,2}d\d{1,2})|\d{2,4})",  # "law ... approved on <date>"
        "in qanon": r"این\sقانون",  # "this law"
        "qanone foq": r"قانون\sفوق",  # "the above law"
        "eslahe qanon": r"قانون\sاصلاح",  # "amendment of the law ..."
        "tabsare foq": r"تبصره\sفوق",  # "the above note"
        "made foq": r"ماده\sفوق",  # "the above article"
        "made vahede": r"ماده\sواحده",  # "single article"
        "made vahed": r"ماده\sواحد",  # "single article" (variant spelling)
        "tabsare N": r"^\bتبصره\s*شماره\s*(\d+)\s*",  # "note No. N", only at the start of the paragraph
        "f tabsare N": r"(?<!^)\bتبصره\sشماره\s(\d+)\s",  # "note No. N", anywhere except the start of the paragraph
        "tabsare N": r"(?<!^)\bتبصره ?\(? ?\d+? ?\)?[ :.]",  # "note N", anywhere except the start of the paragraph
        "made N": r"(?<!^)\bماده ?\(? ?\d+? ?\)?[ :.]",  # "article N", anywhere except the start of the paragraph
        "f made N": r"^\bماده\s*[(]?\s*(\d+)\s*[)]?\s*"  # "article N", only at the start of the paragraph, with or without parentheses around the number
    }

    matched_array = []
    for pattern_key, pattern_value in regex_patterns.items():
        regex = re.compile(pattern_value)
        matches = regex.finditer(section_content)

        for match in matches:
            founded_item = match.group()
            start_index = match.start() + 1
            end_index = match.end() - 1
            pattern_tokens_state = token_state_finder(section_content, start_index, end_index)
            matched_array.append(
                {
                    "founded_item": founded_item,
                    "start_index": start_index,
                    "end_index": end_index,
                    "pattern_key": pattern_key,
                    "pattern_value": pattern_value,
                    "start_token_state": pattern_tokens_state["start_token_state"],
                    "end_token_state": pattern_tokens_state["end_token_state"],
                }
            )

    # Sort the matches by their start token.
    matched_array.sort(key=lambda x: int(x['start_token_state']), reverse=False)
    return matched_array

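# Usage sketch (hand-traced): in "طبق این قانون عمل شود" the "in qanon" pattern
# matches the phrase "این قانون" (tokens 1..2), producing an entry roughly like
#   {"founded_item": "این قانون", "start_index": 5, "end_index": 12,
#    "pattern_key": "in qanon", "pattern_value": r"این\sقانون",
#    "start_token_state": 1, "end_token_state": 2}
def _regex_patterns_finder_example():
    return regex_patterns_finder("طبق این قانون عمل شود")
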
def change_refrece_tokens(normalized_section_content, recognized_patterns_array):
    # Replaces every token that belongs to a recognized reference with the
    # placeholder token 'eeee'.
    token_list = normalized_section_content.strip().split()
    for ref_item in recognized_patterns_array:
        start_token_state = ref_item.get('start_token_state')
        end_token_state = ref_item.get('end_token_state')
        for i in range(start_token_state, end_token_state + 1):
            token_list[i] = 'eeee'
    normalized_section_content = ' '.join(token_list)
    return normalized_section_content.strip()

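# Worked example (hand-traced): tokens 1..2 are masked with the placeholder.
def _change_refrece_tokens_example():
    return change_refrece_tokens("طبق این قانون عمل شود",
                                 [{"start_token_state": 1, "end_token_state": 2}])
    # -> "طبق eeee eeee عمل شود"
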
def getMetaData(text):
    # Full pipeline for one section: normalize the text, collect dates, numbers
    # and reference patterns, and mask the reference tokens in the output text.
    normalized_section_content, recognized_dates, recognized_numbers = normalizerDate2(text.strip())
    recognized_numbers = find_number_indexes_in_string(text, recognized_numbers)
    normalized_section_content = normalized_section_content.strip()
    recognized_patterns_array = regex_patterns_finder(normalized_section_content)
    normalized_section_content = change_refrece_tokens(normalized_section_content, recognized_patterns_array)
    nlp_parser = []
    date_list = recognized_dates
    ref_list = recognized_patterns_array
    for date_item in date_list:
        nlp_parser.append({
            "properties": {
                "type": "date",
                "index_start": int(date_item['start_date_token_index']),
                "index_end": int(date_item['end_date_token_index']),
                "text": date_item['original_date'],
                "result": date_item['converted_date'],
                # "timestamp": date_item['timestamp'],
                "ref_link": ''
            }
        })
    for ref_item in ref_list:
        nlp_parser.append({
            "properties": {
                "type": "reference",
                "index_start": int(ref_item['start_token_state']),
                "index_end": int(ref_item['end_token_state']),
                "text": ref_item['founded_item'],
                "result": ref_item['pattern_value'],
                "ref_link": ''
            }
        })
    return nlp_parser, normalized_section_content

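# Usage sketch (the concrete entries depend on what the Normalizer recognizes):
def _getMetaData_example():
    nlp_parser, masked_text = getMetaData("ماده 5 این قانون مصوب 1375 است")
    # nlp_parser holds dicts of the form
    #   {"properties": {"type": "date" | "reference", "index_start": ..,
    #                   "index_end": .., "text": .., "result": .., "ref_link": ""}}
    # and masked_text is the normalized text with reference tokens replaced by 'eeee'.
    return nlp_parser, masked_text
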
def save_error(error_text, filename):
    # Appends the error text to the given log file, separated by a line of stars.
    with open(filename, 'a+', encoding='utf-8') as file:
        file.write(error_text + '\n' + 50 * '*' + '\n')