added comments to main.py
This commit is contained in: parent b08a35eceb, commit 8fde2c8872
@@ -30,50 +30,79 @@ def copyTextFiles(file):
# Function runs through the tokens of a given file. Entities are stored in a dict, then returned. Called by regexFile().
def entitycollector(tokens):
    # creates a new file that lists all of the found entities.
    with open('output.txt', 'w') as f:
        entities = {}
        # goes through each entity in the token list.
        for ent in sorted(tokens.ents):
            # if ent.label_ == "NORP" or ent.label_ == "LOC" or ent.label_ == "GPE":
            # ebb: This line helps experiment with different spaCy named entity classifiers, in combination if you like.
            # When using it, remember to indent the next lines for the for loop.
            # print(ent.text, ent.label_, spacy.explain(ent.label_))
            entityInfo = [ent.text, ent.label_, spacy.explain(ent.label_)]
            stringify = str(entityInfo)
            f.write(stringify)
            f.write('\n')
            # entities.append(ent.text)
            entities[ent.text] = ent.label_
    # returns all entities, keyed by entity text with the label as the value.
    return entities
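# A minimal usage sketch (hypothetical sentence; assumes `nlp` is the spaCy
# pipeline loaded earlier in main.py):
#   entitycollector(nlp("Barack Obama visited Ohio"))
#   # -> {'Barack Obama': 'PERSON', 'Ohio': 'GPE'}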

# Function runs regex through the given file.
def regexFile(file):
    fileDir = os.path.join(outputPath, file)
    with PySaxonProcessor(license=False) as proc:
        # grabs the original xml file and stores it in a variable for later; the XPath query below pulls the text out of it.
        xml = open(fileDir, encoding='utf-8').read()
        xp = proc.new_xpath_processor()
        node = proc.parse_xml(xml_text=xml)
        xp.set_context(xdm_item=node)

        # the XPath expression goes through the original text and joins it all into a single string.
        xpath = xp.evaluate('//p ! normalize-space() => string-join()')
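        # Note: the one-argument string-join() concatenates the normalized
        # paragraphs with no separator, so the last word of one <p> can fuse
        # with the first word of the next; string-join(' ') would keep a
        # space between them.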
        string = str(xpath)

        # regex goes through the text and replaces anything that is not a letter with a space.
        cleanedText = regex.sub('[^A-Za-z]+', ' ', string)
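        # '[^A-Za-z]' is spelled out deliberately: the shorter '[^A-z]' also
        # spans the ASCII range between 'Z' and 'a', so '[', '\', ']', '^',
        # '_' and '`' would slip through the cleanup, e.g.:
        #   regex.sub('[^A-z]+', ' ', 'a_b')    -> 'a_b'  (underscore kept)
        #   regex.sub('[^A-Za-z]+', ' ', 'a_b') -> 'a b'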

        # gets the tokens of the clean text.
        tokens = nlp(cleanedText)

        wrappedText = xml
        # grabs all the entities in the file and stores them in a dict.
        listEntities = entitycollector(tokens)
        # print(listEntities)
        # if anything exists in the dict, the following code will run.
        if listEntities:
            # checks each entity's type; here we are looking for "PERSON"
            # entities, which cover people's names.
            for entity in listEntities.keys():
                # print(entity, listEntities[entity])
                if listEntities[entity] == "PERSON":
                    # key_template is the markup we wrap around found instances.
                    key_template = "<ent type = 'person'>" + entity + "</ent>"
                    # replaces every occurrence of the entity in wrappedText with the wrapped form.
                    wrappedText = wrappedText.replace(entity, key_template)
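                    # Caveat: str.replace() also matches text that is already
                    # inside an <ent> wrapper (e.g. when one PERSON string is
                    # a substring of another), which is how the duplicated
                    # opening tags that checkTags() below targets can arise.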
                    # Saves the newly wrapped text and then writes it into the new file.
                    with open(fileDir, 'w', encoding='utf8') as f:
                        f.write(wrappedText)
                    print("WRAPPING " + entity)
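        # Design note: the file is rewritten once per wrapped PERSON entity;
        # moving the `with open(...)` block after the for loop would write the
        # finished wrappedText a single time instead.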

# This part of the code does not run yet. It is a WIP.
# It tries to find weird or invalid elements/tags and fix them.
def checkTags(file):
    content = []
    fileDir = os.path.join(outputPath, file)

    with open(fileDir, 'r', encoding='utf8') as inFile:
        for line in inFile:
            content.append(line)
    # With the contents copied, a loop goes through the list and writes it all back into the file in the output folder.
    with open(fileDir, 'w', encoding='utf8') as f:
        for line in content:
            match = regex.search("(<ent type = 'person'>){2,}(.+?)</ent>", line)
            if match:
                print("broken line found, fixing...")
                # collapses the run of duplicated opening tags down to one; the
                # closing tag left over from the nesting pairs back up with it.
                newLine = regex.sub("(<ent type = 'person'>){2,}(.+?)</ent>", r"\1 \2", line)
                print(line + "\n INTO:")
                print(newLine)
                f.write(newLine)
            else:
                f.write(line)
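# A small illustration (hypothetical line) of the breakage checkTags targets:
#   "<ent type = 'person'><ent type = 'person'>Nixon</ent></ent>"
# would be rewritten to
#   "<ent type = 'person'> Nixon</ent>"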


for file in insideDir:
    copyTextFiles(file)
    regexFile(file)
    # checkTags(file)
print("File checking finished.")