def andReplacer(f_name="BibTex.bib"):
    """Collapse BibTeX ``and`` author separators into commas.

    Reads *f_name* line by line; for every line that starts with
    ``author``, strips the ``author = {`` prefix and the trailing
    ``},`` terminator, then replaces each `` and `` separator with
    ``, ``.

    Parameters
    ----------
    f_name : str
        Path of the BibTeX file to read (default ``"BibTex.bib"``,
        matching the previously hard-coded filename).

    Returns
    -------
    str
        The comma-separated author list from the last ``author`` line
        seen, or ``''`` when the file contains no author line.
    """
    import re

    endline = ''
    # `with` guarantees the handle is closed; the original called
    # open() twice and never closed either handle.
    with open(f_name, 'r', encoding="utf8") as bibfile:
        for line in bibfile:
            if not line.startswith('author'):
                continue
            # Anchored sub instead of str.lstrip('author = {'):
            # lstrip treats its argument as a character SET, so it
            # also ate leading name letters such as 't', 'h', 'o'.
            line = re.sub(r'^author\s*=\s*\{', '', line)
            # Likewise remove only the trailing '}' / ',' / newline,
            # never characters inside the names themselves.
            line = re.sub(r'\}?,?\s*$', '', line)
            # BibTeX separates individual authors with ' and '.
            endline = re.sub(r' and ', ', ', line)
    return endline
Search](https://www.google.com/search?q=how+to+tokenizer+split+line+delimiter+python&oq=how+to+tokenizer+split+line+delimiter+python&gs_lcrp=EgZjaHJvbWUyCwgAEEUYChg5GKABMgkIARAhGAoYoAHSAQkxNDY1NGowajeoAgCwAgA&sourceid=chrome&ie=UTF-8) + +[tokenize a string keeping delimiters in Python - Stack Overflow](https://stackoverflow.com/questions/1820336/tokenize-a-string-keeping-delimiters-in-python) diff --git a/main.py b/main.py index acb4830..5400a33 100644 --- a/main.py +++ b/main.py @@ -97,7 +97,26 @@ # __________________________________ -reference = author + ' (' + year + ') ' + '\'' + title + '\'' + ',' + ' ' + journal + ',' + ' ' + volume + '(' + number + ')' + ' ' + 'pp. ' + pages + ', ' + 'available: ' + doi + ' / ' + url + ' [accessed ' + date + '].' +import andReplacer + +andReplacer = andReplacer.andReplacer() + +# __________________________________ + +# note on import of last or each module above: +# import module (file), then do +# imported_module.the_function_in_the_imported_module() +# and set the name previously used for the module to be the_function_from_the_module + +# __________________________________ + +# can use either of the following lines: + +# reference = author + ' (' + year + ') ' + '\'' + title + '\'' + ',' + ' ' + journal + ',' + ' ' + volume + '(' + number + ')' + ' ' + 'pp. ' + pages + ', ' + 'available: ' + doi + ' / ' + url + ' [accessed ' + date + '].' +reference = andReplacer + ' (' + year + ') ' + '\'' + title + '\'' + ',' + ' ' + journal + ',' + ' ' + volume + '(' + number + ')' + ' ' + 'pp. ' + pages + ', ' + 'available: ' + doi + ' / ' + url + ' [accessed ' + date + '].' + + +# reference = andReplacer.andReplacer() # print("title =", title) # print("journal", journal)