{ "cells": [ { "cell_type": "code", "execution_count": null, "id": "f720c141", "metadata": {}, "outputs": [], "source": [ "import os\n", "import sys\n", "import math\n", "from random import randint\n", "import random\n", "import time\n", "from datetime import datetime\n", "import re, string, unicodedata\n", "import nltk\n", "import contractions\n", "import inflect\n", "from bs4 import BeautifulSoup\n", "from nltk import word_tokenize, sent_tokenize\n", "from nltk.corpus import stopwords\n", "from nltk.stem.isri import ISRIStemmer\n", "from nltk.stem.porter import PorterStemmer\n", "from nltk.stem.snowball import SnowballStemmer\n", "from nltk.stem import LancasterStemmer, WordNetLemmatizer\n", "from nltk.tag import StanfordNERTagger\n", "from nltk.tokenize import word_tokenize, sent_tokenize\n", "import spacy\n", "import torch\n", "from collections import defaultdict\n", "import pickle\n", "import numpy as np\n", "\n", "sys.path.append(os.path.abspath(\"../lib\"))\n", "from util import *\n", "from mlutil import *\n", "\n", "lcc = [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\", \"k\",\"l\",\"m\",\"n\",\"o\",\n", "\"p\",\"q\",\"r\",\"s\",\"t\",\"u\",\"v\",\"w\",\"x\",\"y\",\"z\"]\n", "ucc = [\"A\",\"B\",\"C\",\"D\",\"E\",\"F\",\"G\",\"H\",\"I\",\"J\",\"K\",\"L\",\"M\", \"N\",\"O\",\"P\",\"Q\",\"R\",\"S\",\"T\",\"U\",\"V\",\"W\",\"X\",\"Y\",\"Z\"]\n", "dig = [\"0\",\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\"]\n", "spc = [\"@\",\"#\",\"$\",\"%\",\"^\",\"&\",\"*\",\"(\",\")\",\"_\",\"+\",\"{\",\"}\",\"[\",\"]\",\"|\",\":\",\"<\",\">\",\"?\",\";\",\",\",\".\"]\n", "\n", "\n", "class TextPreProcessor:\n", " \"\"\"\n", " text preprocessor\n", " \"\"\"\n", " def __init__(self, stemmer = \"lancaster\", verbose=False):\n", " self.stemmer = stemmer\n", " self.verbose = verbose\n", " self.lemmatizer = WordNetLemmatizer()\n", "\n", " def stripHtml(self, text):\n", " soup = BeautifulSoup(text, \"html.parser\")\n", " return soup.get_text()\n", "\n", " def removeBetweenSquareBrackets(self, text):\n", " return re.sub(r'\\[[^]]*\\]', '', text)\n", "\n", " def denoiseText(self, text):\n", " text = self.stripHtml(text)\n", " text = self.removeBetweenSquareBrackets(text)\n", " return text\n", "\n", " def replaceContractions(self, text):\n", " \"\"\"Replace contractions in string of text\"\"\"\n", " return contractions.fix(text)\n", "\n", " def tokenize(self, text):\n", " words = nltk.word_tokenize(text)\n", " return words\n", "\n", " def removeNonAscii(self, words):\n", " \"\"\"Remove non-ASCII characters from list of tokenized words\"\"\"\n", " newWords = []\n", " for word in words:\n", " if isinstance(word, str):\n", " newWord = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('ascii')\n", " else:\n", " newWord = word\n", " newWords.append(newWord)\n", " return newWords\n", "\n", " def replaceNonAsciiFromText(self, text):\n", " \"\"\" replaces non ascii characters with space \"\"\"\n", " return ''.join([i if ord(i) < 128 else ' ' for i in text])\n", "\n", " def removeNonAsciiFromText(self, text):\n", " \"\"\" removes non ascii characters \"\"\"\n", " return ''.join([i if ord(i) < 128 else '' for i in text])\n", "\n", " def allow(self, words):\n", " \"\"\" allow only specific characters \"\"\"\n", " allowed = [word for word in words if re.match('^[A-Za-z0-9\\.\\,\\:\\;\\!\\?\\(\\)\\'\\-\\$\\@\\%\\\"]+$', word) is not None]\n", " return allowed\n", "\n", " def toLowercase(self, words):\n", " \"\"\"Convert all characters to lowercase from list of tokenized words\"\"\"\n", " newWords = [word.lower() for word in words]\n",
" return newWords\n", "\n", " def removePunctuation(self, words):\n", " \"\"\"Remove punctuation from list of tokenized words\"\"\"\n", " newWords = []\n", " for word in words:\n", " newWord = re.sub(r'[^\\w\\s]', '', word)\n", " if newWord != '':\n", " newWords.append(newWord)\n", " return newWords\n", "\n", " def replaceNumbers(self, words):\n", " \"\"\"Replace all integer occurrences in list of tokenized words with textual representation\"\"\"\n", " p = inflect.engine()\n", " newWords = []\n", " for word in words:\n", " if word.isdigit():\n", " newWord = p.number_to_words(word)\n", " newWords.append(newWord)\n", " else:\n", " newWords.append(word)\n", " return newWords\n", "\n", " def removeStopwords(self, words):\n", " \"\"\"Remove stop words from list of tokenized words\"\"\"\n", " stopWords = set(stopwords.words('english'))\n", " newWords = []\n", " for word in words:\n", " if word not in stopWords:\n", " newWords.append(word)\n", " return newWords\n", "\n", " def removeCustomStopwords(self, words, stopWords):\n", " \"\"\"Remove custom stop words from list of tokenized words\"\"\"\n", " removed = [word for word in words if word not in stopWords]\n", " return removed\n", "\n", " def removeLowFreqWords(self, words, minFreq):\n", " \"\"\"Remove low frequency words from list of tokenized words\"\"\"\n", " frequency = defaultdict(int)\n", " for word in words:\n", " frequency[word] += 1\n", " removed = [word for word in words if frequency[word] > minFreq]\n", " return removed\n", "\n", " def removeNumbers(self, words):\n", " \"\"\"Remove numbers\"\"\"\n", " removed = [word for word in words if not isNumber(word)]\n", " return removed\n", "\n", " def removeShortWords(self, words, minLength):\n", " \"\"\"Remove short words\"\"\"\n", " removed = [word for word in words if len(word) >= minLength]\n", " return removed\n", "\n", " def keepAllowedWords(self, words, keepWords):\n", " \"\"\"Keep words from the list only\"\"\"\n", " kept = [word for word in words if word in keepWords]\n", " return kept\n", "\n", " def stemWords(self, words):\n", " \"\"\"Stem words in list of tokenized words\"\"\"\n", " if self.stemmer == \"lancaster\":\n", " stemmer = LancasterStemmer()\n", " elif self.stemmer == \"snowball\":\n", " stemmer = SnowballStemmer(\"english\")\n", " elif self.stemmer == \"porter\":\n", " stemmer = PorterStemmer()\n", " else:\n", " raise ValueError(\"invalid stemmer \" + self.stemmer)\n", " stems = [stemmer.stem(word) for word in words]\n", " return stems\n", "\n", " def lemmatizeWords(self, words):\n", " \"\"\"Lemmatize tokens in list of tokenized words\"\"\"\n", " lemmas = [self.lemmatizer.lemmatize(word) for word in words]\n", " return lemmas\n", "\n", " def lemmatizeVerbs(self, words):\n", " \"\"\"Lemmatize verbs in list of tokenized words\"\"\"\n", " lemmas = [self.lemmatizer.lemmatize(word, pos='v') for word in words]\n", " return lemmas\n", "\n", " def normalize(self, words):\n", " words = self.removeNonAscii(words)\n", " words = self.toLowercase(words)\n", " words = self.removePunctuation(words)\n", " words = self.replaceNumbers(words)\n", " words = self.removeStopwords(words)\n", " return words\n", "\n", " def posTag(self, textTokens):\n", " tags = nltk.pos_tag(textTokens)\n", " return tags\n", "\n", " def extractEntity(self, textTokens, classifierPath, jarPath):\n", " st = StanfordNERTagger(classifierPath, jarPath)\n", " entities = st.tag(textTokens)\n", " return entities\n", "\n", " def documentFeatures(self, document, wordFeatures):\n", " documentWords = set(document)\n", " features = {}\n", " for word in wordFeatures:\n", " features[word] = (word in documentWords)\n", " 
return features\n", "\n", "class NGram:\n", " \"\"\"\n", " word ngram\n", " \"\"\"\n", " def __init__(self, vocFilt, verbose=False):\n", " \"\"\"\n", " initialize\n", " \"\"\"\n", " self.vocFilt = vocFilt\n", " self.nGramCounter = dict()\n", " self.nGramFreq = dict()\n", " self.corpSize = 0\n", " self.vocabulary = set()\n", " self.freqDone = False\n", " self.verbose = verbose\n", " self.vecWords = None\n", " self.nonZeroCount = 0\n", "\n", " def countDocNGrams(self, words):\n", " \"\"\"\n", " count words in a doc\n", " \"\"\"\n", " if self.verbose:\n", " print (\"doc size \" + str(len(words)))\n", " nGrams = self.toNGram(words)\n", " for nGram in nGrams:\n", " count = self.nGramCounter.get(nGram, 0)\n", " self.nGramCounter[nGram] = count + 1\n", " self.corpSize += 1\n", " self.vocabulary.update(words)\t\n", "\n", " def remLowCount(self, minCount):\n", " \"\"\"\n", " removes items with count below threshold\n", " \"\"\"\n", " self.nGramCounter = dict(filter(lambda item: item[1] >= minCount, self.nGramCounter.items()))\n", "\n", " def getVocabSize(self):\n", " \"\"\"\n", " get vocabulary size\n", " \"\"\"\n", " return len(self.nGramCounter)\n", "\n", " def getNGramFreq(self):\n", " \"\"\"\n", " get normalized count\n", " \"\"\"\n", " if self.verbose:\n", " print (\"counter size \" + str(len(self.nGramCounter)))\n", " if not self.freqDone:\n", " for item in self.nGramCounter.items():\n", " self.nGramFreq[item[0]] = float(item[1]) / self.corpSize\t\t\t\t\t\n", " self.freqDone = True\n", " return self.nGramFreq\n", "\n", " def getNGramIndex(self, show):\n", " \"\"\"\n", " convert to list\n", " \"\"\"\n", " if self.vecWords is None:\n", " self.vecWords = list(self.nGramCounter)\n", " if show:\n", " for vw in enumerate(self.vecWords):\n", " print(vw)\n", "\n", " def getVector(self, words, byCount, normalized):\n", " \"\"\"\n", " convert to vector\n", " \"\"\"\n", " if self.vecWords is None:\n", " self.vecWords = list(self.nGramCounter)\n", "\n", " nGrams = self.toNGram(words)\n", " if self.verbose:\n", " print(\"vocabulary size {}\".format(len(self.vecWords)))\n", " print(\"ngrams\")\n", " print(nGrams)\n", " self.nonZeroCount = 0\n", " vec = list(map(lambda vw: self.getVecElem(vw, nGrams, byCount, normalized), self.vecWords))\n", " return vec\n", "\n", " def getVecElem(self, vw, nGrams, byCount, normalized):\n", " \"\"\"\n", " get vector element\n", " \"\"\"\n", " if vw in nGrams:\n", " if byCount:\n", " if normalized:\n", " el = self.nGramFreq[vw]\n", " else:\n", " el = self.nGramCounter[vw]\n", " else:\n", " el = 1\n", " self.nonZeroCount += 1\n", " else:\n", " if (byCount and normalized):\n", " el = 0.0\n", " else:\n", " el = 0\n", " return el\n", "\n", " def getNonZeroCount(self):\n", " \"\"\"\n", " get non zero vector element count\n", " \"\"\"\n", " return self.nonZeroCount\n", "\n", " def toBiGram(self, words):\n", " \"\"\"\n", " convert to bigram\n", " \"\"\"\n", " if self.verbose:\n", " print (\"doc size \" + str(len(words)))\n", " biGrams = list()\n", " for i in range(len(words)-1):\n", " w1 = words[i]\n", " w2 = words[i+1]\n", " if self.vocFilt is None or (w1 in self.vocFilt and w2 in self.vocFilt):\n", " nGram = (w1, w2)\n", " biGrams.append(nGram)\n", " return biGrams\n", "\n", " def toTriGram(self, words):\n", " \"\"\"\n", " convert to trigram\n", " \"\"\"\n", " if self.verbose:\n", " print (\"doc size \" + str(len(words)))\n", " triGrams = list()\n", " for i in range(len(words)-2):\n", " w1 = words[i]\n", " w2 = words[i+1]\n", " w3 = words[i+2]\n", " if self.vocFilt is None or (w1 
in self.vocFilt and w2 in self.vocFilt and w3 in self.vocFilt):\n", " nGram = (w1, w2, w3)\n", " triGrams.append(nGram)\n", " return triGrams\n", "\n", " def save(self, saveFile):\n", " \"\"\"\n", " save \n", " \"\"\"\n", " sf = open(saveFile, \"wb\")\n", " pickle.dump(self, sf)\n", " sf.close()\n", "\n", " @staticmethod\n", " def load(saveFile):\n", " \"\"\"\n", " load\n", " \"\"\"\n", " sf = open(saveFile, \"rb\")\n", " nGrams = pickle.load(sf)\n", " sf.close()\n", " return nGrams\n", "\n", "class CharNGram:\n", " \"\"\"\n", " character n gram\n", " \"\"\"\n", " def __init__(self, domains, ngsize, verbose=False):\n", " \"\"\"\n", " initialize\n", " \"\"\"\n", " self.chDomain = list()\n", " self.ws = \"#\"\n", " self.chDomain.append(self.ws)\n", " for d in domains:\n", " if d == \"lcc\":\n", " self.chDomain.extend(lcc)\n", " elif d == \"ucc\":\n", " self.chDomain.extend(ucc)\n", " elif d == \"dig\":\n", " self.chDomain.extend(dig)\n", " elif d == \"spc\":\n", " self.chDomain.extend(spc)\n", " else:\n", " raise ValueError(\"invalid character type \" + d)\n", "\n", " self.ngsize = ngsize\n", " self.radixPow = None\n", " self.cntVecSize = None\n", "\n", " def addSpChar(self, spChar):\n", " \"\"\"\n", " add special characters\n", " \"\"\"\n", " self.chDomain.extend(spChar)\n", "\n", " def setWsRepl(self, ws):\n", " \"\"\"\n", " set white space replacement charater\n", " \"\"\"\n", " self.ws = ws\n", " self.chDomain[0] = self.ws\n", "\n", " def finalize(self):\n", " \"\"\"\n", " final setup\n", " \"\"\"\t\t\n", " domSize = len(self.chDomain)\n", " self.cntVecSize = int(math.pow(domSize, self.ngsize))\n", " if self.radixPow is None:\n", " self.radixPow = list()\n", " for i in range(self.ngsize-1, 0, -1):\n", " self.radixPow.append(int(math.pow(domSize, i)))\n", " self.radixPow.append(1)\n", "\n", "\n", " def toMgramCount(self, text):\n", " \"\"\"\n", " get ngram count list\n", " \"\"\"\n", " #print(text)\n", " ngCounts = [0] * self.cntVecSize\n", "\n", " ngram = list()\n", " totNgCount = 0\n", " for ch in text:\n", " if ch.isspace():\n", " l = len(ngram)\n", " if l == 0 or ngram[l-1] != self.ws:\n", " ngram.append(self.ws)\n", " else:\n", " ngram.append(ch)\n", "\n", " if len(ngram) == self.ngsize:\n", " i = self.__getNgramIndex(ngram)\n", " assert i < self.cntVecSize, \"ngram index out of range index \" + str(i) + \" size \" + str(self.cntVecSize) \n", " ngCounts[i] += 1\n", " ngram.clear()\n", " totNgCount += 1\n", "\n", " return ngCounts\n", "\n", " def __getNgramIndex(self, ngram):\n", " \"\"\"\n", " get index of an ngram into a list of size equal total number of possible ngrams\n", " \"\"\"\n", " assert len(ngram) == len(self.radixPow), \"ngram size mismatch\"\t\t\n", " ngi = 0\n", " for ch, rp in zip(ngram, self.radixPow):\n", " i = self.chDomain.index(ch)\n", " ngi += i * rp\n", "\n", " return ngi\n", "\n", "\n", "class TfIdf:\n", " \"\"\"\n", " TF IDF\t\n", " \"\"\"\n", " def __init__(self, vocFilt, doIdf, verbose=False):\n", " \"\"\"\n", " initialize\n", " \"\"\"\n", " self.vocFilt = vocFilt\n", " self.doIdf = doIdf\n", " self.wordCounter = {}\n", " self.wordFreq = {}\n", " self.wordInDocCount = {}\n", " self.docCount = 0\n", " self.corpSize = 0\n", " self.freqDone = False\n", " self.vocabulary = set()\n", " self.wordIndex = None\n", " self.verbose = verbose\n", " self.vecWords = None\n", "\n", " def countDocWords(self, words):\n", " \"\"\"\n", " count words in a doc\n", " \"\"\"\n", " if self.verbose:\n", " print (\"doc size \" + str(len(words)))\n", " for word in words:\n", " if 
self.vocFilt is None or word in self.vocFilt:\n", " count = self.wordCounter.get(word, 0)\n", " self.wordCounter[word] = count + 1\n", " self.corpSize += len(words)\n", " self.vocabulary.update(words)\n", "\n", " if (self.doIdf):\n", " self.docCount += 1\n", " for word in set(words):\n", " count = self.wordInDocCount.get(word, 0)\n", " self.wordInDocCount[word] = count + 1\n", " self.freqDone = False\n", "\n", "\n", " def getWordFreq(self):\n", " \"\"\"\n", " get tfidf for corpus\n", " \"\"\"\n", " if self.verbose:\n", " print (\"counter size \" + str(len(self.wordCounter)))\n", " if not self.freqDone:\n", " for item in self.wordCounter.items():\n", " self.wordFreq[item[0]] = float(item[1]) / self.corpSize\n", " if self.doIdf:\n", " for k in self.wordFreq.keys():\n", " self.wordFreq[k] *= math.log(self.docCount / self.wordInDocCount[k])\n", " self.freqDone = True\n", " return self.wordFreq\n", "\n", " def getCount(self, word):\n", " \"\"\"\n", " get counter\n", " \"\"\"\n", " if word in self.wordCounter:\n", " count = self.wordCounter[word]\n", " else:\n", " raise ValueError(\"word not found in count table \" + word)\n", " return count\n", "\n", " def getFreq(self, word):\n", " \"\"\"\n", " get normalized frequency\n", " \"\"\"\n", " if word in self.wordFreq:\n", " freq = self.wordFreq[word]\n", " else:\n", " raise ValueError(\"word not found in frequency table \" + word)\n", " return freq\n", "\n", " def resetCounter(self):\n", " \"\"\"\n", " reset counter\n", " \"\"\"\n", " self.wordCounter = {}\n", "\n", " def buildVocabulary(self, words):\n", " \"\"\"\n", " build vocabulary\n", " \"\"\"\n", " self.vocabulary.update(words)\n", "\n", " def getVocabulary(self):\n", " \"\"\"\n", " return vocabulary\n", " \"\"\"\n", " return self.vocabulary\n", "\n", " def creatWordIndex(self):\n", " \"\"\"\n", " index for all words in vocabulary\n", " \"\"\"\n", " self.wordIndex = {word : idx for idx, word in enumerate(list(self.vocabulary))}\n", "\n", " def getVector(self, words, byCount, normalized):\n", " \"\"\"\n", " get vector\n", " \"\"\"\n", " if self.vecWords is None:\n", " self.vecWords = list(self.wordCounter)\n", " vec = list(map(lambda vw: self.getVecElem(vw, words, byCount, normalized), self.vecWords))\n", " return vec\n", "\n", " def getVecElem(self, vw, words, byCount, normalized):\n", " \"\"\"\n", " vector element\n", " \"\"\"\n", " el = 0\n", " if vw in words:\n", " if byCount:\n", " if normalized:\n", " el = self.wordFreq[vw]\n", " else:\n", " el = self.wordCounter[vw]\n", " else:\n", " el = 1\n", " return el\n", "\n", " def save(self, saveFile):\n", " \"\"\"\n", " save\n", " \"\"\"\n", " sf = open(saveFile, \"wb\")\n", " pickle.dump(self, sf)\n", " sf.close()\n", "\n", " # load \n", " @staticmethod\n", " def load(saveFile):\n", " \"\"\"\n", " load\n", " \"\"\"\n", " sf = open(saveFile, \"rb\")\n", " tfidf = pickle.load(sf)\n", " sf.close()\n", " return tfidf\n", "\n", "# bigram\n", "class BiGram(NGram):\n", " def __init__(self, vocFilt, verbose=False):\n", " \"\"\"\n", " initialize\n", " \"\"\"\n", " super(BiGram, self).__init__(vocFilt, verbose)\n", "\n", " def toNGram(self, words):\n", " \"\"\"\n", " convert to Ngrams\n", " \"\"\"\n", " return self.toBiGram(words)\n", "\n", "# trigram\n", "class TriGram(NGram):\n", " def __init__(self, vocFilt, verbose=False):\n", " \"\"\"\n", " initialize\n", " \"\"\"\n", " super(TriGram, self).__init__(vocFilt, verbose)\n", "\n", " def toNGram(self, words):\n", " \"\"\"\n", " convert to Ngrams\n", " \"\"\"\n", " return self.toTriGram(words)\n",
"\n", "\n", "\n", "class DocSentences:\n", " \"\"\"\n", " sentence processor\n", " \"\"\"\n", " def __init__(self, filePath, minLength, verbose, text=None):\n", " \"\"\"\n", " initialize\n", " \"\"\"\n", " if filePath:\n", " self.filePath = filePath\n", " with open(filePath, 'r') as contentFile:\n", " content = contentFile.read()\n", " elif text:\n", " content = text\n", " else:\n", " raise ValueError(\"either file path or text must be provided\")\n", "\n", " #self.sentences = content.split('.')\n", " self.verbose = verbose\n", " tp = TextPreProcessor()\n", " content = tp.removeNonAsciiFromText(content)\n", " sentences = sent_tokenize(content)\n", " self.sentences = list(filter(lambda s: len(nltk.word_tokenize(s)) >= minLength, sentences))\n", " if self.verbose:\n", " print (\"num of sentences after length filter \" + str(len(self.sentences)))\n", " self.sentencesAsTokens = [clean(s, tp, verbose) for s in self.sentences]\n", "\n", " # get sentence tokens\n", " def getSentencesAsTokens(self):\n", " return self.sentencesAsTokens\n", "\n", " # get sentences\n", " def getSentences(self):\n", " return self.sentences\n", "\n", " # build term freq table\n", " def getTermFreqTable(self):\n", " # term count table for all words\n", " termTable = TfIdf(None, False)\n", " sentWords = self.getSentencesAsTokens()\n", " for seWords in sentWords:\n", " termTable.countDocWords(seWords)\n", " return termTable\n", "\n", "# word vector container\n", "class WordVectorContainer:\n", " def __init__(self, dirPath, verbose):\n", " \"\"\"\n", " initialize\n", " \"\"\"\n", " self.docs = list()\n", " self.wordVectors = list()\n", " self.numWordVectors = None\n", " self.tp = TextPreProcessor()\n", " self.similarityAlgo = \"cosine\"\n", " self.simAlgoNormalizer = None\n", " self.termTable = None\n", " self.verbose = verbose\n", "\n", "\n", " def addDir(self, dirPath):\n", " \"\"\"\n", " add content of all files in a directory\n", " \"\"\"\n", " docs, filePaths = getFileContent(dirPath, self.verbose)\n", " self.docs.extend(docs)\n", " self.wordVectors.extend([clean(doc, self.tp, self.verbose) for doc in docs])\n", "\n", " def addFile(self, filePath):\n", " \"\"\"\n", " add file content\n", " \"\"\"\n", " with open(filePath, 'r') as contentFile:\n", " content = contentFile.read()\n", " self.wordVectors.append(clean(content, self.tp, self.verbose))\n", "\n", " def addText(self, text):\n", " \"\"\"\n", " add text\n", " \"\"\"\n", " self.wordVectors.append(clean(text, self.tp, self.verbose))\n", "\n", " def addWords(self, words):\n", " \"\"\"\n", " add words\n", " \"\"\"\n", " self.wordVectors.append(words)\n", "\n", " def withSimilarityAlgo(self, algo, normalizer=None):\n", " \"\"\"\n", " set similarity algo\n", " \"\"\"\n", " self.similarityAlgo = algo\n", " self.simAlgoNormalizer = normalizer\n", "\n", " def getDocsWords(self):\n", " \"\"\"\n", " get word vectors\n", " \"\"\"\n", " return self.wordVectors\n", "\n", " def getDocs(self):\n", " \"\"\"\n", " get docs\n", " \"\"\"\n", " return self.docs\n", "\n", " def getTermFreqTable(self):\n", " \"\"\"\n", " term count table for all words\n", " \"\"\"\n", " self.termTable = TfIdf(None, False)\n", " for words in self.wordVectors:\n", " self.termTable.countDocWords(words)\n", " self.termTable.getWordFreq()\n", " return self.termTable\n", "\n", " def getPairWiseSimilarity(self, byCount, normalized):\n", " \"\"\"\n", " pair wise similarity\n", " \"\"\"\n", " self.getNumWordVectors(byCount, normalized)\n", "\n", " size = len(self.wordVectors)\n", " simArray = np.empty(shape=(size,size))\n", " for i in range(size):\n", " simArray[i][i] = 1.0\n", "\n",
" for i in range(size):\n", " for j in range(i+1, size):\n", " if self.similarityAlgo == \"cosine\":\n", " sim = cosineSimilarity(self.numWordVectors[i], self.numWordVectors[j])\n", " elif self.similarityAlgo == \"jaccard\":\n", " sim = jaccardSimilarity(self.wordVectors[i], self.wordVectors[j],\\\n", " self.simAlgoNormalizer[0], self.simAlgoNormalizer[1])\n", " else:\n", " raise ValueError(\"invalid similarity algorithm\")\n", " simArray[i][j] = sim\n", " simArray[j][i] = sim\n", " return simArray\n", "\n", " def getInterSetSimilarity(self, byCount, normalized, split):\n", " \"\"\"\n", " inter set pair wise similarity\n", " \"\"\"\n", " self.getNumWordVectors(byCount, normalized)\n", " size = len(self.wordVectors)\n", " if not self.similarityAlgo == \"jaccard\":\n", " firstNumVec = self.numWordVectors[:split]\n", " secNumVec = self.numWordVectors[split:]\n", " fiSize = len(firstNumVec)\n", " seSize = len(secNumVec)\n", " else:\n", " firstVec = self.wordVectors[:split]\n", " secVec = self.wordVectors[split:]\n", " fiSize = len(firstVec)\n", " seSize = len(secVec)\n", "\n", " simArray = np.empty(shape=(fiSize,seSize))\n", " for i in range(fiSize):\n", " for j in range(seSize):\n", " if self.similarityAlgo == \"cosine\":\n", " sim = cosineSimilarity(firstNumVec[i], secNumVec[j])\n", " elif self.similarityAlgo == \"jaccard\":\n", " sim = jaccardSimilarity(firstVec[i], secVec[j],\\\n", " self.simAlgoNormalizer[0], self.simAlgoNormalizer[1])\n", " else:\n", " raise ValueError(\"invalid similarity algorithm\")\n", " simArray[i][j] = sim\n", " return simArray\n", "\n", " def getNumWordVectors(self, byCount, normalized):\n", " \"\"\"\n", " get numeric vectors, building the term table first if needed\n", " \"\"\"\n", " if not self.similarityAlgo == \"jaccard\":\n", " if self.termTable is None:\n", " self.getTermFreqTable()\n", " if self.numWordVectors is None:\n", " self.numWordVectors = list(map(lambda wv: self.termTable.getVector(wv, byCount, normalized), self.wordVectors))\n", "\n", "# fragments documents into whole doc, paragraph or passages\n", "class TextFragmentGenerator:\n", " def __init__(self, level, minParNl, passSize, verbose=False):\n", " \"\"\"\n", " initialize\n", " \"\"\"\n", " self.level = level\n", " self.minParNl = minParNl\n", " self.passSize = passSize\n", " self.fragments = None\n", " self.verbose = verbose\n", "\n", " def loadDocs(self, fpaths):\n", " \"\"\"\n", " loads documents from one file, multiple files or all files under directory\n", " \"\"\"\n", " fPaths = fpaths.split(\",\")\n", " if len(fPaths) == 1:\n", " if os.path.isfile(fPaths[0]):\n", " #one file\n", " if self.verbose:\n", " print(\"got one file from path\")\n", " dnames = fPaths\n", " docStr = getOneFileContent(fPaths[0])\n", " dtexts = [docStr]\n", " else:\n", " #all files under directory\n", " if self.verbose:\n", " print(\"got all files under directory from path\")\n", " dtexts, dnames = getFileContent(fPaths[0])\n", " if self.verbose:\n", " print(\"found {} files\".format(len(dtexts)))\n", " else:\n", " #list of files\n", " if self.verbose:\n", " print(\"got list of files from path\")\n", " dnames = fPaths\n", " dtexts = list(map(getOneFileContent, fPaths))\n", " if self.verbose:\n", " print(\"found {} files\".format(len(dtexts)))\n", "\n", " ndocs = (dtexts, dnames)\n", " if self.verbose:\n", " print(\"docs\")\n", " for dn, dt in zip(dnames, dtexts):\n", " print(dn + \"\\t\" + dt[:40])\n", "\n", " return ndocs\n", "\n", " def generateFragmentsFromFiles(self, fpaths):\n", " \"\"\"\n", " fragments documents into whole doc, paragraph or passages\n", " \"\"\"\n", " dtexts, dnames = self.loadDocs(fpaths)\n", " return self.generateFragments(dtexts, 
dnames)\n", "\n", "\n", " def generateFragmentsFromNamedDocs(self, ndocs):\n", " \"\"\"\n", " fragments documents into whole doc, paragraph or passages\n", " \"\"\"\n", " dtexts = list(map(lambda nd : nd[1], ndocs))\n", " dnames = list(map(lambda nd : nd[0], ndocs))\n", " #for i in range(len(dtexts)):\n", " #\tprint(dnames[i])\n", " #\tprint(dtexts[i][:40])\n", " return self.generateFragments(dtexts, dnames)\n", "\n", " def generateFragments(self, dtexts, dnames):\n", " \"\"\"\n", " fragments documents into whole doc, paragraph or passages\n", " \"\"\"\n", " if self.level == \"para\" or self.level == \"passage\":\n", " #split paras\n", " dptexts = list()\n", " dpnames = list()\n", " for dt, dn in zip(dtexts, dnames):\n", " paras = getParas(dt, self.minParNl)\n", " if self.verbose:\n", " print(dn)\n", " print(\"no of paras {}\".format(len(paras)))\n", " dptexts.extend(paras)\n", " pnames = list(map(lambda i : dn + \":\" + str(i), range(len(paras))))\n", " dpnames.extend(pnames)\n", " dtexts = dptexts\n", " dnames = dpnames\n", "\n", " if self.level == \"passage\":\n", " #split each para into passages\n", " dptexts = list()\n", " dpnames = list()\n", " for dt, dn in zip(dtexts, dnames):\n", " sents = sent_tokenize(dt.strip())\t\t\t\n", " if self.verbose:\n", " print(dn)\n", " print(\"no of sentences {}\".format(len(sents)))\n", " span = self.passSize\n", " if len(sents) <= span:\n", " pass\n", " else:\n", " for i in range(0, len(sents) - span, 1):\n", " dptext = None\n", " for j in range(span):\n", " if dptext is None:\n", " dptext = sents[i + j] + \". \"\n", " else:\n", " dptext = dptext + sents[i + j] + \". \" \n", " dpname = dn + \":\" + str(i)\n", " dptexts.append(dptext)\n", " dpnames.append(dpname)\n", "\n", " dtexts = dptexts\n", " dnames = dpnames\n", "\n", " self.fragments = list(zip(dnames, dtexts))\n", " #if self.verbose:\n", " #\tprint(\"num fragments {}\".format(len(self.fragments)))\n", " return self.fragments\n", "\n", " def showFragments(self):\n", " \"\"\"\n", " show fragments\n", " \"\"\"\n", " print(\"showing all \" + self.level + \" for the first 40 characters\")\n", " for dn, dt in self.fragments:\n", " print(dn + \"\\t\" + dt[:40])\n", "\n", " def isDocLevel(self):\n", " \"\"\"\n", " true if fragment is at doc level\n", " \"\"\"\n", " return self.level != \"para\" and self.level != \"passage\"\n", "\n", "# clean doc to create term array\n", "def clean(doc, preprocessor, verbose):\n", " \"\"\"\n", " text pre process\n", " \"\"\"\n", " if verbose:\n", " print (\"--raw doc\")\n", " print (doc)\n", " #print \"next clean\"\n", " doc = preprocessor.removeNonAsciiFromText(doc)\n", " words = preprocessor.tokenize(doc)\n", " words = preprocessor.allow(words)\n", " words = preprocessor.toLowercase(words)\n", " words = preprocessor.removeStopwords(words)\n", " words = preprocessor.removeShortWords(words, 3)\n", " words = preprocessor.removePunctuation(words)\n", " words = preprocessor.lemmatizeWords(words)\n", " #words = preprocessor.removeNonAscii(words)\n", " if verbose:\n", " print (\"--after pre processing\")\n", " print (words)\n", " return words\n", "\n", "# get sentences\n", "def getSentences(filePath):\n", " \"\"\"\n", " text pre process\n", " \"\"\"\n", " with open(filePath, 'r') as contentFile:\n", " content = contentFile.read()\n", " sentences = content.split('.')\n", " return sentences\n", "\n", "def getParas(text, minParNl=2):\n", " \"\"\"\n", " split into paras\n", " \"\"\"\n", " regx = \"\\n+\" if minParNl == 1 else \"\\n{2,}\"\n", " paras = re.split(regx, 
text.replace(\"\\r\\n\", \"\\n\"))\n", " return paras\n" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.12" } }, "nbformat": 4, "nbformat_minor": 5 }