package FunctionLayer.StanfordParser;

import FunctionLayer.Datahandler;
import FunctionLayer.LevenshteinDistance;
import FunctionLayer.SimilarityMatrix;
import com.google.common.collect.MapMaker;
import edu.mit.jmwe.data.IMWE;
import edu.mit.jmwe.data.IMWEDesc;
import edu.mit.jmwe.data.IToken;
import edu.stanford.nlp.ie.AbstractSequenceClassifier;
import edu.stanford.nlp.ling.CoreAnnotations;
import edu.stanford.nlp.ling.CoreLabel;
import edu.stanford.nlp.ling.HasWord;
import edu.stanford.nlp.ling.IndexedWord;
import edu.stanford.nlp.ling.JMWEAnnotation;
import edu.stanford.nlp.ling.TaggedWord;
import edu.stanford.nlp.neural.rnn.RNNCoreAnnotations;
import edu.stanford.nlp.pipeline.Annotation;
import edu.stanford.nlp.pipeline.CoreDocument;
import edu.stanford.nlp.pipeline.CoreEntityMention;
import edu.stanford.nlp.pipeline.StanfordCoreNLP;
import edu.stanford.nlp.process.CoreLabelTokenFactory;
import edu.stanford.nlp.process.DocumentPreprocessor;
import edu.stanford.nlp.process.PTBTokenizer;
import edu.stanford.nlp.process.TokenizerFactory;
import edu.stanford.nlp.sentiment.SentimentCoreAnnotations;
import edu.stanford.nlp.sequences.DocumentReaderAndWriter;
import edu.stanford.nlp.tagger.maxent.MaxentTagger;
import edu.stanford.nlp.trees.Constituent;
import edu.stanford.nlp.trees.GrammaticalRelation;
import edu.stanford.nlp.trees.GrammaticalStructure;
import edu.stanford.nlp.trees.GrammaticalStructureFactory;
import edu.stanford.nlp.trees.Tree;
import edu.stanford.nlp.trees.TreeCoreAnnotations;
import edu.stanford.nlp.trees.TypedDependency;
import edu.stanford.nlp.trees.tregex.gui.Tdiff;
import edu.stanford.nlp.util.CoreMap;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.OptionalDouble;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;
import org.ejml.simple.SimpleMatrix;

/**
 * Callable that scores how similar two sentences are. The score starts at
 * -100 and is adjusted by a long series of heuristics over POS tags,
 * constituency parses, typed dependencies, RNN sentiment vectors, JMWE
 * multi-word expressions, Levenshtein distance and named-entity mentions.
 *
 * @author install1
 */
public class SentimentAnalyzerTest implements Callable<SimilarityMatrix> {

    private SimilarityMatrix smxParam;
    private String str;
    private String str1;
    private MaxentTagger tagger;
    private GrammaticalStructureFactory gsf;
    private StanfordCoreNLP pipeline;
    private StanfordCoreNLP pipelineSentiment;
    private AbstractSequenceClassifier classifier;
    private Annotation jmweStrAnnotation1;
    private Annotation jmweStrAnnotation2;
    private Annotation pipelineAnnotation1;
    private Annotation pipelineAnnotation2;
    private Annotation pipelineAnnotation1Sentiment;
    private Annotation pipelineAnnotation2Sentiment;
    private CoreDocument pipelineCoreDcoument1;
    private CoreDocument pipelineCoreDcoument2;

    public SentimentAnalyzerTest(String str, String str1, SimilarityMatrix smxParam, Annotation str1Annotation, Annotation str2Annotation,
            Annotation strPipeline1, Annotation strPipeline2, Annotation strPipeSentiment1, Annotation strPipeSentiment2,
            CoreDocument pipelineCoreDcoument1, CoreDocument pipelineCoreDcoument2) {
        this.str = str;
        this.str1 = str1;
        this.smxParam = smxParam;
        this.tagger = Datahandler.getTagger();
        this.pipeline = Datahandler.getPipeline();
        this.pipelineSentiment = Datahandler.getPipelineSentiment();
        this.gsf = Datahandler.getGsf();
        this.classifier = Datahandler.getClassifier();
        this.jmweStrAnnotation1 = str1Annotation;
        this.jmweStrAnnotation2 = str2Annotation;
        this.pipelineAnnotation1 = strPipeline1;
        this.pipelineAnnotation2 = strPipeline2;
        this.pipelineAnnotation1Sentiment = strPipeSentiment1;
        this.pipelineAnnotation2Sentiment = strPipeSentiment2;
        this.pipelineCoreDcoument1 = pipelineCoreDcoument1;
        this.pipelineCoreDcoument2 = pipelineCoreDcoument2;
    }
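
    /**
     * Runs every heuristic in sequence and accumulates a single score. The
     * parse and JMWE stages catch and log their own exceptions so later
     * stages still run; anything else is caught by the enclosing try, after
     * which the score collected so far is written to the SimilarityMatrix
     * and returned.
     */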
    @Override
    public SimilarityMatrix call() {
        Double score = -100.0;
        try {
            List<List<TaggedWord>> taggedwordlist1 = new ArrayList<>();
            List<List<TaggedWord>> taggedwordlist2 = new ArrayList<>();
            DocumentPreprocessor tokenizer = new DocumentPreprocessor(new StringReader(str1));
            // alternative option: "untokenizable=noneDelete"
            TokenizerFactory<CoreLabel> ptbTokenizerFactory
                    = PTBTokenizer.factory(new CoreLabelTokenFactory(), "untokenizable=firstDelete");
            tokenizer.setTokenizerFactory(ptbTokenizerFactory);
            for (List<HasWord> sentence : tokenizer) {
                taggedwordlist1.add(tagger.tagSentence(sentence));
            }
            tokenizer = new DocumentPreprocessor(new StringReader(str));
            tokenizer.setTokenizerFactory(ptbTokenizerFactory);
            for (List<HasWord> sentence : tokenizer) {
                taggedwordlist2.add(tagger.tagSentence(sentence));
            }
            // Penalize differences in total token count between the two sentences.
            int counter = taggedwordlist2.stream().mapToInt(List::size).sum();
            int counter1 = taggedwordlist1.stream().mapToInt(List::size).sum();
            int overValue = Math.abs(counter - counter1);
            overValue *= 32;
            score -= overValue;
            // Collect the distinct POS tags of sentence 1, then reward every tag
            // that sentence 2 shares with it.
            ConcurrentMap<Integer, String> tgwlistIndex = new MapMaker().concurrencyLevel(2).makeMap();
            taggedwordlist1.forEach((TGWList) -> {
                TGWList.forEach((TaggedWord) -> {
                    if (!tgwlistIndex.values().contains(TaggedWord.tag()) && !TaggedWord.tag().equals(":")) {
                        tgwlistIndex.put(tgwlistIndex.size() + 1, TaggedWord.tag());
                    }
                });
            });
            AtomicInteger runCount = new AtomicInteger(0);
            taggedwordlist2.forEach((TGWList) -> {
                TGWList.forEach((TaggedWord) -> {
                    if (tgwlistIndex.values().contains(TaggedWord.tag())) {
                        tgwlistIndex.values().remove(TaggedWord.tag());
                        runCount.getAndIncrement();
                    }
                });
            });
            score += runCount.get() * 64;
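
            // Constituency-parse comparison: every sentence parse of annotation 1
            // is compared against every sentence parse of annotation 2 via Tdiff
            // and typed dependencies. The weights below appear to be hand-tuned
            // heuristics.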
            ConcurrentMap<Integer, Tree> sentenceConstituencyParseList = new MapMaker().concurrencyLevel(2).makeMap();
            try {
                for (CoreMap sentence : pipelineAnnotation1.get(CoreAnnotations.SentencesAnnotation.class)) {
                    Tree sentenceConstituencyParse = sentence.get(TreeCoreAnnotations.TreeAnnotation.class);
                    sentenceConstituencyParseList.put(sentenceConstituencyParseList.size(), sentenceConstituencyParse);
                }
                ConcurrentMap<Integer, Integer> alltypeDepsSizeMap = new MapMaker().concurrencyLevel(2).makeMap();
                ConcurrentMap<Integer, Integer> summationMap = new MapMaker().concurrencyLevel(2).makeMap();
                for (CoreMap sentence : pipelineAnnotation2.get(CoreAnnotations.SentencesAnnotation.class)) {
                    int constiRelationsize = 0;
                    Tree sentenceConstituencyParse = sentence.get(TreeCoreAnnotations.TreeAnnotation.class);
                    GrammaticalStructure gs = gsf.newGrammaticalStructure(sentenceConstituencyParse);
                    Collection<TypedDependency> allTypedDependencies = gs.allTypedDependencies();
                    ConcurrentMap<Integer, String> filerTreeContent = new MapMaker().concurrencyLevel(2).makeMap();
                    for (Tree sentenceConstituencyParse1 : sentenceConstituencyParseList.values()) {
                        // Constituents that differ between the two parses, in both directions.
                        Set<Constituent> constinuent1 = Tdiff.markDiff(sentenceConstituencyParse, sentenceConstituencyParse1);
                        Set<Constituent> constinuent2 = Tdiff.markDiff(sentenceConstituencyParse1, sentenceConstituencyParse);
                        ConcurrentMap<Integer, String> constiLabels = new MapMaker().concurrencyLevel(2).makeMap();
                        for (Constituent consti : constinuent1) {
                            for (Constituent consti1 : constinuent2) {
                                if (consti.value().equals(consti1.value()) && !constiLabels.values().contains(consti.value())) {
                                    constiLabels.put(constiLabels.size(), consti.value());
                                    constiRelationsize++;
                                }
                            }
                        }
                        int constituents1 = constinuent1.size() - constiRelationsize;
                        int constituents2 = constinuent2.size() - constiRelationsize;
                        if (constituents1 > 0 && constituents2 > 0) {
                            score -= (constituents1 + constituents2) * 200;
                        } else {
                            score += constiRelationsize * 200;
                        }
                        GrammaticalStructure gs1 = gsf.newGrammaticalStructure(sentenceConstituencyParse1);
                        Collection<TypedDependency> allTypedDependencies1 = gs1.allTypedDependencies();
                        int relationApplicable1 = 0;
                        int relationApplicable2 = 0;
                        int grammaticalRelation1 = 0;
                        int grammaticalRelation2 = 0;
                        for (TypedDependency TDY1 : allTypedDependencies1) {
                            IndexedWord dep = TDY1.dep();
                            IndexedWord gov = TDY1.gov();
                            GrammaticalRelation grammaticalRelation = gs.getGrammaticalRelation(gov, dep);
                            if (grammaticalRelation.isApplicable(sentenceConstituencyParse)) {
                                score += 1900;
                                grammaticalRelation1++;
                            }
                            GrammaticalRelation reln = TDY1.reln();
                            if (reln.isApplicable(sentenceConstituencyParse)) {
                                score += 525;
                                relationApplicable1++;
                            }
                        }
                        for (TypedDependency TDY : allTypedDependencies) {
                            IndexedWord dep = TDY.dep();
                            IndexedWord gov = TDY.gov();
                            GrammaticalRelation grammaticalRelation = gs1.getGrammaticalRelation(gov, dep);
                            if (grammaticalRelation.isApplicable(sentenceConstituencyParse)) {
                                score += 900;
                                grammaticalRelation2++;
                            }
                            GrammaticalRelation reln = TDY.reln();
                            if (reln.isApplicable(sentenceConstituencyParse1)) {
                                score += 525;
                                relationApplicable2++;
                            }
                        }
                        // Penalize when only one side has applicable grammatical relations.
                        if ((grammaticalRelation1 == 0 && grammaticalRelation2 > 0) || (grammaticalRelation2 == 0 && grammaticalRelation1 > 0)) {
                            score -= 3450;
                        }
                        if (!allTypedDependencies.isEmpty() || !allTypedDependencies1.isEmpty()) {
                            int allTypeDep1 = allTypedDependencies.size();
                            int allTypeDep2 = allTypedDependencies1.size();
                            // Record and score each distinct dependency count once.
                            if (allTypeDep1 <= allTypeDep2 * 5 && allTypeDep2 <= allTypeDep1 * 5) {
                                if (!alltypeDepsSizeMap.values().contains(allTypeDep1)) {
                                    score += allTypeDep1 * 600;
                                    alltypeDepsSizeMap.put(alltypeDepsSizeMap.size() + 1, allTypeDep1);
                                }
                                if (!alltypeDepsSizeMap.values().contains(allTypeDep2)) {
                                    score += allTypeDep2 * 600;
                                    alltypeDepsSizeMap.put(alltypeDepsSizeMap.size() + 1, allTypeDep2);
                                }
                            }
                            if (allTypeDep1 >= 5 && allTypeDep2 >= 5) {
                                int largerTypeDep = allTypeDep1 > allTypeDep2 ? allTypeDep1 : allTypeDep2;
                                int smallerTypeDep = allTypeDep1 < allTypeDep2 ? allTypeDep1 : allTypeDep2;
                                int summation = largerTypeDep * largerTypeDep - smallerTypeDep * smallerTypeDep;
                                if (summation > 50 && summation < 75) {
                                    score += summation * 80;
                                } else if (!summationMap.values().contains(summation)) {
                                    score -= largerTypeDep * 500;
                                    summationMap.put(summationMap.size() + 1, summation);
                                }
                            }
                            if (relationApplicable1 > 0 && relationApplicable2 > 0 && relationApplicable1 == relationApplicable2
                                    && grammaticalRelation1 > 0 && grammaticalRelation2 > 0 && grammaticalRelation1 == grammaticalRelation2) {
                                score += 3500;
                            } else {
                                // Note: the subtraction is inverted, so this branch yields a
                                // penalty (or zero) proportional to the size difference.
                                score += allTypeDep1 > allTypeDep2
                                        ? (allTypeDep2 - allTypeDep1) * (allTypeDep2 * 50)
                                        : (allTypeDep1 - allTypeDep2) * (allTypeDep1 * 50);
                            }
                        }
                        AtomicInteger runCount1 = new AtomicInteger(0);
                        // Count lemmas the two parses share, once each.
                        sentenceConstituencyParse.taggedLabeledYield().forEach((LBW) -> {
                            sentenceConstituencyParse1.taggedLabeledYield().stream().filter((LBW1) -> (LBW.lemma().equals(LBW1.lemma())
                                    && !filerTreeContent.values().contains(LBW.lemma()))).map((_item) -> {
                                filerTreeContent.put(filerTreeContent.size() + 1, LBW.lemma());
                                return _item;
                            }).forEachOrdered((_item) -> {
                                runCount1.getAndIncrement();
                            });
                        });
                        score += runCount1.get() * 1500;
                    }
                }
                // Penalize a wide spread in the recorded dependency counts.
                int typeSizeSmallest = 100;
                int typeSizeLargest = 0;
                for (Integer i : alltypeDepsSizeMap.values()) {
                    if (i > typeSizeLargest) {
                        typeSizeLargest = i;
                    }
                    if (i < typeSizeSmallest) {
                        typeSizeSmallest = i;
                    }
                }
                if (typeSizeLargest >= typeSizeSmallest * 3) {
                    score -= typeSizeLargest * 1600;
                }
                // Same spread check for the summation values.
                typeSizeLargest = 0;
                typeSizeSmallest = 100;
                for (int i : summationMap.values()) {
                    if (i > typeSizeLargest) {
                        typeSizeLargest = i;
                    }
                    if (i < typeSizeSmallest) {
                        typeSizeSmallest = i;
                    }
                }
                if (typeSizeLargest >= typeSizeSmallest * 3) {
                    score -= typeSizeLargest * 1600;
                }
            } catch (Exception ex) {
                System.out.println("pipelineAnnotation stacktrace: " + ex.getLocalizedMessage() + "\n");
            }
            sentenceConstituencyParseList.clear();
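
            // Sentiment comparison: collect the RNN prediction and node vectors for
            // every sentence of annotation 1, then compare them against annotation 2
            // via dot products and Kronecker element sums. Thresholds are heuristic.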
            ConcurrentMap<Integer, SimpleMatrix> simpleSMXlist = new MapMaker().concurrencyLevel(2).makeMap();
            ConcurrentMap<Integer, SimpleMatrix> simpleSMXlistVector = new MapMaker().concurrencyLevel(2).makeMap();
            ConcurrentMap<Integer, Integer> sentiment1 = new MapMaker().concurrencyLevel(2).makeMap();
            ConcurrentMap<Integer, Integer> sentiment2 = new MapMaker().concurrencyLevel(2).makeMap();
            for (CoreMap sentence : pipelineAnnotation1Sentiment.get(CoreAnnotations.SentencesAnnotation.class)) {
                Tree tree = sentence.get(SentimentCoreAnnotations.SentimentAnnotatedTree.class);
                sentiment1.put(sentiment1.size(), RNNCoreAnnotations.getPredictedClass(tree));
                SimpleMatrix predictions = RNNCoreAnnotations.getPredictions(tree);
                SimpleMatrix nodeVector = RNNCoreAnnotations.getNodeVector(tree);
                simpleSMXlist.put(simpleSMXlist.size(), predictions);
                simpleSMXlistVector.put(simpleSMXlistVector.size() + 1, nodeVector);
            }
            ConcurrentMap<Integer, Double> elementSumCounter = new MapMaker().concurrencyLevel(2).makeMap();
            ConcurrentMap<Integer, Double> dotMap = new MapMaker().concurrencyLevel(2).makeMap();
            ConcurrentMap<Integer, Double> elementSumMap = new MapMaker().concurrencyLevel(2).makeMap();
            ConcurrentMap<Integer, Double> dotSumMap = new MapMaker().concurrencyLevel(2).makeMap();
            for (CoreMap sentence : pipelineAnnotation2Sentiment.get(CoreAnnotations.SentencesAnnotation.class)) {
                Tree tree = sentence.get(SentimentCoreAnnotations.SentimentAnnotatedTree.class);
                sentiment2.put(sentiment2.size() + 1, RNNCoreAnnotations.getPredictedClass(tree));
                SimpleMatrix predictions = RNNCoreAnnotations.getPredictions(tree);
                SimpleMatrix nodeVector = RNNCoreAnnotations.getNodeVector(tree);
                ConcurrentMap<Integer, Double> AccumulateDotMap = new MapMaker().concurrencyLevel(2).makeMap();
                ConcurrentMap<Integer, Double> subtractorMap = new MapMaker().concurrencyLevel(2).makeMap();
                ConcurrentMap<Integer, Double> dotPredictions = new MapMaker().concurrencyLevel(2).makeMap();
                Double largest = 10.0;
                Double shortest = 100.0;
                for (SimpleMatrix simpleSMX : simpleSMXlist.values()) {
                    double dotPrediction = predictions.dot(simpleSMX) * 100;
                    AccumulateDotMap.put(AccumulateDotMap.size() + 1, dotPrediction);
                    // Distance of the dot product from a perfect 100-percent match.
                    double subtracter = dotPrediction > 50 ? dotPrediction - 100 : dotPrediction > 0 ? 100 - dotPrediction : 0;
                    subtractorMap.put(subtractorMap.size() + 1, subtracter);
                    if (!dotPredictions.values().contains(dotPrediction)) {
                        if (dotPrediction > largest) {
                            largest = dotPrediction;
                        }
                        if (dotPrediction < shortest) {
                            shortest = dotPrediction;
                        }
                        Double dotPredictionIntervalDifference = largest - shortest;
                        subtracter *= 25;
                        if (dotPredictionIntervalDifference < 5.0) {
                            if (!dotPredictions.isEmpty()) {
                                score += subtracter;
                            }
                        } else {
                            score -= subtracter;
                        }
                    } else {
                        subtracter -= 100;
                        subtracter *= 25;
                        score += subtracter * dotPrediction;
                    }
                    dotPredictions.put(dotPredictions.size() + 1, dotPrediction);
                }
                // Penalize consecutive identical subtractor values.
                Double subTracPre = 0.0;
                for (Double subtractors : subtractorMap.values()) {
                    if (Objects.equals(subTracPre, subtractors)) {
                        score -= 2000;
                    }
                    subTracPre = subtractors;
                }
                ConcurrentMap<Integer, Double> DotOverTransfer = dotPredictions;
                dotPredictions = new MapMaker().concurrencyLevel(2).makeMap();
                for (SimpleMatrix simpleSMX : simpleSMXlist.values()) {
                    double dotPrediction = simpleSMX.dot(predictions) * 100;
                    AccumulateDotMap.put(AccumulateDotMap.size() + 1, dotPrediction);
                    double subtracter = dotPrediction > 50 ? dotPrediction - 100 : dotPrediction > 0 ? 100 - dotPrediction : 0;
                    subtractorMap.put(subtractorMap.size() + 1, subtracter);
                    if (!dotPredictions.values().contains(dotPrediction)) {
                        subtracter *= 25;
                        int match = 0;
                        for (Double transferDots : DotOverTransfer.values()) {
                            if (transferDots == dotPrediction) {
                                score += subtracter;
                                match++;
                            }
                        }
                        if (match == 0) {
                            score -= subtracter;
                        }
                    } else {
                        subtracter -= 100;
                        subtracter *= 25;
                        score += subtracter * dotPrediction;
                    }
                    dotPredictions.put(dotPredictions.size() + 1, dotPrediction);
                }
                // Three identical accumulated dot products in a row are penalized hard.
                Double preAccumulatorDot = 0.0;
                Double postAccumulatorDot = 0.0;
                for (Double accumulators : AccumulateDotMap.values()) {
                    if (Objects.equals(preAccumulatorDot, accumulators)) {
                        if (Objects.equals(postAccumulatorDot, accumulators)) {
                            score -= 4000;
                        }
                        postAccumulatorDot = accumulators;
                    }
                    preAccumulatorDot = accumulators;
                }
                subTracPre = 0.0;
                for (Double subtractors : subtractorMap.values()) {
                    if (Objects.equals(subTracPre, subtractors)) {
                        score -= 2000;
                    }
                    subTracPre = subtractors;
                }
                Double preDot = 0.0;
                Double postDot = 0.0;
                for (SimpleMatrix simpleSMX : simpleSMXlistVector.values()) {
                    double dot = nodeVector.dot(simpleSMX);
                    double elementSum = nodeVector.kron(simpleSMX).elementSum();
                    if (preDot == dot) {
                        if (postDot == dot) {
                            score -= 4000;
                        }
                        postDot = dot;
                    }
                    preDot = dot;
                    elementSum = Math.round(elementSum * 100.0) / 100.0;
                    elementSumCounter.put(elementSumCounter.size() + 1, elementSum);
                    dotMap.put(dotMap.size() + 1, dot);
                    if (!dotSumMap.values().contains(dot)) {
                        if (dot < 0.000) {
                            score += dot * 1500;
                        } else if (dot < 0.1) {
                            score += 256;
                        }
                        if (dot > 0.50) {
                            score -= 2400;
                        }
                        dotSumMap.put(dotSumMap.size() + 1, dot);
                    } else {
                        score -= 750;
                    }
                    if (!elementSumMap.values().contains(elementSum)) {
                        if (elementSum < 0.01 && elementSum > 0.00) {
                            score += 3300;
                        } else if (elementSum > 0.1 && elementSum < 0.2) {
                            score += 1100;
                        } else {
                            score -= elementSum * 1424;
                        }
                        elementSumMap.put(elementSumMap.size() + 1, elementSum);
                    } else {
                        score -= 750;
                    }
                }
                // Second pass with the operands swapped. The dot product is symmetric,
                // so these values usually already sit in dotSumMap/elementSumMap and
                // take the duplicate penalty, with different reward weights.
                for (SimpleMatrix simpleSMX : simpleSMXlistVector.values()) {
                    double dot = simpleSMX.dot(nodeVector);
                    double elementSum = simpleSMX.kron(nodeVector).elementSum();
                    if (preDot == dot) {
                        if (postDot == dot) {
                            score -= 4000;
                        }
                        postDot = dot;
                    }
                    preDot = dot;
                    elementSum = Math.round(elementSum * 100.0) / 100.0;
                    elementSumCounter.put(elementSumCounter.size() + 1, elementSum);
                    dotMap.put(dotMap.size() + 1, dot);
                    if (!dotSumMap.values().contains(dot)) {
                        if (dot < 0.1) {
                            score += 256;
                        }
                        if (dot > 0.50) {
                            score -= 2400;
                        }
                        dotSumMap.put(dotSumMap.size() + 1, dot);
                    } else {
                        score -= 750;
                    }
                    if (!elementSumMap.values().contains(elementSum)) {
                        if (elementSum < 0.01 && elementSum > 0.00) {
                            score += 1300;
                        } else if (elementSum > 0.1 && elementSum < 1.0) {
                            score += 1100;
                        } else {
                            score -= elementSum * 1424;
                        }
                        elementSumMap.put(elementSumMap.size() + 1, elementSum);
                    } else {
                        score -= 750;
                    }
                }
            }
            // Spread of the collected dot products. getAsDouble() throws if no
            // sentence produced a vector; the enclosing catch swallows that case.
            OptionalDouble minvalueDots = dotMap.values().stream().mapToDouble(Double::doubleValue).min();
            OptionalDouble maxvalueDots = dotMap.values().stream().mapToDouble(Double::doubleValue).max();
            boolean permitted = minvalueDots.getAsDouble() != maxvalueDots.getAsDouble();
            if (permitted) {
                Double dotsVariance = maxvalueDots.getAsDouble() - minvalueDots.getAsDouble();
                if (maxvalueDots.getAsDouble() > minvalueDots.getAsDouble() * 10) {
                    score -= 5500;
                } else if (minvalueDots.getAsDouble() < -0.10) {
                    score -= 3500;
                } else if (dotsVariance < 0.5) {
                    score += 3500;
                } else if (dotsVariance > minvalueDots.getAsDouble() * 2) {
                    score += 3500;
                }
            }
            OptionalDouble minvalueElements = elementSumCounter.values().stream().mapToDouble(Double::doubleValue).min();
            OptionalDouble maxvalueElements = elementSumCounter.values().stream().mapToDouble(Double::doubleValue).max();
            Double elementsVariance = maxvalueElements.getAsDouble() - minvalueElements.getAsDouble();
            if (elementsVariance == 0.0) {
                score -= 550;
            } else if (elementsVariance < 0.02 && elementsVariance > -0.01) {
                score += 3500;
            } else if (elementsVariance < 0.5 && maxvalueElements.getAsDouble() > 0.0 && minvalueElements.getAsDouble() > 0.0 && elementsVariance > 0.000) {
                score += 3500;
            } else if (minvalueElements.getAsDouble() < 0.0 && minvalueElements.getAsDouble() - maxvalueElements.getAsDouble() < 0.50) {
                score -= 2500;
            }
            // Penalize differing sentence counts between the two sentiment annotations.
            score -= (sentiment1.size() > sentiment2.size() ? sentiment1.size() - sentiment2.size() : sentiment2.size() - sentiment1.size()) * 500;
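
            // CRF classifier comparison: classify both raw strings and penalize a
            // difference in the number of classified units.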
            DocumentReaderAndWriter<CoreLabel> readerAndWriter = classifier.makePlainTextReaderAndWriter();
            List classifyRaw1 = classifier.classifyRaw(str, readerAndWriter);
            List classifyRaw2 = classifier.classifyRaw(str1, readerAndWriter);
            score -= (classifyRaw1.size() > classifyRaw2.size() ? classifyRaw1.size() - classifyRaw2.size() : classifyRaw2.size() - classifyRaw1.size()) * 200;
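
            // Overall sentiment: take the predicted class of the longest sentence
            // on each side and compare sentence lengths.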
            int mainSentiment1 = 0;
            int longest1 = 0;
            int mainSentiment2 = 0;
            int longest2 = 0;
            for (CoreMap sentence : pipelineAnnotation1Sentiment.get(CoreAnnotations.SentencesAnnotation.class)) {
                Tree tree = sentence.get(SentimentCoreAnnotations.SentimentAnnotatedTree.class);
                int sentiment = RNNCoreAnnotations.getPredictedClass(tree);
                String partText = sentence.toString();
                if (partText.length() > longest1) {
                    mainSentiment1 = sentiment;
                    longest1 = partText.length();
                }
            }
            for (CoreMap sentence : pipelineAnnotation2Sentiment.get(CoreAnnotations.SentencesAnnotation.class)) {
                Tree tree = sentence.get(SentimentCoreAnnotations.SentimentAnnotatedTree.class);
                int sentiment = RNNCoreAnnotations.getPredictedClass(tree);
                String partText = sentence.toString();
                if (partText.length() > longest2) {
                    mainSentiment2 = sentiment;
                    longest2 = partText.length();
                }
            }
            if (longest1 != longest2) {
                long deffLongest = longest1 > longest2 ? longest1 : longest2;
                long deffshorter = longest1 < longest2 ? longest1 : longest2;
                if (deffLongest > deffshorter * 5) {
                    score -= 5500;
                } else if (deffLongest < (deffshorter * 2) - 1 && deffLongest - deffshorter <= 45) {
                    score += (deffLongest - deffshorter) * 120;
                } else if (mainSentiment1 != mainSentiment2 && deffLongest - deffshorter > 20 && deffLongest - deffshorter < 45) {
                    score += (deffLongest - deffshorter) * 20;
                } else if (deffLongest - deffshorter < 2) {
                    score += (deffLongest + deffshorter) * 40;
                } else if (deffshorter * 2 >= deffLongest && deffshorter * 2 < deffLongest + 5) {
                    score += deffLongest * 20;
                } else {
                    score -= (deffLongest - deffshorter) * 50;
                }
                if (deffLongest - deffshorter <= 5) {
                    score += 2500;
                }
            }
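
            // JMWE multi-word expressions: collect forms, entries, parts, POS
            // prefixes, counts, tags and stems for both sentences, then compare
            // the collections below.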
            int tokensCounter1 = 0;
            int tokensCounter2 = 0;
            int anotatorcounter1 = 0;
            int anotatorcounter2 = 0;
            int inflectedCounterPositive1 = 0;
            int inflectedCounterPositive2 = 0;
            int inflectedCounterNegative = 0;
            int MarkedContinuousCounter1 = 0;
            int MarkedContinuousCounter2 = 0;
            Integer MarkedContiniousCounter1Entries = 0;
            Integer MarkedContiniousCounter2Entries = 0;
            int UnmarkedPatternCounter = 0;
            ConcurrentMap<Integer, String> ITokenMapTag1 = new MapMaker().concurrencyLevel(2).makeMap();
            ConcurrentMap<Integer, String> ITokenMapTag2 = new MapMaker().concurrencyLevel(2).makeMap();
            ConcurrentMap<Integer, String> strTokenStems1 = new MapMaker().concurrencyLevel(2).makeMap();
            ConcurrentMap<Integer, String> strTokenStems2 = new MapMaker().concurrencyLevel(2).makeMap();
            ConcurrentMap<Integer, String> strTokenForm1 = new MapMaker().concurrencyLevel(2).makeMap();
            ConcurrentMap<Integer, String> strTokenForm2 = new MapMaker().concurrencyLevel(2).makeMap();
            ConcurrentMap<Integer, String> strTokenGetEntry1 = new MapMaker().concurrencyLevel(2).makeMap();
            ConcurrentMap<Integer, String> strTokenGetEntry2 = new MapMaker().concurrencyLevel(2).makeMap();
            ConcurrentMap<Integer, String> strTokenGetiPart1 = new MapMaker().concurrencyLevel(2).makeMap();
            ConcurrentMap<Integer, String> strTokenGetiPart2 = new MapMaker().concurrencyLevel(2).makeMap();
            ConcurrentMap<Integer, String> strTokenEntryPOS1 = new MapMaker().concurrencyLevel(2).makeMap();
            ConcurrentMap<Integer, String> strTokenEntryPOS2 = new MapMaker().concurrencyLevel(2).makeMap();
            ConcurrentMap<Integer, Integer> entryCounts1 = new MapMaker().concurrencyLevel(2).makeMap();
            ConcurrentMap<Integer, Integer> entryCounts2 = new MapMaker().concurrencyLevel(2).makeMap();
            try {
                List<CoreMap> sentences = jmweStrAnnotation1.get(CoreAnnotations.SentencesAnnotation.class);
                for (CoreMap sentence : sentences) {
                    for (IMWE<IToken> token : sentence.get(JMWEAnnotation.class)) {
                        if (token.isInflected()) {
                            inflectedCounterPositive1++;
                        } else {
                            inflectedCounterNegative++;
                        }
                        strTokenForm1.put(strTokenForm1.size() + 1, token.getForm());
                        // Keep only the final character of the entry's string form.
                        strTokenGetEntry1.put(strTokenGetEntry1.size() + 1, token.getEntry().toString().substring(token.getEntry().toString().length() - 1));
                        Collection<IMWEDesc.IPart> values = token.getPartMap().values();
                        IMWEDesc entry = token.getEntry();
                        MarkedContinuousCounter1 += entry.getMarkedContinuous();
                        UnmarkedPatternCounter += entry.getUnmarkedPattern();
                        for (IMWEDesc.IPart iPart : values) {
                            strTokenGetiPart1.put(strTokenGetiPart1.size() + 1, iPart.getForm());
                        }
                        for (String strPostPrefix : entry.getPOS().getPrefixes()) {
                            strTokenEntryPOS1.put(strTokenEntryPOS1.size() + 1, strPostPrefix);
                        }
                        for (int counts : entry.getCounts()) {
                            entryCounts1.put(entryCounts1.size() + 1, counts);
                        }
                        for (IToken tokens : token.getTokens()) {
                            ITokenMapTag1.put(ITokenMapTag1.size() + 1, tokens.getTag());
                            for (String strtoken : tokens.getStems()) {
                                strTokenStems1.put(strTokenStems1.size() + 1, strtoken);
                                MarkedContiniousCounter1Entries++;
                            }
                        }
                        tokensCounter1++;
                    }
                    anotatorcounter1++;
                }
                sentences = jmweStrAnnotation2.get(CoreAnnotations.SentencesAnnotation.class);
                for (CoreMap sentence : sentences) {
                    for (IMWE<IToken> token : sentence.get(JMWEAnnotation.class)) {
                        if (token.isInflected()) {
                            inflectedCounterPositive2++;
                        } else {
                            // Decremented here, so the counter ends up as the net
                            // difference in uninflected tokens between the sentences.
                            inflectedCounterNegative--;
                        }
                        strTokenForm2.put(strTokenForm2.size() + 1, token.getForm());
                        strTokenGetEntry2.put(strTokenGetEntry2.size() + 1, token.getEntry().toString().substring(token.getEntry().toString().length() - 1));
                        Collection<IMWEDesc.IPart> values = token.getPartMap().values();
                        IMWEDesc entry = token.getEntry();
                        MarkedContinuousCounter2 += entry.getMarkedContinuous();
                        UnmarkedPatternCounter += entry.getUnmarkedPattern();
                        for (IMWEDesc.IPart iPart : values) {
                            strTokenGetiPart2.put(strTokenGetiPart2.size() + 1, iPart.getForm());
                        }
                        for (String strPostPrefix : entry.getPOS().getPrefixes()) {
                            strTokenEntryPOS2.put(strTokenEntryPOS2.size() + 1, strPostPrefix);
                        }
                        for (int counts : entry.getCounts()) {
                            entryCounts2.put(entryCounts2.size() + 1, counts);
                        }
                        for (IToken tokens : token.getTokens()) {
                            ITokenMapTag2.put(ITokenMapTag2.size() + 1, tokens.getTag());
                            for (String strtoken : tokens.getStems()) {
                                strTokenStems2.put(strTokenStems2.size() + 1, strtoken);
                                MarkedContiniousCounter2Entries++;
                            }
                        }
                        tokensCounter2++;
                    }
                    anotatorcounter2++;
                }
            } catch (Exception ex) {
                System.out.println("SENTIMENT stacktrace: " + ex.getMessage() + "\n");
            }
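
            // Compare the collected MWE features. The branch weights below are
            // hand-tuned heuristics.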
            int entry1 = entryCounts1.values().size();
            int entry2 = entryCounts2.values().size();
            if ((entry1 >= entry2 * 5 && entry2 > 0) || (entry2 >= entry1 * 5 && entry1 > 0)) {
                score -= entry1 > entry2 ? (entry1 - entry2) * 450 : (entry2 - entry1) * 450;
            } else if (entry1 >= entry2 * 50 || entry2 >= entry1 * 50) {
                // Also reached when either count is zero (including both zero),
                // so the dedicated zero/zero branch below never fires.
                score -= entry1 > entry2 ? entry1 * 180 : entry2 * 180;
            } else if (entry1 >= entry2 * 2 || entry2 >= entry1 * 2) {
                score += entry1 > entry2 ? (entry1 - entry2) * 450 : (entry2 - entry1) * 450;
            } else if (entry1 == 0 && entry2 == 0) {
                score -= 4500;
            } else if (entry1 == entry2) {
                score += 5500;
            }
            ConcurrentMap<Integer, Integer> countsMap = new MapMaker().concurrencyLevel(2).makeMap();
            for (int counts : entryCounts1.values()) {
                for (int counts1 : entryCounts2.values()) {
                    if (counts == counts1 && counts > 0 && !countsMap.values().contains(counts)) {
                        score += counts * 250;
                        countsMap.put(countsMap.size() + 1, counts);
                    }
                }
            }
            if (strTokenEntryPOS1.values().size() > 1 && strTokenEntryPOS2.values().size() > 1) {
                for (String strTokenPos1 : strTokenEntryPOS1.values()) {
                    for (String strTokenPos2 : strTokenEntryPOS2.values()) {
                        if (strTokenPos1.equals(strTokenPos2)) {
                            score += 500;
                        } else {
                            score -= 650;
                        }
                    }
                }
            }
            if (UnmarkedPatternCounter > 0 && UnmarkedPatternCounter < 5) {
                score -= UnmarkedPatternCounter * 1600;
            } else {
                score -= UnmarkedPatternCounter * 10;
            }
            if (MarkedContinuousCounter1 > 0 && MarkedContinuousCounter2 > 0) {
                if (MarkedContinuousCounter1 > MarkedContinuousCounter2 * 50 || MarkedContinuousCounter2 > MarkedContinuousCounter1 * 50) {
                    score -= MarkedContinuousCounter1 > MarkedContinuousCounter2 ? MarkedContinuousCounter1 * 120 : MarkedContinuousCounter2 * 120;
                } else if ((!Objects.equals(MarkedContiniousCounter1Entries, MarkedContiniousCounter2Entries)
                        && (MarkedContinuousCounter1 * 2 >= MarkedContinuousCounter2 * MarkedContinuousCounter1))
                        || (MarkedContinuousCounter2 * 2 >= MarkedContinuousCounter1 * MarkedContinuousCounter2)) {
                    score += 4500;
                } else if (MarkedContiniousCounter1Entries == 0 || MarkedContiniousCounter2Entries == 0) {
                    score += MarkedContinuousCounter1 > MarkedContinuousCounter2 ? (MarkedContinuousCounter2 - MarkedContinuousCounter1) * 500
                            : (MarkedContinuousCounter1 - MarkedContinuousCounter2) * 500;
                }
                if (MarkedContiniousCounter1Entries > 0 && MarkedContiniousCounter2Entries > 0 && MarkedContinuousCounter1 > 0
                        && MarkedContinuousCounter2 > 0 && MarkedContinuousCounter1 < MarkedContinuousCounter2 * 10
                        && MarkedContinuousCounter2 < MarkedContinuousCounter1 * 10) {
                    if (MarkedContiniousCounter1Entries > MarkedContiniousCounter2Entries * 5
                            || MarkedContiniousCounter2Entries > MarkedContiniousCounter1Entries * 5
                            || MarkedContiniousCounter1Entries * 5 < MarkedContinuousCounter1
                            || MarkedContiniousCounter1Entries * 5 < MarkedContinuousCounter2
                            || MarkedContiniousCounter2Entries * 5 < MarkedContinuousCounter1
                            || MarkedContiniousCounter2Entries * 5 < MarkedContinuousCounter2) {
                        score -= MarkedContinuousCounter1 > MarkedContinuousCounter2 ? MarkedContinuousCounter1 * 400 : MarkedContinuousCounter2 * 400;
                    }
                }
            }
            ConcurrentMap<Integer, String> strtokensMap = new MapMaker().concurrencyLevel(2).makeMap();
            for (String strTokeniPart1 : strTokenGetiPart1.values()) {
                for (String strTokeniPart2 : strTokenGetiPart2.values()) {
                    if (strTokeniPart1.equals(strTokeniPart2) && !strtokensMap.values().contains(strTokeniPart2)) {
                        strtokensMap.put(strtokensMap.size() + 1, strTokeniPart2);
                        score += 400;
                    } else {
                        score -= 200;
                    }
                }
            }
            int tokenEntry1 = strTokenGetEntry1.values().size();
            int tokenEntry2 = strTokenGetEntry2.values().size();
            boolean boundariyLeacks = false;
            int remnantCounter = 0;
            if (tokenEntry1 * 2 != tokenEntry2 && tokenEntry2 * 2 != tokenEntry1) {
                boundariyLeacks = true;
            }
            ConcurrentMap<Integer, String> entryTokenMap = new MapMaker().concurrencyLevel(2).makeMap();
            for (String strTokenEntry1 : strTokenGetEntry1.values()) {
                for (String strTokenEntry2 : strTokenGetEntry2.values()) {
                    if (!entryTokenMap.values().contains(strTokenEntry2)) {
                        if (strTokenEntry1.equals(strTokenEntry2)) {
                            score += boundariyLeacks ? 2500 : 2500 / 2;
                        } else if (!boundariyLeacks) {
                            score -= 1250;
                        } else {
                            remnantCounter++;
                        }
                    }
                    entryTokenMap.put(entryTokenMap.size() + 1, strTokenEntry2);
                }
            }
            score -= remnantCounter * 250;
            ConcurrentMap<Integer, String> iTokenMapTagsMap = new MapMaker().concurrencyLevel(2).makeMap();
            for (String strmapTag : ITokenMapTag1.values()) {
                for (String strmapTag1 : ITokenMapTag2.values()) {
                    if (strmapTag.equals(strmapTag1)) {
                        score -= 1450;
                    } else if (!iTokenMapTagsMap.values().contains(strmapTag)) {
                        score += 725;
                        iTokenMapTagsMap.put(iTokenMapTagsMap.size() + 1, strmapTag);
                    }
                }
            }
            int tokenform1size = strTokenForm1.values().size();
            int tokenform2size = strTokenForm2.values().size();
            if (tokenform1size > 0 || tokenform2size > 0) {
                if (tokenform1size < tokenform2size * 5 && tokenform2size < tokenform1size * 5) {
                    for (String strTokenForm1itr1 : strTokenForm1.values()) {
                        for (String strTokenForm1itr2 : strTokenForm2.values()) {
                            if (strTokenForm1itr1.equals(strTokenForm1itr2)) {
                                score -= 1600;
                            } else {
                                score += 500;
                            }
                        }
                    }
                } else if (tokenform1size > 0 && tokenform2size > 0) {
                    score += tokenform1size > tokenform2size ? tokenform1size * 1600 : tokenform2size * 1600;
                }
            } else {
                // Both sizes are zero here, so each falls back to 1 and the
                // penalty is a flat 2400.
                tokenform1size = tokenform1size > 0 ? tokenform1size : 1;
                tokenform2size = tokenform2size > 0 ? tokenform2size : 1;
                score -= (tokenform1size + tokenform2size) * 1200;
            }
            ConcurrentMap<Integer, String> tokenStemmingMap = new MapMaker().concurrencyLevel(2).makeMap();
            for (String strTokenStem : strTokenStems1.values()) {
                for (String strTokenStem1 : strTokenStems2.values()) {
                    if (strTokenStem.equals(strTokenStem1)) {
                        score += 1500;
                    } else if (!tokenStemmingMap.values().contains(strTokenStem)) {
                        score -= 150;
                        tokenStemmingMap.put(tokenStemmingMap.size() + 1, strTokenStem);
                    }
                }
            }
            if (inflectedCounterPositive1 + inflectedCounterPositive2 > inflectedCounterNegative && inflectedCounterNegative > 0) {
                score += (inflectedCounterPositive1 - inflectedCounterNegative) * 650;
            }
            if (inflectedCounterPositive1 > 0 && inflectedCounterPositive2 > 0) {
                score += ((inflectedCounterPositive1 + inflectedCounterPositive2) - inflectedCounterNegative) * 550;
            }
            if (anotatorcounter1 > 1 && anotatorcounter2 > 1) {
                score += (anotatorcounter1 - anotatorcounter2) * 400;
            }
            if ((tokensCounter1 > 0 && tokensCounter2 > 0) && tokensCounter1 < tokensCounter2 * 5 && tokensCounter2 < tokensCounter1 * 5) {
                score += (tokensCounter1 + tokensCounter2) * 1400;
            } else {
                int elseint = tokensCounter1 >= tokensCounter2 ? (tokensCounter1 - tokensCounter2) * 500 : (tokensCounter2 - tokensCounter1) * 500;
                if ((tokensCounter1 > tokensCounter2 * 5 || tokensCounter2 > tokensCounter1 * 5)
                        && tokensCounter1 > 0 && tokensCounter2 > 0) {
                    score -= (tokensCounter1 + tokensCounter2) * 1500;
                } else if (elseint > 0 && tokensCounter1 > 0 && tokensCounter2 > 0) {
                    score += elseint * 2;
                } else if (elseint == 0) {
                    score += 1500;
                }
            }
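
            // Edit-distance penalty: 15 points per Levenshtein edit, e.g. a
            // distance of 3 between "kitten" and "sitting" would cost 45.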
            LevenshteinDistance leven = new LevenshteinDistance(str, str1);
            double SentenceScoreDiff = leven.computeLevenshteinDistance();
            SentenceScoreDiff *= 15;
            score -= SentenceScoreDiff;
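
            // Named entities: reward shared entity mentions, entity types and
            // token tags; low-confidence PERSON mentions are penalized.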
            ConcurrentMap<Integer, String> nerEntities1 = new MapMaker().concurrencyLevel(2).makeMap();
            ConcurrentMap<Integer, String> nerEntities2 = new MapMaker().concurrencyLevel(2).makeMap();
            ConcurrentMap<Integer, String> nerEntities3 = new MapMaker().concurrencyLevel(2).makeMap();
            ConcurrentMap<Integer, String> nerEntities4 = new MapMaker().concurrencyLevel(2).makeMap();
            ConcurrentMap<Integer, String> nerEntityTokenTags1 = new MapMaker().concurrencyLevel(2).makeMap();
            ConcurrentMap<Integer, String> nerEntityTokenTags2 = new MapMaker().concurrencyLevel(2).makeMap();
            for (CoreEntityMention em : pipelineCoreDcoument1.entityMentions()) {
                Set<Map.Entry<String, Double>> entrySet = em.entityTypeConfidences().entrySet();
                String entityType = em.entityType();
                // Keeps the confidence of the last entry in the map.
                Double EntityConfidences = 0.0;
                for (Map.Entry<String, Double> entries : entrySet) {
                    EntityConfidences = entries.getValue();
                }
                List<CoreLabel> tokens = em.tokens();
                for (CoreLabel token : tokens) {
                    if (!nerEntityTokenTags1.values().contains(token.tag())) {
                        if (entityType.equals("PERSON") && EntityConfidences < 0.80) {
                            score -= 6000;
                        } else {
                            nerEntityTokenTags1.put(nerEntityTokenTags1.size() + 1, token.tag());
                        }
                    }
                }
                if (!nerEntities1.values().contains(em.text())) {
                    nerEntities1.put(nerEntities1.size() + 1, em.text());
                    nerEntities3.put(nerEntities3.size() + 1, em.entityType());
                }
            }
            for (CoreEntityMention em : pipelineCoreDcoument2.entityMentions()) {
                Set<Map.Entry<String, Double>> entrySet = em.entityTypeConfidences().entrySet();
                String entityType = em.entityType();
                Double EntityConfidences = 0.0;
                for (Map.Entry<String, Double> entries : entrySet) {
                    EntityConfidences = entries.getValue();
                }
                List<CoreLabel> tokens = em.tokens();
                for (CoreLabel token : tokens) {
                    if (!nerEntityTokenTags2.values().contains(token.tag())) {
                        if (entityType.equals("PERSON") && EntityConfidences < 0.80) {
                            score -= 6000;
                        } else {
                            nerEntityTokenTags2.put(nerEntityTokenTags2.size() + 1, token.tag());
                        }
                    }
                }
                if (!nerEntities2.values().contains(em.text())) {
                    nerEntities2.put(nerEntities2.size() + 1, em.text());
                    nerEntities4.put(nerEntities4.size() + 1, em.entityType());
                }
            }
            for (String strEnts1 : nerEntities1.values()) {
                Collection<String> values = nerEntities2.values();
                for (String strEnts2 : values) {
                    if (strEnts1.equalsIgnoreCase(strEnts2)) {
                        score += 7500;
                    }
                }
            }
            for (String strEnts1 : nerEntities3.values()) {
                if (nerEntities4.values().contains(strEnts1)) {
                    score += 3500;
                }
            }
            for (String strToken : nerEntityTokenTags1.values()) {
                if (nerEntityTokenTags2.values().contains(strToken)) {
                    score += 2500;
                }
            }
        } catch (Exception ex) {
            System.out.println("SENTIMENT stacktrace Overall catch: " + ex.getMessage() + "\n");
        }
        System.out.println("Final current score: " + score + "\nSentence 1: " + str + "\nSentence 2: " + str1 + "\n");
        smxParam.setDistance(score);
        return smxParam;
    }
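
    /*
     * Usage sketch (a minimal example; assumes Datahandler can supply the
     * annotations and that SimilarityMatrix has a two-string constructor and
     * a getDistance() accessor -- adjust to the real signatures):
     *
     * SentimentAnalyzerTest task = new SentimentAnalyzerTest(s1, s2,
     *         new SimilarityMatrix(s1, s2), jmwe1, jmwe2, pipe1, pipe2,
     *         sent1, sent2, coreDoc1, coreDoc2);
     * SimilarityMatrix result =
     *         java.util.concurrent.Executors.newSingleThreadExecutor().submit(task).get();
     * double distance = result.getDistance();
     */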
}