package FunctionLayer.StanfordParser;

import FunctionLayer.Datahandler;
import FunctionLayer.LevenshteinDistance;
import FunctionLayer.SimilarityMatrix;
import com.google.common.collect.MapMaker;
import edu.mit.jmwe.data.IMWE;
import edu.mit.jmwe.data.IMWEDesc;
import edu.mit.jmwe.data.IToken;
import edu.stanford.nlp.ie.AbstractSequenceClassifier;
import edu.stanford.nlp.ling.CoreAnnotations;
import edu.stanford.nlp.ling.CoreLabel;
import edu.stanford.nlp.ling.HasWord;
import edu.stanford.nlp.ling.IndexedWord;
import edu.stanford.nlp.ling.JMWEAnnotation;
import edu.stanford.nlp.ling.TaggedWord;
import edu.stanford.nlp.neural.rnn.RNNCoreAnnotations;
import edu.stanford.nlp.parser.shiftreduce.ShiftReduceParser;
import edu.stanford.nlp.pipeline.Annotation;
import edu.stanford.nlp.pipeline.StanfordCoreNLP;
import edu.stanford.nlp.process.CoreLabelTokenFactory;
import edu.stanford.nlp.process.DocumentPreprocessor;
import edu.stanford.nlp.process.PTBTokenizer;
import edu.stanford.nlp.process.TokenizerFactory;
import edu.stanford.nlp.sentiment.SentimentCoreAnnotations;
import edu.stanford.nlp.sequences.DocumentReaderAndWriter;
import edu.stanford.nlp.tagger.maxent.MaxentTagger;
import edu.stanford.nlp.trees.Constituent;
import edu.stanford.nlp.trees.GrammaticalRelation;
import edu.stanford.nlp.trees.GrammaticalStructure;
import edu.stanford.nlp.trees.GrammaticalStructureFactory;
import edu.stanford.nlp.trees.Tree;
import edu.stanford.nlp.trees.TreeCoreAnnotations;
import edu.stanford.nlp.trees.TypedDependency;
import edu.stanford.nlp.trees.tregex.gui.Tdiff;
import edu.stanford.nlp.util.CoreMap;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Objects;
import java.util.OptionalDouble;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;
import org.ejml.simple.SimpleMatrix;
/**
 * Callable similarity scorer: compares two sentences with a battery of
 * heuristics (POS-tag overlap, constituency/dependency parse comparison,
 * RNN sentiment prediction and node vectors, JMWE multi-word expressions,
 * sequence-classifier output size, and Levenshtein distance) and accumulates
 * a single score into the supplied SimilarityMatrix.
 *
 * @author install1
 */
public class SentimentAnalyzerTest implements Callable<SimilarityMatrix> {

    private SimilarityMatrix smxParam;
    private String str;
    private String str1;
    private ShiftReduceParser model;
    private MaxentTagger tagger;
    private GrammaticalStructureFactory gsf;
    private StanfordCoreNLP pipeline;
    private StanfordCoreNLP pipelineSentiment;
    private AbstractSequenceClassifier<CoreLabel> classifier;
    private Annotation jmweStrAnnotation1;
    private Annotation jmweStrAnnotation2;
    private Annotation pipelineAnnotation1;
    private Annotation pipelineAnnotation2;
    private Annotation pipelineAnnotation1Sentiment;
    private Annotation pipelineAnnotation2Sentiment;

    public SentimentAnalyzerTest(String str, String str1, SimilarityMatrix smxParam, Annotation str1Annotation,
            Annotation str2Annotation, Annotation strPipeline1, Annotation strPipeline2,
            Annotation strPipeSentiment1, Annotation strPipeSentiment2) {
        this.str = str;
        this.str1 = str1;
        this.smxParam = smxParam;
        this.model = Datahandler.getModel();
        this.tagger = Datahandler.getTagger();
        this.pipeline = Datahandler.getPipeline();
        this.pipelineSentiment = Datahandler.getPipelineSentiment();
        this.gsf = Datahandler.getGsf();
        this.classifier = Datahandler.getClassifier();
        this.jmweStrAnnotation1 = str1Annotation;
        this.jmweStrAnnotation2 = str2Annotation;
        this.pipelineAnnotation1 = strPipeline1;
        this.pipelineAnnotation2 = strPipeline2;
        this.pipelineAnnotation1Sentiment = strPipeSentiment1; // maybe process?
        this.pipelineAnnotation2Sentiment = strPipeSentiment2;
    }

    @Override
    public SimilarityMatrix call() {
        Double score = -100.0;
        try {
            List<List<TaggedWord>> taggedwordlist1 = new ArrayList<>();
            List<List<TaggedWord>> taggedwordlist2 = new ArrayList<>();
            DocumentPreprocessor tokenizer = new DocumentPreprocessor(new StringReader(str1)); // noneDelete
            TokenizerFactory<CoreLabel> ptbTokenizerFactory
                    = PTBTokenizer.factory(new CoreLabelTokenFactory(), "untokenizable=firstDelete");
            tokenizer.setTokenizerFactory(ptbTokenizerFactory);
            for (List<HasWord> sentence : tokenizer) {
                taggedwordlist1.add(model.apply(tagger.tagSentence(sentence)).taggedYield());
            }
            tokenizer = new DocumentPreprocessor(new StringReader(str));
            tokenizer.setTokenizerFactory(ptbTokenizerFactory);
            for (List<HasWord> sentence : tokenizer) {
                taggedwordlist2.add(model.apply(tagger.tagSentence(sentence)).taggedYield());
            }
            int counter = taggedwordlist2.stream().mapToInt(List::size).sum();
            int counter1 = taggedwordlist1.stream().mapToInt(List::size).sum();
            int overValue = counter >= counter1 ? counter - counter1 : counter1 - counter;
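            // Heuristic: penalize the token-count mismatch (scaled by 32 below),
            // then reward every POS tag the two sentences share, removing each
            // tag as it is matched so duplicates only count once.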
            overValue *= 32;
            score -= overValue;
            ConcurrentMap<Integer, String> tgwlistIndex = new MapMaker().concurrencyLevel(2).makeMap();
            taggedwordlist1.forEach((tgwList) -> {
                tgwList.forEach((taggedWord) -> {
                    if (!tgwlistIndex.values().contains(taggedWord.tag()) && !taggedWord.tag().equals(":")) {
                        tgwlistIndex.put(tgwlistIndex.size() + 1, taggedWord.tag());
                    }
                });
            });
            AtomicInteger runCount = new AtomicInteger(0);
            taggedwordlist2.forEach((tgwList) -> {
                tgwList.forEach((taggedWord) -> {
                    if (tgwlistIndex.values().contains(taggedWord.tag())) {
                        tgwlistIndex.values().remove(taggedWord.tag());
                        runCount.getAndIncrement();
                    }
                });
            });
            score += runCount.get() * 64;
            ConcurrentMap<Integer, Tree> sentenceConstituencyParseList = new MapMaker().concurrencyLevel(2).makeMap();
            try {
                for (CoreMap sentence : pipelineAnnotation1.get(CoreAnnotations.SentencesAnnotation.class)) {
                    Tree sentenceConstituencyParse = sentence.get(TreeCoreAnnotations.TreeAnnotation.class);
                    sentenceConstituencyParseList.put(sentenceConstituencyParseList.size(), sentenceConstituencyParse);
                }
                for (CoreMap sentence : pipelineAnnotation2.get(CoreAnnotations.SentencesAnnotation.class)) {
                    int constiRelationsize = 0;
                    Tree sentenceConstituencyParse = sentence.get(TreeCoreAnnotations.TreeAnnotation.class);
                    GrammaticalStructure gs = gsf.newGrammaticalStructure(sentenceConstituencyParse);
                    Collection<TypedDependency> allTypedDependencies = gs.allTypedDependencies();
                    ConcurrentMap<Integer, String> filerTreeContent = new MapMaker().concurrencyLevel(2).makeMap();
                    for (Tree sentenceConstituencyParse1 : sentenceConstituencyParseList.values()) {
                        Set<Constituent> constinuent1 = Tdiff.markDiff(sentenceConstituencyParse, sentenceConstituencyParse1);
                        Set<Constituent> constinuent2 = Tdiff.markDiff(sentenceConstituencyParse1, sentenceConstituencyParse);
                        ConcurrentMap<Integer, String> constiLabels = new MapMaker().concurrencyLevel(2).makeMap();
                        for (Constituent consti : constinuent1) {
                            for (Constituent consti1 : constinuent2) {
                                if (consti.value().equals(consti1.value()) && !constiLabels.values().contains(consti.value())) {
                                    constiLabels.put(constiLabels.size(), consti.value());
                                    constiRelationsize++;
                                }
                            }
                        }
                        int constituents1 = constinuent1.size() - constiRelationsize;
                        int constituents2 = constinuent2.size() - constiRelationsize;
                        if (constituents1 > 0 && constituents2 > 0) {
                            score -= (constituents1 + constituents2) * 200;
                        } else {
                            score += constiRelationsize * 200;
                        }
                        GrammaticalStructure gs1 = gsf.newGrammaticalStructure(sentenceConstituencyParse1);
                        Collection<TypedDependency> allTypedDependencies1 = gs1.allTypedDependencies();
                        int relationApplicable1 = 0;
                        int relationApplicable2 = 0;
                        for (TypedDependency TDY1 : allTypedDependencies1) {
                            IndexedWord dep = TDY1.dep();
                            IndexedWord gov = TDY1.gov();
                            GrammaticalRelation grammaticalRelation = gs.getGrammaticalRelation(gov, dep);
                            if (grammaticalRelation.isApplicable(sentenceConstituencyParse)) {
                                score += 1900;
                            }
                            GrammaticalRelation reln = TDY1.reln();
                            if (reln.isApplicable(sentenceConstituencyParse)) {
                                score += 525;
                                relationApplicable1++;
                            }
                        }
                        for (TypedDependency TDY : allTypedDependencies) {
                            IndexedWord dep = TDY.dep();
                            IndexedWord gov = TDY.gov();
                            GrammaticalRelation grammaticalRelation = gs1.getGrammaticalRelation(gov, dep);
                            if (grammaticalRelation.isApplicable(sentenceConstituencyParse)) {
                                score += 900;
                            }
                            GrammaticalRelation reln = TDY.reln();
                            if (reln.isApplicable(sentenceConstituencyParse1)) {
                                score += 525;
                                relationApplicable2++;
                            }
                        }
                        if (!allTypedDependencies.isEmpty() || !allTypedDependencies1.isEmpty()) {
                            if (relationApplicable1 > 0 && relationApplicable2 > 0) {
                                score += 3500;
                            } else {
                                score -= allTypedDependencies.size() > allTypedDependencies1.size()
                                        ? (allTypedDependencies.size() - allTypedDependencies1.size()) * (allTypedDependencies.size() * 160)
                                        : (allTypedDependencies1.size() - allTypedDependencies.size()) * (allTypedDependencies1.size() * 160);
                            }
                        }
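                        // Reward lemmas that appear in both constituency parses;
                        // filerTreeContent ensures each shared lemma is counted once.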
                        AtomicInteger runCount1 = new AtomicInteger(0);
                        sentenceConstituencyParse.taggedLabeledYield().forEach((LBW) -> {
                            sentenceConstituencyParse1.taggedLabeledYield().stream()
                                    .filter((LBW1) -> (LBW.lemma().equals(LBW1.lemma())
                                            && !filerTreeContent.values().contains(LBW.lemma())))
                                    .map((_item) -> {
                                        filerTreeContent.put(filerTreeContent.size() + 1, LBW.lemma());
                                        return _item;
                                    }).forEachOrdered((_item) -> {
                                        runCount1.getAndIncrement();
                                    });
                        });
                        score += runCount1.get() * 1500;
                    }
                }
            } catch (Exception ex) {
                System.out.println("pipelineAnnotation stacktrace: " + ex.getLocalizedMessage() + "\n");
            }
            sentenceConstituencyParseList.clear();
            ConcurrentMap<Integer, SimpleMatrix> simpleSMXlist = new MapMaker().concurrencyLevel(2).makeMap();
            ConcurrentMap<Integer, SimpleMatrix> simpleSMXlistVector = new MapMaker().concurrencyLevel(2).makeMap();
            ConcurrentMap<Integer, Integer> sentiment1 = new MapMaker().concurrencyLevel(2).makeMap();
            ConcurrentMap<Integer, Integer> sentiment2 = new MapMaker().concurrencyLevel(2).makeMap();
            for (CoreMap sentence : pipelineAnnotation1Sentiment.get(CoreAnnotations.SentencesAnnotation.class)) {
                Tree tree = sentence.get(SentimentCoreAnnotations.SentimentAnnotatedTree.class);
                sentiment1.put(sentiment1.size(), RNNCoreAnnotations.getPredictedClass(tree));
                SimpleMatrix predictions = RNNCoreAnnotations.getPredictions(tree);
                SimpleMatrix nodeVector = RNNCoreAnnotations.getNodeVector(tree);
                simpleSMXlist.put(simpleSMXlist.size(), predictions);
                simpleSMXlistVector.put(simpleSMXlistVector.size() + 1, nodeVector);
            }
            ConcurrentMap<Integer, Double> elementSumCounter = new MapMaker().concurrencyLevel(2).makeMap();
            ConcurrentMap<Integer, Double> dotMap = new MapMaker().concurrencyLevel(2).makeMap();
            for (CoreMap sentence : pipelineAnnotation2Sentiment.get(CoreAnnotations.SentencesAnnotation.class)) {
                Tree tree = sentence.get(SentimentCoreAnnotations.SentimentAnnotatedTree.class);
                sentiment2.put(sentiment2.size() + 1, RNNCoreAnnotations.getPredictedClass(tree));
                SimpleMatrix predictions = RNNCoreAnnotations.getPredictions(tree);
                SimpleMatrix nodeVector = RNNCoreAnnotations.getNodeVector(tree);
                ConcurrentMap<Integer, Double> AccumulateDotMap = new MapMaker().concurrencyLevel(2).makeMap();
                ConcurrentMap<Integer, Double> subtractorMap = new MapMaker().concurrencyLevel(2).makeMap();
                ConcurrentMap<Integer, Double> dotPredictions = new MapMaker().concurrencyLevel(2).makeMap();
                Double largest = 10.0;
                Double shortest = 100.0;
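                // Compare this sentence's RNN prediction vector against every
                // prediction vector from the first text. The dot product (scaled
                // to roughly 0..100) drives the rewards and penalties below;
                // repeated identical values are penalized further down as a
                // degenerate match.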
                for (SimpleMatrix simpleSMX : simpleSMXlist.values()) {
                    double dotPrediction = predictions.dot(simpleSMX) * 100;
                    AccumulateDotMap.put(AccumulateDotMap.size() + 1, dotPrediction);
                    double subtracter = dotPrediction > 50 ? dotPrediction - 100
                            : dotPrediction > 0 ? 100 - dotPrediction : 0;
                    subtractorMap.put(subtractorMap.size() + 1, subtracter);
                    if (!dotPredictions.values().contains(dotPrediction)) {
                        if (dotPrediction > largest) {
                            largest = dotPrediction;
                        }
                        if (dotPrediction < shortest) {
                            shortest = dotPrediction;
                        }
                        Double dotPredictionIntervalDifference = largest - shortest;
                        subtracter *= 25;
                        if (dotPredictionIntervalDifference < 5.0) {
                            if (dotPredictions.values().size() > 0) {
                                score += subtracter;
                            }
                        } else {
                            score -= subtracter;
                        }
                    } else {
                        subtracter -= 100;
                        subtracter *= 25;
                        score += subtracter * dotPrediction;
                    }
                    dotPredictions.put(dotPredictions.size() + 1, dotPrediction);
                }
                Double subTracPre = 0.0;
                for (Double subtractors : subtractorMap.values()) {
                    if (Objects.equals(subTracPre, subtractors)) {
                        score -= 2000;
                    }
                    subTracPre = subtractors;
                }
                ConcurrentMap<Integer, Double> DotOverTransfer = dotPredictions;
                dotPredictions = new MapMaker().concurrencyLevel(2).makeMap();
                for (SimpleMatrix simpleSMX : simpleSMXlist.values()) {
                    double dotPrediction = simpleSMX.dot(predictions) * 100;
                    AccumulateDotMap.put(AccumulateDotMap.size() + 1, dotPrediction);
                    double subtracter = dotPrediction > 50 ? dotPrediction - 100
                            : dotPrediction > 0 ? 100 - dotPrediction : 0;
                    subtractorMap.put(subtractorMap.size() + 1, subtracter);
                    if (!dotPredictions.values().contains(dotPrediction)) {
                        subtracter *= 25;
                        int match = 0;
                        for (Double transferDots : DotOverTransfer.values()) {
                            if (transferDots == dotPrediction) {
                                score += subtracter;
                                match++;
                            }
                        }
                        if (match == 0) {
                            score -= subtracter;
                        }
                    } else {
                        subtracter -= 100;
                        subtracter *= 25;
                        score += subtracter * dotPrediction;
                    }
                    dotPredictions.put(dotPredictions.size() + 1, dotPrediction);
                }
                Double preAccumulatorDot = 0.0;
                Double postAccumulatorDot = 0.0;
                for (Double accumulators : AccumulateDotMap.values()) {
                    if (Objects.equals(preAccumulatorDot, accumulators)) {
                        if (Objects.equals(postAccumulatorDot, accumulators)) {
                            score -= 4000;
                        }
                        postAccumulatorDot = accumulators;
                    }
                    preAccumulatorDot = accumulators;
                }
                subTracPre = 0.0;
                for (Double subtractors : subtractorMap.values()) {
                    if (Objects.equals(subTracPre, subtractors)) {
                        score -= 2000;
                    }
                    subTracPre = subtractors;
                }
                Double preDot = 0.0;
                Double postDot = 0.0;
                for (SimpleMatrix simpleSMX : simpleSMXlistVector.values()) {
                    double dot = nodeVector.dot(simpleSMX);
                    double elementSum = nodeVector.kron(simpleSMX).elementSum();
                    if (preDot == dot) {
                        if (postDot == dot) {
                            score -= 4000;
                        }
                        postDot = dot;
                    }
                    preDot = dot;
                    elementSum = Math.round(elementSum * 100.0) / 100.0;
                    elementSumCounter.put(elementSumCounter.size() + 1, elementSum);
                    dotMap.put(dotMap.size() + 1, dot);
                    if (dot < 0.000) {
                        score += dot * 1500;
                    } else if (dot < 0.1) {
                        score += 256;
                    }
                    if (dot > 0.50) {
                        score -= 2400;
                    }
                    if (elementSum < 0.01 && elementSum > 0.00) {
                        score += 3300;
                    } else if (elementSum > 0.1 && elementSum < 0.2) {
                        score += 1100;
                    } else {
                        score -= elementSum * 1424;
                    }
                }
                for (SimpleMatrix simpleSMX : simpleSMXlistVector.values()) {
                    double dot = simpleSMX.dot(nodeVector);
                    double elementSum = simpleSMX.kron(nodeVector).elementSum();
                    if (preDot == dot) {
                        if (postDot == dot) {
                            score -= 4000;
                        }
                        postDot = dot;
                    }
                    preDot = dot;
                    elementSum = Math.round(elementSum * 100.0) / 100.0;
                    elementSumCounter.put(elementSumCounter.size() + 1, elementSum);
                    dotMap.put(dotMap.size() + 1, dot);
                    if (dot < 0.1) {
                        score += 256;
                    }
                    if (dot > 0.50) {
                        score -= 2400;
                    }
                    if (elementSum < 0.01 && elementSum > 0.00) {
                        score += 1300;
                    } else if (elementSum > 0.1 && elementSum < 1.0) {
                        score += 1100;
                    } else {
                        score -= elementSum * 1424;
                    }
                }
            }
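            // Spread statistics over all dot products and element sums: a narrow,
            // strictly positive range is rewarded, extreme outliers are penalized.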
            OptionalDouble minvalueDots = dotMap.values().stream().mapToDouble(Double::doubleValue).min();
            OptionalDouble maxvalueDots = dotMap.values().stream().mapToDouble(Double::doubleValue).max();
            boolean permitted = minvalueDots.getAsDouble() != maxvalueDots.getAsDouble();
            if (permitted) {
                Double dotsVariance = maxvalueDots.getAsDouble() - minvalueDots.getAsDouble();
                if (maxvalueDots.getAsDouble() > minvalueDots.getAsDouble() * 10) {
                    score -= 5500;
                } else if (minvalueDots.getAsDouble() < -0.10) {
                    score -= 3500;
                } else if (dotsVariance < 0.5) {
                    score += 3500;
                } else if (dotsVariance > minvalueDots.getAsDouble() * 2) {
                    score += 3500;
                }
            }
            OptionalDouble minvalueElements = elementSumCounter.values().stream().mapToDouble(Double::doubleValue).min();
            OptionalDouble maxvalueElements = elementSumCounter.values().stream().mapToDouble(Double::doubleValue).max();
            Double elementsVariance = maxvalueElements.getAsDouble() - minvalueElements.getAsDouble();
            if (elementsVariance < 0.05 && maxvalueElements.getAsDouble() > 0.0
                    && minvalueElements.getAsDouble() > 0.0 && elementsVariance > 0.000) {
                score += 3500;
            } else if (minvalueElements.getAsDouble() < 0.0
                    && minvalueElements.getAsDouble() - maxvalueElements.getAsDouble() < 0.50) {
                score -= 2500;
            }
            score -= (sentiment1.size() > sentiment2.size()
                    ? sentiment1.size() - sentiment2.size()
                    : sentiment2.size() - sentiment1.size()) * 500;
            DocumentReaderAndWriter<CoreLabel> readerAndWriter = classifier.makePlainTextReaderAndWriter();
            List<List<CoreLabel>> classifyRaw1 = classifier.classifyRaw(str, readerAndWriter);
            List<List<CoreLabel>> classifyRaw2 = classifier.classifyRaw(str1, readerAndWriter);
            score -= (classifyRaw1.size() > classifyRaw2.size()
                    ? classifyRaw1.size() - classifyRaw2.size()
                    : classifyRaw2.size() - classifyRaw1.size()) * 200;
            int mainSentiment1 = 0;
            int longest1 = 0;
            int mainSentiment2 = 0;
            int longest2 = 0;
            for (CoreMap sentence : pipelineAnnotation1Sentiment.get(CoreAnnotations.SentencesAnnotation.class)) {
                Tree tree = sentence.get(SentimentCoreAnnotations.SentimentAnnotatedTree.class);
                int sentiment = RNNCoreAnnotations.getPredictedClass(tree);
                String partText = sentence.toString();
                if (partText.length() > longest1) {
                    mainSentiment1 = sentiment;
                    longest1 = partText.length();
                }
            }
            for (CoreMap sentence : pipelineAnnotation2Sentiment.get(CoreAnnotations.SentencesAnnotation.class)) {
                Tree tree = sentence.get(SentimentCoreAnnotations.SentimentAnnotatedTree.class);
                int sentiment = RNNCoreAnnotations.getPredictedClass(tree);
                String partText = sentence.toString();
                if (partText.length() > longest2) {
                    mainSentiment2 = sentiment;
                    longest2 = partText.length();
                }
            }
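            // Compare the longest sentence of each text; the size of the length
            // gap and the dominant sentiment class decide the adjustment.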
            if (longest1 != longest2) {
                long deffLongest = longest1 > longest2 ? longest1 : longest2;
                long deffshorter = longest1 < longest2 ? longest1 : longest2;
                if (deffLongest > deffshorter * 5) {
                    score -= 5500;
                } else if (deffLongest < (deffshorter * 2) - 1 && deffLongest - deffshorter <= 45) {
                    score += (deffLongest - deffshorter) * 60;
                } else if (mainSentiment1 != mainSentiment2 && deffLongest - deffshorter > 20 && deffLongest - deffshorter < 45) {
                    score += (deffLongest - deffshorter) * 120;
                } else if (deffLongest - deffshorter < 2) {
                    score += (deffLongest + deffshorter) * 40;
                } else if (deffshorter * 2 >= deffLongest && deffshorter * 2 < deffLongest + 5) {
                    score += deffLongest * 160;
                } else {
                    score -= (deffLongest - deffshorter) * 50;
                }
                if (deffLongest - deffshorter <= 5) {
                    score += 2500;
                }
            }
            int tokensCounter1 = 0;
            int tokensCounter2 = 0;
            int anotatorcounter1 = 0;
            int anotatorcounter2 = 0;
            int inflectedCounterPositive1 = 0;
            int inflectedCounterPositive2 = 0;
            int inflectedCounterNegative = 0;
            int MarkedContinuousCounter1 = 0;
            int MarkedContinuousCounter2 = 0;
            Integer MarkedContinuousCounter1Entries = 0;
            Integer MarkedContinuousCounter2Entries = 0;
            int UnmarkedPatternCounter = 0;
            ConcurrentMap<Integer, String> ITokenMapTag1 = new MapMaker().concurrencyLevel(2).makeMap();
            ConcurrentMap<Integer, String> ITokenMapTag2 = new MapMaker().concurrencyLevel(2).makeMap();
            ConcurrentMap<Integer, String> strTokenStems1 = new MapMaker().concurrencyLevel(2).makeMap();
            ConcurrentMap<Integer, String> strTokenStems2 = new MapMaker().concurrencyLevel(2).makeMap();
            ConcurrentMap<Integer, String> strTokenForm1 = new MapMaker().concurrencyLevel(2).makeMap();
            ConcurrentMap<Integer, String> strTokenForm2 = new MapMaker().concurrencyLevel(2).makeMap();
            ConcurrentMap<Integer, String> strTokenGetEntry1 = new MapMaker().concurrencyLevel(2).makeMap();
            ConcurrentMap<Integer, String> strTokenGetEntry2 = new MapMaker().concurrencyLevel(2).makeMap();
            ConcurrentMap<Integer, String> strTokenGetiPart1 = new MapMaker().concurrencyLevel(2).makeMap();
            ConcurrentMap<Integer, String> strTokenGetiPart2 = new MapMaker().concurrencyLevel(2).makeMap();
            ConcurrentMap<Integer, String> strTokenEntryPOS1 = new MapMaker().concurrencyLevel(2).makeMap();
            ConcurrentMap<Integer, String> strTokenEntryPOS2 = new MapMaker().concurrencyLevel(2).makeMap();
            try {
                List<CoreMap> sentences = jmweStrAnnotation1.get(CoreAnnotations.SentencesAnnotation.class);
                for (CoreMap sentence : sentences) {
                    for (IMWE<IToken> token : sentence.get(JMWEAnnotation.class)) {
                        if (token.isInflected()) {
                            inflectedCounterPositive1++;
                        } else {
                            inflectedCounterNegative++;
                        }
                        strTokenForm1.put(strTokenForm1.size() + 1, token.getForm());
                        strTokenGetEntry1.put(strTokenGetEntry1.size() + 1,
                                token.getEntry().toString().substring(token.getEntry().toString().length() - 1));
                        Collection<IMWEDesc.IPart> values = token.getPartMap().values();
                        IMWEDesc entry = token.getEntry();
                        MarkedContinuousCounter1 += entry.getMarkedContinuous();
                        UnmarkedPatternCounter += entry.getUnmarkedPattern();
                        for (IMWEDesc.IPart iPart : values) {
                            strTokenGetiPart1.put(strTokenGetiPart1.size() + 1, iPart.getForm());
                        }
                        for (String strPostPrefix : entry.getPOS().getPrefixes()) {
                            strTokenEntryPOS1.put(strTokenEntryPOS1.size() + 1, strPostPrefix);
                        }
                        for (IToken tokens : token.getTokens()) {
                            ITokenMapTag1.put(ITokenMapTag1.size() + 1, tokens.getTag());
                            for (String strtoken : tokens.getStems()) {
                                strTokenStems1.put(strTokenStems1.size() + 1, strtoken);
                                MarkedContinuousCounter1Entries++;
                            }
                        }
                        tokensCounter1++;
                    }
                    anotatorcounter1++;
                }
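                // Second text: same JMWE extraction. Note that inflected MWEs
                // increment inflectedCounterPositive2, while uninflected ones now
                // decrement inflectedCounterNegative, so that counter ends up
                // holding the difference between the two texts.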
                sentences = jmweStrAnnotation2.get(CoreAnnotations.SentencesAnnotation.class);
                for (CoreMap sentence : sentences) {
                    for (IMWE<IToken> token : sentence.get(JMWEAnnotation.class)) {
                        if (token.isInflected()) {
                            inflectedCounterPositive2++;
                        } else {
                            inflectedCounterNegative--;
                        }
                        strTokenForm2.put(strTokenForm2.size() + 1, token.getForm());
                        strTokenGetEntry2.put(strTokenGetEntry2.size() + 1,
                                token.getEntry().toString().substring(token.getEntry().toString().length() - 1));
                        Collection<IMWEDesc.IPart> values = token.getPartMap().values();
                        IMWEDesc entry = token.getEntry();
                        MarkedContinuousCounter2 += entry.getMarkedContinuous();
                        UnmarkedPatternCounter += entry.getUnmarkedPattern();
                        for (IMWEDesc.IPart iPart : values) {
                            strTokenGetiPart2.put(strTokenGetiPart2.size() + 1, iPart.getForm());
                        }
                        for (String strPostPrefix : entry.getPOS().getPrefixes()) {
                            strTokenEntryPOS2.put(strTokenEntryPOS2.size() + 1, strPostPrefix);
                        }
                        for (IToken tokens : token.getTokens()) {
                            ITokenMapTag2.put(ITokenMapTag2.size() + 1, tokens.getTag());
                            for (String strtoken : tokens.getStems()) {
                                strTokenStems2.put(strTokenStems2.size() + 1, strtoken);
                                MarkedContinuousCounter2Entries++;
                            }
                        }
                        tokensCounter2++;
                    }
                    anotatorcounter2++;
                }
            } catch (Exception ex) {
                System.out.println("JMWE stacktrace: " + ex.getMessage() + "\n");
            }
            for (String strTokenPos1 : strTokenEntryPOS1.values()) {
                for (String strTokenPos2 : strTokenEntryPOS2.values()) {
                    if (strTokenPos1.equals(strTokenPos2)) {
                        score += 500;
                    }
                }
            }
            if (UnmarkedPatternCounter > 0 && UnmarkedPatternCounter < 5) {
                score -= UnmarkedPatternCounter * 1600;
            }
            if (MarkedContinuousCounter1 > 0 && MarkedContinuousCounter2 > 0) {
                // Note: && binds tighter than ||, so the Objects.equals check only
                // guards the first comparison; kept as originally written.
                if (!Objects.equals(MarkedContinuousCounter1Entries, MarkedContinuousCounter2Entries)
                        && (MarkedContinuousCounter1 * 2 >= MarkedContinuousCounter2 * MarkedContinuousCounter1)
                        || (MarkedContinuousCounter2 * 2 >= MarkedContinuousCounter1 * MarkedContinuousCounter2)) {
                    score += MarkedContinuousCounter1 > MarkedContinuousCounter2
                            ? (MarkedContinuousCounter1 - MarkedContinuousCounter2) * 500
                            : (MarkedContinuousCounter2 - MarkedContinuousCounter1) * 500;
                } else {
                    score += MarkedContinuousCounter1 > MarkedContinuousCounter2
                            ? (MarkedContinuousCounter2 - MarkedContinuousCounter1) * 500
                            : (MarkedContinuousCounter1 - MarkedContinuousCounter2) * 500;
                }
            }
            for (String strTokeniPart1 : strTokenGetiPart1.values()) {
                for (String strTokeniPart2 : strTokenGetiPart2.values()) {
                    if (strTokeniPart1.equals(strTokeniPart2)) {
                        score += 400;
                    }
                }
            }
            for (String strTokenEntry1 : strTokenGetEntry1.values()) {
                for (String strTokenEntry2 : strTokenGetEntry2.values()) {
                    if (strTokenEntry1.equals(strTokenEntry2)) {
                        score += 2500;
                    }
                }
            }
            for (String strmapTag : ITokenMapTag1.values()) {
                for (String strmapTag1 : ITokenMapTag2.values()) {
                    if (strmapTag.equals(strmapTag1)) {
                        score += 1450;
                    }
                }
            }
            for (String strTokenForm1itr1 : strTokenForm1.values()) {
                for (String strTokenForm1itr2 : strTokenForm2.values()) {
                    if (strTokenForm1itr1.equals(strTokenForm1itr2)) {
                        score += 2600;
                    } else if (strTokenForm1itr1.contains(strTokenForm1itr2)) {
                        score += 500;
                    }
                }
            }
            for (String strTokenStem : strTokenStems1.values()) {
                for (String strTokenStem1 : strTokenStems2.values()) {
                    if (strTokenStem.equals(strTokenStem1)) {
                        score += 1500;
                    }
                }
            }
            if (inflectedCounterPositive1 + inflectedCounterPositive2 > inflectedCounterNegative
                    && inflectedCounterNegative > 0) {
                score += (inflectedCounterPositive1 - inflectedCounterNegative) * 650;
            }
            if (inflectedCounterPositive1 > 0 && inflectedCounterPositive2 > 0) {
                score += ((inflectedCounterPositive1 + inflectedCounterPositive2) - inflectedCounterNegative) * 550;
            }
            if (anotatorcounter1 > 1 && anotatorcounter2 > 1) {
                score += (anotatorcounter1 - anotatorcounter2) * 400;
            }
            if (tokensCounter1 > 0 && tokensCounter2 > 0) {
                score += (tokensCounter1 + tokensCounter2) * 400;
            } else {
                int elseint = tokensCounter1 >= tokensCounter2
                        ? (tokensCounter1 - tokensCounter2) * 500
                        : (tokensCounter2 - tokensCounter1) * 500;
                if (elseint > 0) {
                    score -= elseint * 2;
                }
            }
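            // Final adjustment: raw edit distance between the two strings,
            // weighted and subtracted from the accumulated score.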
            LevenshteinDistance leven = new LevenshteinDistance(str, str1);
            double SentenceScoreDiff = leven.computeLevenshteinDistance();
            SentenceScoreDiff *= 15;
            score -= SentenceScoreDiff;
        } catch (Exception ex) {
            System.out.println("SENTIMENT stacktrace Overall catch: " + ex.getMessage() + "\n");
        }
        System.out.println("Final current score: " + score + "\nSentence 1: " + str + "\nSentence 2: " + str1 + "\n");
        smxParam.setDistance(score);
        return smxParam;
    }
}
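/*
 * Usage sketch (hypothetical): the class is a Callable, so it is meant to be
 * submitted to an executor once Datahandler has initialized the shared models
 * and the six Annotations have been precomputed for the two strings. The
 * SimilarityMatrix constructor and getDistance() accessor shown here are
 * assumptions about the surrounding project, not verified API:
 *
 *   ExecutorService pool = Executors.newFixedThreadPool(2);
 *   SimilarityMatrix smx = new SimilarityMatrix(str, str1);
 *   SentimentAnalyzerTest worker = new SentimentAnalyzerTest(str, str1, smx,
 *           jmweAnno1, jmweAnno2, pipelineAnno1, pipelineAnno2,
 *           sentimentAnno1, sentimentAnno2);
 *   Future<SimilarityMatrix> result = pool.submit(worker);
 *   double distance = result.get().getDistance();
 */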