Reduced the thread count for secondary processing and added a thread pool for UDP messages. Furthermore, moved logic around so the response concatenation can be evaluated.

This commit is contained in:
christian 2021-12-07 22:26:19 +01:00
parent e9969f836e
commit e234ce0109
3 changed files with 298 additions and 275 deletions

View File

@ -89,7 +89,7 @@ public class DataMapper {
ResultSet l_rsSearch = null; ResultSet l_rsSearch = null;
String CountSQL = "select count(*) from Sentences"; String CountSQL = "select count(*) from Sentences";
String l_sSQL = "delete from Sentences\n" + String l_sSQL = "delete from Sentences\n" +
" where DATE(last_used) < DATE_SUB(CURDATE(), INTERVAL 32 DAY)\n" + " where DATE(last_used) < DATE_SUB(CURDATE(), INTERVAL 10 DAY)\n" +
" order by last_used asc limit 2"; " order by last_used asc limit 2";
try { try {
l_cCon = DBCPDataSource.getConnection(); l_cCon = DBCPDataSource.getConnection();

View File

@ -6,7 +6,6 @@ import edu.mit.jmwe.data.IMWE;
import edu.mit.jmwe.data.IToken; import edu.mit.jmwe.data.IToken;
import edu.stanford.nlp.ie.AbstractSequenceClassifier; import edu.stanford.nlp.ie.AbstractSequenceClassifier;
import edu.stanford.nlp.ie.crf.CRFClassifier; import edu.stanford.nlp.ie.crf.CRFClassifier;
import edu.stanford.nlp.ie.machinereading.structure.EntityMention;
import edu.stanford.nlp.ling.CoreAnnotations; import edu.stanford.nlp.ling.CoreAnnotations;
import edu.stanford.nlp.ling.CoreLabel; import edu.stanford.nlp.ling.CoreLabel;
import edu.stanford.nlp.ling.TaggedWord; import edu.stanford.nlp.ling.TaggedWord;
@ -18,7 +17,6 @@ import edu.stanford.nlp.pipeline.StanfordCoreNLP;
import edu.stanford.nlp.tagger.maxent.MaxentTagger; import edu.stanford.nlp.tagger.maxent.MaxentTagger;
import edu.stanford.nlp.trees.*; import edu.stanford.nlp.trees.*;
import edu.stanford.nlp.util.CoreMap; import edu.stanford.nlp.util.CoreMap;
import kotlinx.coroutines.*;
import org.ejml.simple.SimpleMatrix; import org.ejml.simple.SimpleMatrix;
import java.util.*; import java.util.*;
@ -202,6 +200,209 @@ public class Datahandler {
} }
} }
private SentimentAnalyzerTest getReponseFuturesHelper(String strF, String str1, StanfordCoreNLP stanfordCoreNLP,
StanfordCoreNLP stanfordCoreNLPSentiment,
List<CoreMap> coreMaps1, Annotation strAnno,
Annotation strAnnoSentiment, CoreDocument coreDocument
, Integer tokenizeCountingF, List<List<TaggedWord>> taggedWordListF, ArrayList<TypedDependency> typedDependenciesF
, ArrayList<Integer> rnnCoreAnnotationsPredictedF, ArrayList<SimpleMatrix> simpleMatricesF
, ArrayList<SimpleMatrix> simpleMatricesNodevectorsF, List<String> listF, Integer longestF, List<CoreMap> sentencesF
, List<CoreMap> sentencesSentimentF, ArrayList<Tree> treesF, ArrayList<GrammaticalStructure> grammaticalStructuresF
, Integer sentimentLongestF, List<IMWE<IToken>> imwesF, Integer inflectedCounterNegativeF, Integer inflectedCounterPositiveF
, ArrayList<String> tokenEntryF, Integer unmarkedPatternCounterF, ArrayList<String> strTokensIpartFormF, ArrayList<String> tokenFormsF
, ArrayList<Integer> intTokenEntyCountsF, Integer markedContinuousCounterF, ArrayList<String> ITokenTagsF
, ArrayList<String> strTokenEntryGetPOSF, ArrayList<String> retrieveTGWListF, Integer pairCounterF
, Integer tokensCounterF, ArrayList<String> stopWordLemmaF, ArrayList<String> nerEntitiesF
, ArrayList<String> stopWordTokenF, ArrayList<String> entityTokenTagsF, ArrayList<String> nerEntitiesTypeF
, Integer anotatorcounterF, ArrayList<String> strTokenStemsF) {
Annotation annotation2 = pipelineSentimentAnnotationCache.getOrDefault(str1, null);
Annotation annotation4 = pipelineAnnotationCache.getOrDefault(str1, null);
CoreDocument coreDocument1 = coreDocumentAnnotationCache.getOrDefault(str1, null);
Annotation jmweAnnotation = jmweAnnotationCache.getOrDefault(str1, null);
if (annotation2 == null) {
createStrAnnotation(str1, stanfordCoreNLPSentiment, true);
}
if (annotation4 == null) {
createStrAnnotation(str1, stanfordCoreNLP, false);
}
if (coreDocument1 == null) {
getCoreDocumentsSuggested(stanfordCoreNLP, str1);
}
if (jmweAnnotation == null) {
getJMWEAnnotation(str1);
jmweAnnotation = jmweAnnotationCache.get(str1);
}
Integer tokenizeCounting = tokenizeCountingHashMap.getOrDefault(str1, null);
List<List<TaggedWord>> taggedWordList1 = taggedWordListHashMap.getOrDefault(str1, null);
java.util.ArrayList<String> retrieveTGWList1 = retrieveTGWListHashMap.getOrDefault(str1, null);
List<CoreMap> sentence1 = sentences1HashMap.getOrDefault(str1, null);
List<CoreMap> sentenceSentiment1 = sentencesSentimentHashMap.getOrDefault(str1, null);
ArrayList<Tree> trees1 = trees1HashMap.getOrDefault(str1, null);
List<CoreMap> coreMaps2 = new ArrayList<>();
ArrayList<GrammaticalStructure> grammaticalStructures1 = grammaticalStructureHashMap.getOrDefault(str1, null);
if (jmweAnnotation != null) {
coreMaps2 = jmweAnnotation.get(CoreAnnotations.SentencesAnnotation.class);
}
ArrayList<TypedDependency> typedDependencies1 = typedDependenciesHashMap.getOrDefault(str1, null);
ArrayList<Integer> rnnCoreAnnotationsPredicted1 = rnnCoreAnnotationsPredictedHashMap.getOrDefault(str1, null);
ArrayList<SimpleMatrix> simpleMatrices1 = simpleMatricesHashMap.getOrDefault(str1, null);
simpleMatricesHashMap.getOrDefault(str1, null);
ArrayList<SimpleMatrix> simpleMatricesNodevectors1 = simpleMatricesNodevectorsHashMap.getOrDefault(str1, null);
List list1 = listHashMap.getOrDefault(str1, null);
Integer longest1 = longestHashMap.getOrDefault(str1, null);
Integer sentimentLongest1 = sentimentHashMap.getOrDefault(str1, null);
List<IMWE<IToken>> imwes1 = imwesHashMap.getOrDefault(str1, null);
Integer InflectedCounterNegative1 = InflectedCounterNegativeHashMap.getOrDefault(str1, null);
Integer InflectedCounterPositive1 = InflectedCounterPositiveHashMap.getOrDefault(str1, null);
ArrayList<String> tokenEntry1 = tokenEntryHashMap.getOrDefault(str1, null);
Integer MarkedContinuousCounter1 = MarkedContinuousCounterHashMap.getOrDefault(str1, null);
Integer UnmarkedPatternCounter1 = UnmarkedPatternCounterHashMap.getOrDefault(str1, null);
ArrayList<String> strTokensIpartForm1 = strTokensIpartFormHashMap.getOrDefault(str1, null);
ArrayList<String> tokenForms1 = tokenFormsHashMap.getOrDefault(str1, null);
ArrayList<String> strTokenEntryGetPOS1 = strTokenEntryGetPOSHashMap.getOrDefault(str1, null);
ArrayList<Integer> intTokenEntyCounts1 = intTokenEntyCountsHashMap.getOrDefault(str1, null);
ArrayList<String> ITokenTags1 = ITokenTagsHashMap.getOrDefault(str1, null);
ArrayList<String> strTokenStems1 = strTokenStemsHashMap.getOrDefault(str1, null);
Integer Anotatorcounter1 = AnotatorcounterHashMap.getOrDefault(str1, null);
Integer TokensCounter1 = TokensCounterHashMap.getOrDefault(str1, null);
ArrayList<String> entityTokenTags1 = entityTokenTagsHashMap.getOrDefault(str1, null);
ArrayList<String> nerEntities1 = nerEntitiesHashMap.getOrDefault(str1, null);
ArrayList<String> nerEntitiesType1 = nerEntitiesTypeHashMap.getOrDefault(str1, null);
ArrayList<String> stopWordToken1 = stopWordTokenHashMap.getOrDefault(str1, null);
ArrayList<String> stopWordLemma1 = stopWordLemmaHashMap.getOrDefault(str1, null);
Integer PairCounter1 = PairCounterHashMap.getOrDefault(str1, null);
SentimentAnalyzerTest SMX = new SentimentAnalyzerTest(strF, str1, new SimilarityMatrix(strF, str1),
coreMaps1, coreMaps2, strAnno,
pipelineAnnotationCache.get(str1), strAnnoSentiment,
pipelineSentimentAnnotationCache.get(str1), coreDocument, coreDocumentAnnotationCache.get(str1),
tagger, gsf, classifier, tokenizeCounting, tokenizeCountingF,
taggedWordListF, taggedWordList1, retrieveTGWListF, retrieveTGWList1,
sentencesF, sentence1, sentencesSentimentF, sentenceSentiment1, treesF, trees1,
grammaticalStructuresF, grammaticalStructures1, typedDependenciesF,
typedDependencies1, rnnCoreAnnotationsPredictedF, rnnCoreAnnotationsPredicted1,
simpleMatricesF, simpleMatrices1, simpleMatricesNodevectorsF, simpleMatricesNodevectors1,
listF, list1, longestF, longest1, sentimentLongestF, sentimentLongest1, imwesF,
imwes1, inflectedCounterNegativeF, InflectedCounterNegative1, inflectedCounterPositiveF,
InflectedCounterPositive1, tokenEntryF, tokenEntry1, markedContinuousCounterF,
MarkedContinuousCounter1, unmarkedPatternCounterF, UnmarkedPatternCounter1,
strTokensIpartFormF, strTokensIpartForm1, tokenFormsF, tokenForms1,
strTokenEntryGetPOSF, strTokenEntryGetPOS1, intTokenEntyCountsF,
intTokenEntyCounts1, ITokenTagsF, ITokenTags1, strTokenStemsF, strTokenStems1,
anotatorcounterF, Anotatorcounter1, tokensCounterF, TokensCounter1,
entityTokenTagsF, entityTokenTags1, nerEntitiesF, nerEntities1, nerEntitiesTypeF,
nerEntitiesType1, stopWordTokenF, stopWordToken1, stopWordLemmaF, stopWordLemma1,
pairCounterF, PairCounter1
);
if (tokenizeCounting == null) {
tokenizeCountingHashMap.put(str1, SMX.getTokenizeCounting());
}
if (taggedWordList1 == null) {
taggedWordListHashMap.put(str1, SMX.getTaggedWordList1());
}
if (retrieveTGWList1 == null) {
retrieveTGWListHashMap.put(str1, SMX.getRetrieveTGWList1());
}
if (sentence1 == null) {
sentences1HashMap.put(str1, SMX.getSentences1());
}
if (sentenceSentiment1 == null) {
sentencesSentimentHashMap.put(str1, SMX.getSentencesSentiment1());
}
if (trees1 == null) {
trees1HashMap.put(str1, SMX.getTrees1());
}
if (grammaticalStructures1 == null) {
grammaticalStructureHashMap.put(str1, SMX.getGrammaticalStructures1());
}
if (typedDependencies1 == null) {
typedDependenciesHashMap.put(str1, SMX.getTypedDependencies1());
}
if (rnnCoreAnnotationsPredicted1 == null) {
rnnCoreAnnotationsPredictedHashMap.put(str1, SMX.getRnnCoreAnnotationsPredicted1());
}
if (simpleMatrices1 == null) {
simpleMatricesHashMap.put(str1, SMX.getSimpleMatrices1());
}
if (simpleMatricesNodevectors1 == null) {
simpleMatricesNodevectorsHashMap.put(str1, SMX.getSimpleMatricesNodevectors1());
}
if (list1 == null) {
listHashMap.put(str1, SMX.getList1());
}
if (longest1 == null) {
longestHashMap.put(str1, SMX.getLongest1());
}
if (sentimentLongest1 == null) {
sentimentHashMap.put(str1, SMX.getSentimentLongest1());
}
if (imwes1 == null) {
imwesHashMap.put(str1, SMX.getImwes1());
}
if (InflectedCounterNegative1 == null) {
InflectedCounterNegativeHashMap.put(str1, SMX.getInflectedCounterNegative1());
}
if (InflectedCounterPositive1 == null) {
InflectedCounterPositiveHashMap.put(str1, SMX.getInflectedCounterPositive1());
}
if (tokenEntry1 == null) {
tokenEntryHashMap.put(str1, SMX.getTokenEntry1());
}
if (MarkedContinuousCounter1 == null) {
MarkedContinuousCounterHashMap.put(str1, SMX.getMarkedContinuousCounter1());
}
if (UnmarkedPatternCounter1 == null) {
UnmarkedPatternCounterHashMap.put(str1, SMX.getUnmarkedPatternCounter1());
}
if (strTokensIpartForm1 == null) {
strTokensIpartFormHashMap.put(str1, SMX.getStrTokensIpartForm1());
}
if (tokenForms1 == null) {
tokenFormsHashMap.put(str1, SMX.getTokenForms1());
}
if (strTokenEntryGetPOS1 == null) {
strTokenEntryGetPOSHashMap.put(str1, SMX.getStrTokenEntryGetPOS1());
}
if (intTokenEntyCounts1 == null) {
intTokenEntyCountsHashMap.put(str1, SMX.getIntTokenEntyCounts1());
}
if (ITokenTags1 == null) {
ITokenTagsHashMap.put(str1, SMX.getITokenTags1());
}
if (strTokenStems1 == null) {
strTokenStemsHashMap.put(str1, SMX.getStrTokenStems1());
}
if (Anotatorcounter1 == null) {
AnotatorcounterHashMap.put(str1, SMX.getAnotatorcounter1());
}
if (TokensCounter1 == null) {
TokensCounterHashMap.put(str1, SMX.getTokensCounter1());
}
if (entityTokenTags1 == null) {
entityTokenTagsHashMap.put(str1, SMX.getEntityTokenTags1());
}
if (nerEntities1 == null) {
nerEntitiesHashMap.put(str1, SMX.getNerEntities1());
}
if (nerEntitiesType1 == null) {
nerEntitiesTypeHashMap.put(str1, SMX.getNerEntitiesType1());
}
if (stopWordToken1 == null) {
stopWordTokenHashMap.put(str1, SMX.getStopWordToken1());
}
if (stopWordLemma1 == null) {
stopWordLemmaHashMap.put(str1, SMX.getStopWordLemma1());
}
if (PairCounter1 == null) {
PairCounterHashMap.put(str1, SMX.getPairCounter1());
}
return SMX;
}
public String getResponseFutures(String strF, StanfordCoreNLP stanfordCoreNLP, StanfordCoreNLP stanfordCoreNLPSentiment) { public String getResponseFutures(String strF, StanfordCoreNLP stanfordCoreNLP, StanfordCoreNLP stanfordCoreNLPSentiment) {
Annotation strAnno = new Annotation(strF); Annotation strAnno = new Annotation(strF);
@ -215,364 +416,159 @@ public class Datahandler {
Annotation annotation = new Annotation(strF); Annotation annotation = new Annotation(strF);
stanfordCoreNLP.annotate(annotation); stanfordCoreNLP.annotate(annotation);
CoreDocument coreDocument = new CoreDocument(annotation); CoreDocument coreDocument = new CoreDocument(annotation);
List<String> ues_copy = new ArrayList(DataMapper.getAllStrings());
double preRelationUserCounters = -155000.0;
ArrayList<String> concurrentRelations = new ArrayList();
StringBuilder SB = new StringBuilder();
Annotation jmweAnnotationF = PipelineJMWESingleton.INSTANCE.getJMWEAnnotation(strF); Annotation jmweAnnotationF = PipelineJMWESingleton.INSTANCE.getJMWEAnnotation(strF);
List<CoreMap> coreMaps1 = jmweAnnotationF.get(CoreAnnotations.SentencesAnnotation.class);
Integer tokenizeCountingF = null; Integer tokenizeCountingF = null;
List<List<TaggedWord>> taggedWordListF = null; List<List<TaggedWord>> taggedWordListF = null;
java.util.ArrayList<String> retrieveTGWListF = null; java.util.ArrayList<String> retrieveTGWListF = null;
List<CoreMap> sentencesF = null; List<CoreMap> sentencesF = null;
List<CoreMap> sentencesSentimentF = null; List<CoreMap> sentencesSentimentF = null;
List<CoreMap> coreMaps1 = jmweAnnotationF.get(CoreAnnotations.SentencesAnnotation.class);
java.util.ArrayList<Tree> treesF = null; java.util.ArrayList<Tree> treesF = null;
ArrayList<GrammaticalStructure> grammaticalStructuresF = null; ArrayList<GrammaticalStructure> grammaticalStructuresF = null;
java.util.ArrayList<TypedDependency> typedDependenciesF = null; java.util.ArrayList<TypedDependency> typedDependenciesF = null;
java.util.ArrayList<Integer> rnnCoreAnnotationsPredictedF = null; java.util.ArrayList<Integer> rnnCoreAnnotationsPredictedF = null;
java.util.ArrayList<SimpleMatrix> simpleMatricesF = null; java.util.ArrayList<SimpleMatrix> simpleMatricesF = null;
java.util.ArrayList<SimpleMatrix> simpleMatricesNodevectorsF = null; java.util.ArrayList<SimpleMatrix> simpleMatricesNodevectorsF = null;
List<String> listF = null; List<String> listF = null;
Integer longestF = null; Integer longestF = null;
Integer sentimentLongestF = null; Integer sentimentLongestF = null;
List<IMWE<IToken>> imwesF = null; List<IMWE<IToken>> imwesF = null;
Integer InflectedCounterNegativeF = null; Integer InflectedCounterNegativeF = null;
Integer InflectedCounterPositiveF = null; Integer InflectedCounterPositiveF = null;
ArrayList<String> tokenEntryF = null; ArrayList<String> tokenEntryF = null;
Integer MarkedContinuousCounterF = null; Integer MarkedContinuousCounterF = null;
Integer UnmarkedPatternCounterF = null; Integer UnmarkedPatternCounterF = null;
ArrayList<String> strTokensIpartFormF = null; ArrayList<String> strTokensIpartFormF = null;
java.util.ArrayList<String> tokenFormsF = null; java.util.ArrayList<String> tokenFormsF = null;
ArrayList<String> strTokenEntryGetPOSF = null; ArrayList<String> strTokenEntryGetPOSF = null;
java.util.ArrayList<Integer> intTokenEntyCountsF = null; java.util.ArrayList<Integer> intTokenEntyCountsF = null;
ArrayList<String> ITokenTagsF = null; ArrayList<String> ITokenTagsF = null;
java.util.ArrayList<String> strTokenStemsF = null; java.util.ArrayList<String> strTokenStemsF = null;
Integer AnotatorcounterF = null; Integer AnotatorcounterF = null;
Integer TokensCounterF = null; Integer TokensCounterF = null;
java.util.ArrayList<String> entityTokenTagsF = null; java.util.ArrayList<String> entityTokenTagsF = null;
java.util.ArrayList<String> nerEntitiesF = null; java.util.ArrayList<String> nerEntitiesF = null;
java.util.ArrayList<String> nerEntitiesTypeF = null; java.util.ArrayList<String> nerEntitiesTypeF = null;
java.util.ArrayList<String> stopWordTokenF = null; java.util.ArrayList<String> stopWordTokenF = null;
java.util.ArrayList<String> stopWordLemmaF = null; java.util.ArrayList<String> stopWordLemmaF = null;
Integer PairCounterF = null; Integer PairCounterF = null;
ArrayList<String> concurrentRelations = new ArrayList();
StringBuilder SB = new StringBuilder();
List<String> ues_copy = new ArrayList(DataMapper.getAllStrings());
double preRelationUserCounters = -155000.0;
for (String str1 : ues_copy) { for (String str1 : ues_copy) {
if (strF != str1) { if (strF != str1) {
Annotation annotation2 = pipelineSentimentAnnotationCache.getOrDefault(str1, null); SentimentAnalyzerTest SMX = getReponseFuturesHelper(strF, str1, stanfordCoreNLP, stanfordCoreNLPSentiment,
Annotation annotation4 = pipelineAnnotationCache.getOrDefault(str1, null); coreMaps1, strAnno, strAnnoSentiment, coreDocument, tokenizeCountingF, taggedWordListF
CoreDocument coreDocument1 = coreDocumentAnnotationCache.getOrDefault(str1, null); , typedDependenciesF, rnnCoreAnnotationsPredictedF, simpleMatricesF, simpleMatricesNodevectorsF
Annotation jmweAnnotation = jmweAnnotationCache.getOrDefault(str1, null); , listF, longestF, sentencesF, sentencesSentimentF, treesF, grammaticalStructuresF, sentimentLongestF
if (annotation2 == null) { , imwesF, InflectedCounterNegativeF, InflectedCounterPositiveF, tokenEntryF, UnmarkedPatternCounterF
createStrAnnotation(str1, stanfordCoreNLPSentiment, true); , strTokensIpartFormF, tokenFormsF, intTokenEntyCountsF, MarkedContinuousCounterF, ITokenTagsF
} , strTokenEntryGetPOSF, retrieveTGWListF, PairCounterF, TokensCounterF, stopWordLemmaF, nerEntitiesF
if (annotation4 == null) { , stopWordTokenF, entityTokenTagsF, nerEntitiesTypeF, AnotatorcounterF, strTokenStemsF);
createStrAnnotation(str1, stanfordCoreNLP, false);
}
if (coreDocument1 == null) {
getCoreDocumentsSuggested(stanfordCoreNLP, str1);
}
if (jmweAnnotation == null) {
getJMWEAnnotation(str1);
jmweAnnotation = jmweAnnotationCache.get(str1);
}
Integer tokenizeCounting = tokenizeCountingHashMap.getOrDefault(str1, null);
List<List<TaggedWord>> taggedWordList1 = taggedWordListHashMap.getOrDefault(str1, null);
java.util.ArrayList<String> retrieveTGWList1 = retrieveTGWListHashMap.getOrDefault(str1, null);
List<CoreMap> sentence1 = sentences1HashMap.getOrDefault(str1, null);
List<CoreMap> sentenceSentiment1 = sentencesSentimentHashMap.getOrDefault(str1, null);
ArrayList<Tree> trees1 = trees1HashMap.getOrDefault(str1, null);
List<CoreMap> coreMaps2 = new ArrayList<>();
ArrayList<GrammaticalStructure> grammaticalStructures1 = grammaticalStructureHashMap.getOrDefault(str1, null);
if (jmweAnnotation != null) {
coreMaps2 = jmweAnnotation.get(CoreAnnotations.SentencesAnnotation.class);
}
ArrayList<TypedDependency> typedDependencies1 = typedDependenciesHashMap.getOrDefault(str1, null);
ArrayList<Integer> rnnCoreAnnotationsPredicted1 = rnnCoreAnnotationsPredictedHashMap.getOrDefault(str1, null);
ArrayList<SimpleMatrix> simpleMatrices1 = simpleMatricesHashMap.getOrDefault(str1, null);
simpleMatricesHashMap.getOrDefault(str1, null);
ArrayList<SimpleMatrix> simpleMatricesNodevectors1 = simpleMatricesNodevectorsHashMap.getOrDefault(str1, null);
List list1 = listHashMap.getOrDefault(str1, null);
Integer longest1 = longestHashMap.getOrDefault(str1, null);
Integer sentimentLongest1 = sentimentHashMap.getOrDefault(str1, null);
List<IMWE<IToken>> imwes1 = imwesHashMap.getOrDefault(str1, null);
Integer InflectedCounterNegative1 = InflectedCounterNegativeHashMap.getOrDefault(str1, null);
Integer InflectedCounterPositive1 = InflectedCounterPositiveHashMap.getOrDefault(str1, null);
ArrayList<String> tokenEntry1 = tokenEntryHashMap.getOrDefault(str1, null);
Integer MarkedContinuousCounter1 = MarkedContinuousCounterHashMap.getOrDefault(str1, null);
Integer UnmarkedPatternCounter1 = UnmarkedPatternCounterHashMap.getOrDefault(str1, null);
ArrayList<String> strTokensIpartForm1 = strTokensIpartFormHashMap.getOrDefault(str1, null);
ArrayList<String> tokenForms1 = tokenFormsHashMap.getOrDefault(str1, null);
ArrayList<String> strTokenEntryGetPOS1 = strTokenEntryGetPOSHashMap.getOrDefault(str1, null);
ArrayList<Integer> intTokenEntyCounts1 = intTokenEntyCountsHashMap.getOrDefault(str1, null);
ArrayList<String> ITokenTags1 = ITokenTagsHashMap.getOrDefault(str1, null);
ArrayList<String> strTokenStems1 = strTokenStemsHashMap.getOrDefault(str1, null);
Integer Anotatorcounter1 = AnotatorcounterHashMap.getOrDefault(str1, null);
Integer TokensCounter1 = TokensCounterHashMap.getOrDefault(str1, null);
ArrayList<String> entityTokenTags1 = entityTokenTagsHashMap.getOrDefault(str1, null);
ArrayList<String> nerEntities1 = nerEntitiesHashMap.getOrDefault(str1, null);
ArrayList<String> nerEntitiesType1 = nerEntitiesTypeHashMap.getOrDefault(str1, null);
ArrayList<String> stopWordToken1 = stopWordTokenHashMap.getOrDefault(str1, null);
ArrayList<String> stopWordLemma1 = stopWordLemmaHashMap.getOrDefault(str1, null);
Integer PairCounter1 = PairCounterHashMap.getOrDefault(str1, null);
SentimentAnalyzerTest SMX = new SentimentAnalyzerTest(strF, str1, new SimilarityMatrix(strF, str1),
coreMaps1, coreMaps2, strAnno,
pipelineAnnotationCache.get(str1), strAnnoSentiment,
pipelineSentimentAnnotationCache.get(str1), coreDocument, coreDocumentAnnotationCache.get(str1),
tagger, gsf, classifier, tokenizeCounting, tokenizeCountingF,
taggedWordListF, taggedWordList1, retrieveTGWListF, retrieveTGWList1,
sentencesF, sentence1, sentencesSentimentF, sentenceSentiment1, treesF, trees1,
grammaticalStructuresF, grammaticalStructures1, typedDependenciesF,
typedDependencies1, rnnCoreAnnotationsPredictedF, rnnCoreAnnotationsPredicted1,
simpleMatricesF, simpleMatrices1, simpleMatricesNodevectorsF, simpleMatricesNodevectors1,
listF, list1, longestF, longest1, sentimentLongestF, sentimentLongest1, imwesF,
imwes1, InflectedCounterNegativeF, InflectedCounterNegative1, InflectedCounterPositiveF,
InflectedCounterPositive1, tokenEntryF, tokenEntry1, MarkedContinuousCounterF,
MarkedContinuousCounter1, UnmarkedPatternCounterF, UnmarkedPatternCounter1,
strTokensIpartFormF, strTokensIpartForm1, tokenFormsF, tokenForms1,
strTokenEntryGetPOSF, strTokenEntryGetPOS1, intTokenEntyCountsF,
intTokenEntyCounts1, ITokenTagsF, ITokenTags1, strTokenStemsF, strTokenStems1,
AnotatorcounterF, Anotatorcounter1, TokensCounterF, TokensCounter1,
entityTokenTagsF, entityTokenTags1, nerEntitiesF, nerEntities1, nerEntitiesTypeF,
nerEntitiesType1, stopWordTokenF, stopWordToken1, stopWordLemmaF, stopWordLemma1,
PairCounterF, PairCounter1
);
if (tokenizeCounting == null) {
tokenizeCountingHashMap.put(str1, SMX.getTokenizeCounting());
}
if (taggedWordList1 == null) {
taggedWordListHashMap.put(str1, SMX.getTaggedWordList1());
}
if (tokenizeCountingF == null) { if (tokenizeCountingF == null) {
tokenizeCountingF = SMX.getTokenizeCountingF(); tokenizeCountingF = SMX.getTokenizeCountingF();
} }
if (taggedWordListF == null) { if (taggedWordListF == null) {
taggedWordListF = SMX.getTaggedWordListF(); taggedWordListF = SMX.getTaggedWordListF();
} }
if (retrieveTGWListF == null) {
retrieveTGWListF = SMX.getRetrieveTGWListF();
}
if (retrieveTGWList1 == null) {
retrieveTGWListHashMap.put(str1, SMX.getRetrieveTGWList1());
}
if (sentencesF == null) {
sentencesF = SMX.getSentencesF();
}
if (sentence1 == null) {
sentences1HashMap.put(str1, SMX.getSentences1());
}
if (sentencesSentimentF == null) {
sentencesSentimentF = SMX.getSentencesSentimentF();
}
if (sentenceSentiment1 == null) {
sentencesSentimentHashMap.put(str1, SMX.getSentencesSentiment1());
}
if (treesF == null) {
treesF = SMX.getTreesF();
}
if (trees1 == null) {
trees1HashMap.put(str1, SMX.getTrees1());
}
if (grammaticalStructuresF == null) {
grammaticalStructuresF = SMX.getGrammaticalStructuresF();
}
if (grammaticalStructures1 == null) {
grammaticalStructureHashMap.put(str1, SMX.getGrammaticalStructures1());
}
if (typedDependenciesF == null) { if (typedDependenciesF == null) {
typedDependenciesF = SMX.getTypedDependenciesF(); typedDependenciesF = SMX.getTypedDependenciesF();
} }
if (typedDependencies1 == null) {
typedDependenciesHashMap.put(str1, SMX.getTypedDependencies1());
}
if (rnnCoreAnnotationsPredictedF == null) { if (rnnCoreAnnotationsPredictedF == null) {
rnnCoreAnnotationsPredictedF = SMX.getRnnCoreAnnotationsPredictedF(); rnnCoreAnnotationsPredictedF = SMX.getRnnCoreAnnotationsPredictedF();
} }
if (rnnCoreAnnotationsPredicted1 == null) {
rnnCoreAnnotationsPredictedHashMap.put(str1, SMX.getRnnCoreAnnotationsPredicted1());
}
if (simpleMatricesF == null) { if (simpleMatricesF == null) {
simpleMatricesF = SMX.getSimpleMatricesF(); simpleMatricesF = SMX.getSimpleMatricesF();
} }
if (simpleMatrices1 == null) {
simpleMatricesHashMap.put(str1, SMX.getSimpleMatrices1());
}
if (simpleMatricesNodevectorsF == null) { if (simpleMatricesNodevectorsF == null) {
simpleMatricesNodevectorsF = SMX.getSimpleMatricesNodevectorsF(); simpleMatricesNodevectorsF = SMX.getSimpleMatricesNodevectorsF();
} }
if (simpleMatricesNodevectors1 == null) {
simpleMatricesNodevectorsHashMap.put(str1, SMX.getSimpleMatricesNodevectors1());
}
if (listF == null) { if (listF == null) {
listF = SMX.getListF(); listF = SMX.getListF();
} }
if (list1 == null) {
listHashMap.put(str1, SMX.getList1());
}
if (longestF == null) { if (longestF == null) {
longestF = SMX.getLongestF(); longestF = SMX.getLongestF();
} }
if (longest1 == null) { if (sentencesF == null) {
longestHashMap.put(str1, SMX.getLongest1()); sentencesF = SMX.getSentencesF();
}
if (sentencesSentimentF == null) {
sentencesSentimentF = SMX.getSentencesSentimentF();
}
if (treesF == null) {
treesF = SMX.getTreesF();
}
if (grammaticalStructuresF == null) {
grammaticalStructuresF = SMX.getGrammaticalStructuresF();
} }
if (sentimentLongestF == null) { if (sentimentLongestF == null) {
sentimentLongestF = SMX.getSentimentLongestF(); sentimentLongestF = SMX.getSentimentLongestF();
} }
if (sentimentLongest1 == null) {
sentimentHashMap.put(str1, SMX.getSentimentLongest1());
}
if (imwesF == null) { if (imwesF == null) {
imwesF = SMX.getImwesF(); imwesF = SMX.getImwesF();
} }
if (imwes1 == null) {
imwesHashMap.put(str1, SMX.getImwes1());
}
if (InflectedCounterNegativeF == null) { if (InflectedCounterNegativeF == null) {
InflectedCounterNegativeF = SMX.getInflectedCounterNegativeF(); InflectedCounterNegativeF = SMX.getInflectedCounterNegativeF();
} }
if (InflectedCounterNegative1 == null) {
InflectedCounterNegativeHashMap.put(str1, SMX.getInflectedCounterNegative1());
}
if (InflectedCounterPositiveF == null) { if (InflectedCounterPositiveF == null) {
InflectedCounterPositiveF = SMX.getInflectedCounterPositiveF(); InflectedCounterPositiveF = SMX.getInflectedCounterPositiveF();
} }
if (InflectedCounterPositive1 == null) {
InflectedCounterPositiveHashMap.put(str1, SMX.getInflectedCounterPositive1());
}
if (tokenEntryF == null) { if (tokenEntryF == null) {
tokenEntryF = SMX.getTokenEntryF(); tokenEntryF = SMX.getTokenEntryF();
} }
if (tokenEntry1 == null) {
tokenEntryHashMap.put(str1, SMX.getTokenEntry1());
}
if (MarkedContinuousCounterF == null) {
MarkedContinuousCounterF = SMX.getMarkedContinuousCounterF();
}
if (MarkedContinuousCounter1 == null) {
MarkedContinuousCounterHashMap.put(str1, SMX.getMarkedContinuousCounter1());
}
if (UnmarkedPatternCounterF == null) { if (UnmarkedPatternCounterF == null) {
UnmarkedPatternCounterF = SMX.getUnmarkedPatternCounterF(); UnmarkedPatternCounterF = SMX.getUnmarkedPatternCounterF();
} }
if (UnmarkedPatternCounter1 == null) {
UnmarkedPatternCounterHashMap.put(str1, SMX.getUnmarkedPatternCounter1());
}
if (strTokensIpartFormF == null) { if (strTokensIpartFormF == null) {
strTokensIpartFormF = SMX.getStrTokensIpartFormF(); strTokensIpartFormF = SMX.getStrTokensIpartFormF();
} }
if (strTokensIpartForm1 == null) {
strTokensIpartFormHashMap.put(str1, SMX.getStrTokensIpartForm1());
}
if (tokenFormsF == null) { if (tokenFormsF == null) {
tokenFormsF = SMX.getTokenFormsF(); tokenFormsF = SMX.getTokenFormsF();
} }
if (tokenForms1 == null) {
tokenFormsHashMap.put(str1, SMX.getTokenForms1());
}
if (strTokenEntryGetPOSF == null) {
strTokenEntryGetPOSF = SMX.getStrTokenEntryGetPOSF();
}
if (strTokenEntryGetPOS1 == null) {
strTokenEntryGetPOSHashMap.put(str1, SMX.getStrTokenEntryGetPOS1());
}
if (intTokenEntyCountsF == null) { if (intTokenEntyCountsF == null) {
intTokenEntyCountsF = SMX.getIntTokenEntyCountsF(); intTokenEntyCountsF = SMX.getIntTokenEntyCountsF();
} }
if (intTokenEntyCounts1 == null) { if (MarkedContinuousCounterF == null) {
intTokenEntyCountsHashMap.put(str1, SMX.getIntTokenEntyCounts1()); MarkedContinuousCounterF = SMX.getMarkedContinuousCounterF();
} }
if (ITokenTagsF == null) { if (ITokenTagsF == null) {
ITokenTagsF = SMX.getITokenTagsF(); ITokenTagsF = SMX.getITokenTagsF();
} }
if (ITokenTags1 == null) { if (strTokenEntryGetPOSF == null) {
ITokenTagsHashMap.put(str1, SMX.getITokenTags1()); strTokenEntryGetPOSF = SMX.getStrTokenEntryGetPOSF();
} }
if (strTokenStemsF == null) { if (retrieveTGWListF == null) {
strTokenStemsF = SMX.getStrTokenStemsF(); retrieveTGWListF = SMX.getRetrieveTGWListF();
}
if (strTokenStems1 == null) {
strTokenStemsHashMap.put(str1, SMX.getStrTokenStems1());
}
if (AnotatorcounterF == null) {
AnotatorcounterF = SMX.getAnotatorcounterF();
}
if (Anotatorcounter1 == null) {
AnotatorcounterHashMap.put(str1, SMX.getAnotatorcounter1());
}
if (TokensCounterF == null) {
TokensCounterF = SMX.getTokensCounterF();
}
if (TokensCounter1 == null) {
TokensCounterHashMap.put(str1, SMX.getTokensCounter1());
}
if (entityTokenTagsF == null) {
entityTokenTagsF = SMX.getEntityTokenTagsF();
}
if (entityTokenTags1 == null) {
entityTokenTagsHashMap.put(str1, SMX.getEntityTokenTags1());
}
if (nerEntitiesF == null) {
nerEntitiesF = SMX.getNerEntitiesF();
}
if (nerEntities1 == null) {
nerEntitiesHashMap.put(str1, SMX.getNerEntities1());
}
if (nerEntitiesTypeF == null) {
nerEntitiesTypeF = SMX.getNerEntitiesTypeF();
}
if (nerEntitiesType1 == null) {
nerEntitiesTypeHashMap.put(str1, SMX.getNerEntitiesType1());
}
if (stopWordTokenF == null) {
stopWordTokenF = SMX.getStopWordTokenF();
}
if (stopWordToken1 == null) {
stopWordTokenHashMap.put(str1, SMX.getStopWordToken1());
}
if (stopWordLemmaF == null) {
stopWordLemmaF = SMX.getStopWordLemmaF();
}
if (stopWordLemma1 == null) {
stopWordLemmaHashMap.put(str1, SMX.getStopWordLemma1());
} }
if (PairCounterF == null) { if (PairCounterF == null) {
PairCounterF = SMX.getPairCounterF(); PairCounterF = SMX.getPairCounterF();
} }
if (PairCounter1 == null) { if (TokensCounterF == null) {
PairCounterHashMap.put(str1, SMX.getPairCounter1()); TokensCounterF = SMX.getTokensCounterF();
}
if (stopWordLemmaF == null) {
stopWordLemmaF = SMX.getStopWordLemmaF();
}
if (nerEntitiesF == null) {
nerEntitiesF = SMX.getNerEntitiesF();
}
if (stopWordTokenF == null) {
stopWordTokenF = SMX.getStopWordTokenF();
}
if (entityTokenTagsF == null) {
entityTokenTagsF = SMX.getEntityTokenTagsF();
}
if (nerEntitiesTypeF == null) {
nerEntitiesTypeF = SMX.getNerEntitiesTypeF();
}
if (AnotatorcounterF == null) {
AnotatorcounterF = SMX.getAnotatorcounterF();
}
if (strTokenStemsF == null) {
strTokenStemsF = SMX.getStrTokenStemsF();
} }
SimilarityMatrix getSMX = SMX.callSMX(); SimilarityMatrix getSMX = SMX.callSMX();
@ -583,19 +579,41 @@ public class Datahandler {
} }
} }
} }
int cacheRequirement = 6500; int cacheRequirement = 8500;
if (preRelationUserCounters > cacheRequirement && !ues_copy.contains(strF) && filterContent(strF)) { if (preRelationUserCounters > cacheRequirement && !ues_copy.contains(strF) && filterContent(strF)) {
DataMapper.InsertMYSQLStrings(strF); DataMapper.InsertMYSQLStrings(strF);
DataMapper.checkStringsToDelete(); DataMapper.checkStringsToDelete();
} }
double randomLenghtPermit = strF.length() * (Math.random() * Math.random() * Math.random() * (Math.random() * 10)); double randomLenghtPermit = strF.length() * (Math.random() * Math.random() * (Math.random() * 10));
Collections.reverse(concurrentRelations); Collections.reverse(concurrentRelations);
ArrayList<String> mysqlUpdateLastUsed = new ArrayList(); ArrayList<String> mysqlUpdateLastUsed = new ArrayList();
if (!concurrentRelations.isEmpty()) { if (!concurrentRelations.isEmpty()) {
boolean passedFirst = false;
int lastIter = 1;
for (String secondaryRelation : concurrentRelations) { for (String secondaryRelation : concurrentRelations) {
if (SB.toString().length() > randomLenghtPermit && !SB.toString().isEmpty()) { if (SB.toString().length() > randomLenghtPermit && !SB.toString().isEmpty()) {
break; break;
} }
if (passedFirst && lastIter < concurrentRelations.size()) {
String testSTR = SB.toString() + " " + secondaryRelation;
SentimentAnalyzerTest SMX = getReponseFuturesHelper(strF, testSTR, stanfordCoreNLP, stanfordCoreNLPSentiment,
coreMaps1, strAnno, strAnnoSentiment, coreDocument, tokenizeCountingF, taggedWordListF
, typedDependenciesF, rnnCoreAnnotationsPredictedF, simpleMatricesF, simpleMatricesNodevectorsF
, listF, longestF, sentencesF, sentencesSentimentF, treesF, grammaticalStructuresF, sentimentLongestF
, imwesF, InflectedCounterNegativeF, InflectedCounterPositiveF, tokenEntryF, UnmarkedPatternCounterF
, strTokensIpartFormF, tokenFormsF, intTokenEntyCountsF, MarkedContinuousCounterF, ITokenTagsF
, strTokenEntryGetPOSF, retrieveTGWListF, PairCounterF, TokensCounterF, stopWordLemmaF, nerEntitiesF
, stopWordTokenF, entityTokenTagsF, nerEntitiesTypeF, AnotatorcounterF, strTokenStemsF);
SimilarityMatrix getSMX = SMX.callSMX();
double scoreRelationLastUserMsg = getSMX.getDistance();
if (preRelationUserCounters > scoreRelationLastUserMsg) {
break;
}
}
passedFirst = true;
lastIter++;
SB.append(secondaryRelation).append(" "); SB.append(secondaryRelation).append(" ");
mysqlUpdateLastUsed.add(secondaryRelation); mysqlUpdateLastUsed.add(secondaryRelation);
} }

View File

@ -89,7 +89,8 @@ public class DiscordHandler extends ListenerAdapter {
private static StanfordCoreNLP stanfordCoreNLP; private static StanfordCoreNLP stanfordCoreNLP;
private static Datahandler datahandler; private static Datahandler datahandler;
private static StanfordCoreNLP stanfordCoreNLPSentiment; private static StanfordCoreNLP stanfordCoreNLPSentiment;
private static ExecutorService executorService = Executors.newFixedThreadPool(2); private static ExecutorService executorService = Executors.newFixedThreadPool(1);
private static ExecutorService executorServiceIngame = Executors.newFixedThreadPool(1);
//TODO add python program that edits the java code. python program just adds test if statements on //TODO add python program that edits the java code. python program just adds test if statements on
//variables until the tests pass //variables until the tests pass
@ -113,14 +114,17 @@ public class DiscordHandler extends ListenerAdapter {
//make sure not to use ports that are already occupied. //make sure not to use ports that are already occupied.
for (int i = 0; i < autismbotCount; i++) { for (int i = 0; i < autismbotCount; i++) {
final int j = i; final int j = i;
new Thread(() -> { executorServiceIngame.execute(new Runnable() {
ArrayList<Integer> ports = new ArrayList<Integer>(); @Override
ports.add(48475); public void run() {
ports.add(48476); ArrayList<Integer> ports = new ArrayList<Integer>();
ports.add(48477); ports.add(48475);
ports.add(48478); ports.add(48476);
handleUDPTraffic(ports.get(j), datahandler, stanfordCoreNLP, stanfordCoreNLPSentiment); ports.add(48477);
}).start(); ports.add(48478);
handleUDPTraffic(ports.get(j), datahandler, stanfordCoreNLP, stanfordCoreNLPSentiment);
}
});
} }
} }
@ -134,7 +138,8 @@ public class DiscordHandler extends ListenerAdapter {
content = content.replace(member.getId(), ""); content = content.replace(member.getId(), "");
} }
} }
if (username != null && !event.getAuthor().isBot() && !content.isEmpty()) { if (username != null && !event.getAuthor().isBot() && !content.isEmpty()
&& event.getMessage().getCategory() != null) {
String channelName = event.getMessage().getChannel().getName().toLowerCase(); String channelName = event.getMessage().getChannel().getName().toLowerCase();
boolean channelpermissionsDenied = false; boolean channelpermissionsDenied = false;
if (channelName.contains("suggestion-box")) { if (channelName.contains("suggestion-box")) {