Reduced threads for secondary processing and added a thread pool for UDP messages. Also moved logic around so the response concatenation can be scored.

christian 2021-12-07 22:26:19 +01:00
parent e9969f836e
commit e234ce0109
3 changed files with 298 additions and 275 deletions


@@ -89,7 +89,7 @@ public class DataMapper {
ResultSet l_rsSearch = null;
String CountSQL = "select count(*) from Sentences";
String l_sSQL = "delete from Sentences\n" +
" where DATE(last_used) < DATE_SUB(CURDATE(), INTERVAL 32 DAY)\n" +
" where DATE(last_used) < DATE_SUB(CURDATE(), INTERVAL 10 DAY)\n" +
" order by last_used asc limit 2";
try {
l_cCon = DBCPDataSource.getConnection();
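
(For reference: the change above tightens the retention window for stored sentences from 32 days to 10. Below is a minimal JDBC sketch of the pruning statement; only the table name and SQL come from the diff, while the parameterized interval, class name, and method name are invented for illustration.)

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;

public class SentencePruneSketch {
    // Sketch only: deletes the two least-recently-used rows that fall outside
    // the retention window. The diff hardcodes "INTERVAL 10 DAY"; the interval
    // is bound as a parameter here purely for illustration.
    public static void pruneOldSentences(Connection con, int retentionDays) throws SQLException {
        String sql = "delete from Sentences"
                + " where DATE(last_used) < DATE_SUB(CURDATE(), INTERVAL ? DAY)"
                + " order by last_used asc limit 2";
        try (PreparedStatement ps = con.prepareStatement(sql)) {
            ps.setInt(1, retentionDays);
            ps.executeUpdate();
        }
    }
}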


@@ -6,7 +6,6 @@ import edu.mit.jmwe.data.IMWE;
import edu.mit.jmwe.data.IToken;
import edu.stanford.nlp.ie.AbstractSequenceClassifier;
import edu.stanford.nlp.ie.crf.CRFClassifier;
- import edu.stanford.nlp.ie.machinereading.structure.EntityMention;
import edu.stanford.nlp.ling.CoreAnnotations;
import edu.stanford.nlp.ling.CoreLabel;
import edu.stanford.nlp.ling.TaggedWord;
@@ -18,7 +17,6 @@ import edu.stanford.nlp.pipeline.StanfordCoreNLP;
import edu.stanford.nlp.tagger.maxent.MaxentTagger;
import edu.stanford.nlp.trees.*;
import edu.stanford.nlp.util.CoreMap;
- import kotlinx.coroutines.*;
import org.ejml.simple.SimpleMatrix;
import java.util.*;
@@ -202,6 +200,209 @@ public class Datahandler {
}
}
private SentimentAnalyzerTest getReponseFuturesHelper(String strF, String str1, StanfordCoreNLP stanfordCoreNLP,
StanfordCoreNLP stanfordCoreNLPSentiment,
List<CoreMap> coreMaps1, Annotation strAnno,
Annotation strAnnoSentiment, CoreDocument coreDocument
, Integer tokenizeCountingF, List<List<TaggedWord>> taggedWordListF, ArrayList<TypedDependency> typedDependenciesF
, ArrayList<Integer> rnnCoreAnnotationsPredictedF, ArrayList<SimpleMatrix> simpleMatricesF
, ArrayList<SimpleMatrix> simpleMatricesNodevectorsF, List<String> listF, Integer longestF, List<CoreMap> sentencesF
, List<CoreMap> sentencesSentimentF, ArrayList<Tree> treesF, ArrayList<GrammaticalStructure> grammaticalStructuresF
, Integer sentimentLongestF, List<IMWE<IToken>> imwesF, Integer inflectedCounterNegativeF, Integer inflectedCounterPositiveF
, ArrayList<String> tokenEntryF, Integer unmarkedPatternCounterF, ArrayList<String> strTokensIpartFormF, ArrayList<String> tokenFormsF
, ArrayList<Integer> intTokenEntyCountsF, Integer markedContinuousCounterF, ArrayList<String> ITokenTagsF
, ArrayList<String> strTokenEntryGetPOSF, ArrayList<String> retrieveTGWListF, Integer pairCounterF
, Integer tokensCounterF, ArrayList<String> stopWordLemmaF, ArrayList<String> nerEntitiesF
, ArrayList<String> stopWordTokenF, ArrayList<String> entityTokenTagsF, ArrayList<String> nerEntitiesTypeF
, Integer anotatorcounterF, ArrayList<String> strTokenStemsF) {
Annotation annotation2 = pipelineSentimentAnnotationCache.getOrDefault(str1, null);
Annotation annotation4 = pipelineAnnotationCache.getOrDefault(str1, null);
CoreDocument coreDocument1 = coreDocumentAnnotationCache.getOrDefault(str1, null);
Annotation jmweAnnotation = jmweAnnotationCache.getOrDefault(str1, null);
if (annotation2 == null) {
createStrAnnotation(str1, stanfordCoreNLPSentiment, true);
}
if (annotation4 == null) {
createStrAnnotation(str1, stanfordCoreNLP, false);
}
if (coreDocument1 == null) {
getCoreDocumentsSuggested(stanfordCoreNLP, str1);
}
if (jmweAnnotation == null) {
getJMWEAnnotation(str1);
jmweAnnotation = jmweAnnotationCache.get(str1);
}
Integer tokenizeCounting = tokenizeCountingHashMap.getOrDefault(str1, null);
List<List<TaggedWord>> taggedWordList1 = taggedWordListHashMap.getOrDefault(str1, null);
java.util.ArrayList<String> retrieveTGWList1 = retrieveTGWListHashMap.getOrDefault(str1, null);
List<CoreMap> sentence1 = sentences1HashMap.getOrDefault(str1, null);
List<CoreMap> sentenceSentiment1 = sentencesSentimentHashMap.getOrDefault(str1, null);
ArrayList<Tree> trees1 = trees1HashMap.getOrDefault(str1, null);
List<CoreMap> coreMaps2 = new ArrayList<>();
ArrayList<GrammaticalStructure> grammaticalStructures1 = grammaticalStructureHashMap.getOrDefault(str1, null);
if (jmweAnnotation != null) {
coreMaps2 = jmweAnnotation.get(CoreAnnotations.SentencesAnnotation.class);
}
ArrayList<TypedDependency> typedDependencies1 = typedDependenciesHashMap.getOrDefault(str1, null);
ArrayList<Integer> rnnCoreAnnotationsPredicted1 = rnnCoreAnnotationsPredictedHashMap.getOrDefault(str1, null);
ArrayList<SimpleMatrix> simpleMatrices1 = simpleMatricesHashMap.getOrDefault(str1, null);
simpleMatricesHashMap.getOrDefault(str1, null);
ArrayList<SimpleMatrix> simpleMatricesNodevectors1 = simpleMatricesNodevectorsHashMap.getOrDefault(str1, null);
List list1 = listHashMap.getOrDefault(str1, null);
Integer longest1 = longestHashMap.getOrDefault(str1, null);
Integer sentimentLongest1 = sentimentHashMap.getOrDefault(str1, null);
List<IMWE<IToken>> imwes1 = imwesHashMap.getOrDefault(str1, null);
Integer InflectedCounterNegative1 = InflectedCounterNegativeHashMap.getOrDefault(str1, null);
Integer InflectedCounterPositive1 = InflectedCounterPositiveHashMap.getOrDefault(str1, null);
ArrayList<String> tokenEntry1 = tokenEntryHashMap.getOrDefault(str1, null);
Integer MarkedContinuousCounter1 = MarkedContinuousCounterHashMap.getOrDefault(str1, null);
Integer UnmarkedPatternCounter1 = UnmarkedPatternCounterHashMap.getOrDefault(str1, null);
ArrayList<String> strTokensIpartForm1 = strTokensIpartFormHashMap.getOrDefault(str1, null);
ArrayList<String> tokenForms1 = tokenFormsHashMap.getOrDefault(str1, null);
ArrayList<String> strTokenEntryGetPOS1 = strTokenEntryGetPOSHashMap.getOrDefault(str1, null);
ArrayList<Integer> intTokenEntyCounts1 = intTokenEntyCountsHashMap.getOrDefault(str1, null);
ArrayList<String> ITokenTags1 = ITokenTagsHashMap.getOrDefault(str1, null);
ArrayList<String> strTokenStems1 = strTokenStemsHashMap.getOrDefault(str1, null);
Integer Anotatorcounter1 = AnotatorcounterHashMap.getOrDefault(str1, null);
Integer TokensCounter1 = TokensCounterHashMap.getOrDefault(str1, null);
ArrayList<String> entityTokenTags1 = entityTokenTagsHashMap.getOrDefault(str1, null);
ArrayList<String> nerEntities1 = nerEntitiesHashMap.getOrDefault(str1, null);
ArrayList<String> nerEntitiesType1 = nerEntitiesTypeHashMap.getOrDefault(str1, null);
ArrayList<String> stopWordToken1 = stopWordTokenHashMap.getOrDefault(str1, null);
ArrayList<String> stopWordLemma1 = stopWordLemmaHashMap.getOrDefault(str1, null);
Integer PairCounter1 = PairCounterHashMap.getOrDefault(str1, null);
SentimentAnalyzerTest SMX = new SentimentAnalyzerTest(strF, str1, new SimilarityMatrix(strF, str1),
coreMaps1, coreMaps2, strAnno,
pipelineAnnotationCache.get(str1), strAnnoSentiment,
pipelineSentimentAnnotationCache.get(str1), coreDocument, coreDocumentAnnotationCache.get(str1),
tagger, gsf, classifier, tokenizeCounting, tokenizeCountingF,
taggedWordListF, taggedWordList1, retrieveTGWListF, retrieveTGWList1,
sentencesF, sentence1, sentencesSentimentF, sentenceSentiment1, treesF, trees1,
grammaticalStructuresF, grammaticalStructures1, typedDependenciesF,
typedDependencies1, rnnCoreAnnotationsPredictedF, rnnCoreAnnotationsPredicted1,
simpleMatricesF, simpleMatrices1, simpleMatricesNodevectorsF, simpleMatricesNodevectors1,
listF, list1, longestF, longest1, sentimentLongestF, sentimentLongest1, imwesF,
imwes1, inflectedCounterNegativeF, InflectedCounterNegative1, inflectedCounterPositiveF,
InflectedCounterPositive1, tokenEntryF, tokenEntry1, markedContinuousCounterF,
MarkedContinuousCounter1, unmarkedPatternCounterF, UnmarkedPatternCounter1,
strTokensIpartFormF, strTokensIpartForm1, tokenFormsF, tokenForms1,
strTokenEntryGetPOSF, strTokenEntryGetPOS1, intTokenEntyCountsF,
intTokenEntyCounts1, ITokenTagsF, ITokenTags1, strTokenStemsF, strTokenStems1,
anotatorcounterF, Anotatorcounter1, tokensCounterF, TokensCounter1,
entityTokenTagsF, entityTokenTags1, nerEntitiesF, nerEntities1, nerEntitiesTypeF,
nerEntitiesType1, stopWordTokenF, stopWordToken1, stopWordLemmaF, stopWordLemma1,
pairCounterF, PairCounter1
);
if (tokenizeCounting == null) {
tokenizeCountingHashMap.put(str1, SMX.getTokenizeCounting());
}
if (taggedWordList1 == null) {
taggedWordListHashMap.put(str1, SMX.getTaggedWordList1());
}
if (retrieveTGWList1 == null) {
retrieveTGWListHashMap.put(str1, SMX.getRetrieveTGWList1());
}
if (sentence1 == null) {
sentences1HashMap.put(str1, SMX.getSentences1());
}
if (sentenceSentiment1 == null) {
sentencesSentimentHashMap.put(str1, SMX.getSentencesSentiment1());
}
if (trees1 == null) {
trees1HashMap.put(str1, SMX.getTrees1());
}
if (grammaticalStructures1 == null) {
grammaticalStructureHashMap.put(str1, SMX.getGrammaticalStructures1());
}
if (typedDependencies1 == null) {
typedDependenciesHashMap.put(str1, SMX.getTypedDependencies1());
}
if (rnnCoreAnnotationsPredicted1 == null) {
rnnCoreAnnotationsPredictedHashMap.put(str1, SMX.getRnnCoreAnnotationsPredicted1());
}
if (simpleMatrices1 == null) {
simpleMatricesHashMap.put(str1, SMX.getSimpleMatrices1());
}
if (simpleMatricesNodevectors1 == null) {
simpleMatricesNodevectorsHashMap.put(str1, SMX.getSimpleMatricesNodevectors1());
}
if (list1 == null) {
listHashMap.put(str1, SMX.getList1());
}
if (longest1 == null) {
longestHashMap.put(str1, SMX.getLongest1());
}
if (sentimentLongest1 == null) {
sentimentHashMap.put(str1, SMX.getSentimentLongest1());
}
if (imwes1 == null) {
imwesHashMap.put(str1, SMX.getImwes1());
}
if (InflectedCounterNegative1 == null) {
InflectedCounterNegativeHashMap.put(str1, SMX.getInflectedCounterNegative1());
}
if (InflectedCounterPositive1 == null) {
InflectedCounterPositiveHashMap.put(str1, SMX.getInflectedCounterPositive1());
}
if (tokenEntry1 == null) {
tokenEntryHashMap.put(str1, SMX.getTokenEntry1());
}
if (MarkedContinuousCounter1 == null) {
MarkedContinuousCounterHashMap.put(str1, SMX.getMarkedContinuousCounter1());
}
if (UnmarkedPatternCounter1 == null) {
UnmarkedPatternCounterHashMap.put(str1, SMX.getUnmarkedPatternCounter1());
}
if (strTokensIpartForm1 == null) {
strTokensIpartFormHashMap.put(str1, SMX.getStrTokensIpartForm1());
}
if (tokenForms1 == null) {
tokenFormsHashMap.put(str1, SMX.getTokenForms1());
}
if (strTokenEntryGetPOS1 == null) {
strTokenEntryGetPOSHashMap.put(str1, SMX.getStrTokenEntryGetPOS1());
}
if (intTokenEntyCounts1 == null) {
intTokenEntyCountsHashMap.put(str1, SMX.getIntTokenEntyCounts1());
}
if (ITokenTags1 == null) {
ITokenTagsHashMap.put(str1, SMX.getITokenTags1());
}
if (strTokenStems1 == null) {
strTokenStemsHashMap.put(str1, SMX.getStrTokenStems1());
}
if (Anotatorcounter1 == null) {
AnotatorcounterHashMap.put(str1, SMX.getAnotatorcounter1());
}
if (TokensCounter1 == null) {
TokensCounterHashMap.put(str1, SMX.getTokensCounter1());
}
if (entityTokenTags1 == null) {
entityTokenTagsHashMap.put(str1, SMX.getEntityTokenTags1());
}
if (nerEntities1 == null) {
nerEntitiesHashMap.put(str1, SMX.getNerEntities1());
}
if (nerEntitiesType1 == null) {
nerEntitiesTypeHashMap.put(str1, SMX.getNerEntitiesType1());
}
if (stopWordToken1 == null) {
stopWordTokenHashMap.put(str1, SMX.getStopWordToken1());
}
if (stopWordLemma1 == null) {
stopWordLemmaHashMap.put(str1, SMX.getStopWordLemma1());
}
if (PairCounter1 == null) {
PairCounterHashMap.put(str1, SMX.getPairCounter1());
}
return SMX;
}
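
(As an aside, the helper above is essentially a read-through cache: each per-string value is fetched from its map, computed via SentimentAnalyzerTest on a miss, and stored back. The repeated null-check/put ladders could be collapsed with Map.computeIfAbsent; the sketch below is hypothetical and not the project's API.)

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

// Sketch: one generic read-through cache in place of a null-check/put pair per
// map. Note that computeIfAbsent does not store null results, so values that
// may legitimately be null would still need the explicit pattern.
final class ReadThroughCache<K, V> {
    private final Map<K, V> map = new ConcurrentHashMap<>();
    private final Function<K, V> loader;

    ReadThroughCache(Function<K, V> loader) {
        this.loader = loader;
    }

    V get(K key) {
        return map.computeIfAbsent(key, loader); // loader runs only on a miss
    }
}

(Since a single SentimentAnalyzerTest run produces all of the per-string values at once, a practical variant would cache one per-string record object rather than thirty-odd parallel maps.)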
public String getResponseFutures(String strF, StanfordCoreNLP stanfordCoreNLP, StanfordCoreNLP stanfordCoreNLPSentiment) {
Annotation strAnno = new Annotation(strF);
@@ -215,364 +416,159 @@ public class Datahandler {
Annotation annotation = new Annotation(strF);
stanfordCoreNLP.annotate(annotation);
CoreDocument coreDocument = new CoreDocument(annotation);
List<String> ues_copy = new ArrayList(DataMapper.getAllStrings());
double preRelationUserCounters = -155000.0;
ArrayList<String> concurrentRelations = new ArrayList();
StringBuilder SB = new StringBuilder();
Annotation jmweAnnotationF = PipelineJMWESingleton.INSTANCE.getJMWEAnnotation(strF);
List<CoreMap> coreMaps1 = jmweAnnotationF.get(CoreAnnotations.SentencesAnnotation.class);
Integer tokenizeCountingF = null;
List<List<TaggedWord>> taggedWordListF = null;
java.util.ArrayList<String> retrieveTGWListF = null;
List<CoreMap> sentencesF = null;
List<CoreMap> sentencesSentimentF = null;
List<CoreMap> coreMaps1 = jmweAnnotationF.get(CoreAnnotations.SentencesAnnotation.class);
java.util.ArrayList<Tree> treesF = null;
ArrayList<GrammaticalStructure> grammaticalStructuresF = null;
java.util.ArrayList<TypedDependency> typedDependenciesF = null;
java.util.ArrayList<Integer> rnnCoreAnnotationsPredictedF = null;
java.util.ArrayList<SimpleMatrix> simpleMatricesF = null;
java.util.ArrayList<SimpleMatrix> simpleMatricesNodevectorsF = null;
List<String> listF = null;
Integer longestF = null;
Integer sentimentLongestF = null;
List<IMWE<IToken>> imwesF = null;
Integer InflectedCounterNegativeF = null;
Integer InflectedCounterPositiveF = null;
ArrayList<String> tokenEntryF = null;
Integer MarkedContinuousCounterF = null;
Integer UnmarkedPatternCounterF = null;
ArrayList<String> strTokensIpartFormF = null;
java.util.ArrayList<String> tokenFormsF = null;
ArrayList<String> strTokenEntryGetPOSF = null;
java.util.ArrayList<Integer> intTokenEntyCountsF = null;
ArrayList<String> ITokenTagsF = null;
java.util.ArrayList<String> strTokenStemsF = null;
Integer AnotatorcounterF = null;
Integer TokensCounterF = null;
java.util.ArrayList<String> entityTokenTagsF = null;
java.util.ArrayList<String> nerEntitiesF = null;
java.util.ArrayList<String> nerEntitiesTypeF = null;
java.util.ArrayList<String> stopWordTokenF = null;
java.util.ArrayList<String> stopWordLemmaF = null;
Integer PairCounterF = null;
ArrayList<String> concurrentRelations = new ArrayList();
StringBuilder SB = new StringBuilder();
List<String> ues_copy = new ArrayList(DataMapper.getAllStrings());
double preRelationUserCounters = -155000.0;
for (String str1 : ues_copy) {
if (strF != str1) {
Annotation annotation2 = pipelineSentimentAnnotationCache.getOrDefault(str1, null);
Annotation annotation4 = pipelineAnnotationCache.getOrDefault(str1, null);
CoreDocument coreDocument1 = coreDocumentAnnotationCache.getOrDefault(str1, null);
Annotation jmweAnnotation = jmweAnnotationCache.getOrDefault(str1, null);
if (annotation2 == null) {
createStrAnnotation(str1, stanfordCoreNLPSentiment, true);
}
if (annotation4 == null) {
createStrAnnotation(str1, stanfordCoreNLP, false);
}
if (coreDocument1 == null) {
getCoreDocumentsSuggested(stanfordCoreNLP, str1);
}
if (jmweAnnotation == null) {
getJMWEAnnotation(str1);
jmweAnnotation = jmweAnnotationCache.get(str1);
}
Integer tokenizeCounting = tokenizeCountingHashMap.getOrDefault(str1, null);
List<List<TaggedWord>> taggedWordList1 = taggedWordListHashMap.getOrDefault(str1, null);
java.util.ArrayList<String> retrieveTGWList1 = retrieveTGWListHashMap.getOrDefault(str1, null);
List<CoreMap> sentence1 = sentences1HashMap.getOrDefault(str1, null);
List<CoreMap> sentenceSentiment1 = sentencesSentimentHashMap.getOrDefault(str1, null);
ArrayList<Tree> trees1 = trees1HashMap.getOrDefault(str1, null);
List<CoreMap> coreMaps2 = new ArrayList<>();
ArrayList<GrammaticalStructure> grammaticalStructures1 = grammaticalStructureHashMap.getOrDefault(str1, null);
if (jmweAnnotation != null) {
coreMaps2 = jmweAnnotation.get(CoreAnnotations.SentencesAnnotation.class);
}
ArrayList<TypedDependency> typedDependencies1 = typedDependenciesHashMap.getOrDefault(str1, null);
ArrayList<Integer> rnnCoreAnnotationsPredicted1 = rnnCoreAnnotationsPredictedHashMap.getOrDefault(str1, null);
ArrayList<SimpleMatrix> simpleMatrices1 = simpleMatricesHashMap.getOrDefault(str1, null);
simpleMatricesHashMap.getOrDefault(str1, null);
ArrayList<SimpleMatrix> simpleMatricesNodevectors1 = simpleMatricesNodevectorsHashMap.getOrDefault(str1, null);
List list1 = listHashMap.getOrDefault(str1, null);
Integer longest1 = longestHashMap.getOrDefault(str1, null);
Integer sentimentLongest1 = sentimentHashMap.getOrDefault(str1, null);
List<IMWE<IToken>> imwes1 = imwesHashMap.getOrDefault(str1, null);
Integer InflectedCounterNegative1 = InflectedCounterNegativeHashMap.getOrDefault(str1, null);
Integer InflectedCounterPositive1 = InflectedCounterPositiveHashMap.getOrDefault(str1, null);
ArrayList<String> tokenEntry1 = tokenEntryHashMap.getOrDefault(str1, null);
Integer MarkedContinuousCounter1 = MarkedContinuousCounterHashMap.getOrDefault(str1, null);
Integer UnmarkedPatternCounter1 = UnmarkedPatternCounterHashMap.getOrDefault(str1, null);
ArrayList<String> strTokensIpartForm1 = strTokensIpartFormHashMap.getOrDefault(str1, null);
ArrayList<String> tokenForms1 = tokenFormsHashMap.getOrDefault(str1, null);
ArrayList<String> strTokenEntryGetPOS1 = strTokenEntryGetPOSHashMap.getOrDefault(str1, null);
ArrayList<Integer> intTokenEntyCounts1 = intTokenEntyCountsHashMap.getOrDefault(str1, null);
ArrayList<String> ITokenTags1 = ITokenTagsHashMap.getOrDefault(str1, null);
ArrayList<String> strTokenStems1 = strTokenStemsHashMap.getOrDefault(str1, null);
Integer Anotatorcounter1 = AnotatorcounterHashMap.getOrDefault(str1, null);
Integer TokensCounter1 = TokensCounterHashMap.getOrDefault(str1, null);
ArrayList<String> entityTokenTags1 = entityTokenTagsHashMap.getOrDefault(str1, null);
ArrayList<String> nerEntities1 = nerEntitiesHashMap.getOrDefault(str1, null);
ArrayList<String> nerEntitiesType1 = nerEntitiesTypeHashMap.getOrDefault(str1, null);
ArrayList<String> stopWordToken1 = stopWordTokenHashMap.getOrDefault(str1, null);
ArrayList<String> stopWordLemma1 = stopWordLemmaHashMap.getOrDefault(str1, null);
Integer PairCounter1 = PairCounterHashMap.getOrDefault(str1, null);
SentimentAnalyzerTest SMX = new SentimentAnalyzerTest(strF, str1, new SimilarityMatrix(strF, str1),
coreMaps1, coreMaps2, strAnno,
pipelineAnnotationCache.get(str1), strAnnoSentiment,
pipelineSentimentAnnotationCache.get(str1), coreDocument, coreDocumentAnnotationCache.get(str1),
tagger, gsf, classifier, tokenizeCounting, tokenizeCountingF,
taggedWordListF, taggedWordList1, retrieveTGWListF, retrieveTGWList1,
sentencesF, sentence1, sentencesSentimentF, sentenceSentiment1, treesF, trees1,
grammaticalStructuresF, grammaticalStructures1, typedDependenciesF,
typedDependencies1, rnnCoreAnnotationsPredictedF, rnnCoreAnnotationsPredicted1,
simpleMatricesF, simpleMatrices1, simpleMatricesNodevectorsF, simpleMatricesNodevectors1,
listF, list1, longestF, longest1, sentimentLongestF, sentimentLongest1, imwesF,
imwes1, InflectedCounterNegativeF, InflectedCounterNegative1, InflectedCounterPositiveF,
InflectedCounterPositive1, tokenEntryF, tokenEntry1, MarkedContinuousCounterF,
MarkedContinuousCounter1, UnmarkedPatternCounterF, UnmarkedPatternCounter1,
strTokensIpartFormF, strTokensIpartForm1, tokenFormsF, tokenForms1,
strTokenEntryGetPOSF, strTokenEntryGetPOS1, intTokenEntyCountsF,
intTokenEntyCounts1, ITokenTagsF, ITokenTags1, strTokenStemsF, strTokenStems1,
AnotatorcounterF, Anotatorcounter1, TokensCounterF, TokensCounter1,
entityTokenTagsF, entityTokenTags1, nerEntitiesF, nerEntities1, nerEntitiesTypeF,
nerEntitiesType1, stopWordTokenF, stopWordToken1, stopWordLemmaF, stopWordLemma1,
PairCounterF, PairCounter1
);
if (tokenizeCounting == null) {
tokenizeCountingHashMap.put(str1, SMX.getTokenizeCounting());
}
if (taggedWordList1 == null) {
taggedWordListHashMap.put(str1, SMX.getTaggedWordList1());
}
SentimentAnalyzerTest SMX = getReponseFuturesHelper(strF, str1, stanfordCoreNLP, stanfordCoreNLPSentiment,
coreMaps1, strAnno, strAnnoSentiment, coreDocument, tokenizeCountingF, taggedWordListF
, typedDependenciesF, rnnCoreAnnotationsPredictedF, simpleMatricesF, simpleMatricesNodevectorsF
, listF, longestF, sentencesF, sentencesSentimentF, treesF, grammaticalStructuresF, sentimentLongestF
, imwesF, InflectedCounterNegativeF, InflectedCounterPositiveF, tokenEntryF, UnmarkedPatternCounterF
, strTokensIpartFormF, tokenFormsF, intTokenEntyCountsF, MarkedContinuousCounterF, ITokenTagsF
, strTokenEntryGetPOSF, retrieveTGWListF, PairCounterF, TokensCounterF, stopWordLemmaF, nerEntitiesF
, stopWordTokenF, entityTokenTagsF, nerEntitiesTypeF, AnotatorcounterF, strTokenStemsF);
if (tokenizeCountingF == null) {
tokenizeCountingF = SMX.getTokenizeCountingF();
}
if (taggedWordListF == null) {
taggedWordListF = SMX.getTaggedWordListF();
}
if (retrieveTGWListF == null) {
retrieveTGWListF = SMX.getRetrieveTGWListF();
}
if (retrieveTGWList1 == null) {
retrieveTGWListHashMap.put(str1, SMX.getRetrieveTGWList1());
}
if (sentencesF == null) {
sentencesF = SMX.getSentencesF();
}
if (sentence1 == null) {
sentences1HashMap.put(str1, SMX.getSentences1());
}
if (sentencesSentimentF == null) {
sentencesSentimentF = SMX.getSentencesSentimentF();
}
if (sentenceSentiment1 == null) {
sentencesSentimentHashMap.put(str1, SMX.getSentencesSentiment1());
}
if (treesF == null) {
treesF = SMX.getTreesF();
}
if (trees1 == null) {
trees1HashMap.put(str1, SMX.getTrees1());
}
if (grammaticalStructuresF == null) {
grammaticalStructuresF = SMX.getGrammaticalStructuresF();
}
if (grammaticalStructures1 == null) {
grammaticalStructureHashMap.put(str1, SMX.getGrammaticalStructures1());
}
if (typedDependenciesF == null) {
typedDependenciesF = SMX.getTypedDependenciesF();
}
if (typedDependencies1 == null) {
typedDependenciesHashMap.put(str1, SMX.getTypedDependencies1());
}
if (rnnCoreAnnotationsPredictedF == null) {
rnnCoreAnnotationsPredictedF = SMX.getRnnCoreAnnotationsPredictedF();
}
if (rnnCoreAnnotationsPredicted1 == null) {
rnnCoreAnnotationsPredictedHashMap.put(str1, SMX.getRnnCoreAnnotationsPredicted1());
}
if (simpleMatricesF == null) {
simpleMatricesF = SMX.getSimpleMatricesF();
}
if (simpleMatrices1 == null) {
simpleMatricesHashMap.put(str1, SMX.getSimpleMatrices1());
}
if (simpleMatricesNodevectorsF == null) {
simpleMatricesNodevectorsF = SMX.getSimpleMatricesNodevectorsF();
}
if (simpleMatricesNodevectors1 == null) {
simpleMatricesNodevectorsHashMap.put(str1, SMX.getSimpleMatricesNodevectors1());
}
if (listF == null) {
listF = SMX.getListF();
}
if (list1 == null) {
listHashMap.put(str1, SMX.getList1());
}
if (longestF == null) {
longestF = SMX.getLongestF();
}
if (longest1 == null) {
longestHashMap.put(str1, SMX.getLongest1());
}
if (sentencesF == null) {
sentencesF = SMX.getSentencesF();
}
if (sentencesSentimentF == null) {
sentencesSentimentF = SMX.getSentencesSentimentF();
}
if (treesF == null) {
treesF = SMX.getTreesF();
}
if (grammaticalStructuresF == null) {
grammaticalStructuresF = SMX.getGrammaticalStructuresF();
}
if (sentimentLongestF == null) {
sentimentLongestF = SMX.getSentimentLongestF();
}
if (sentimentLongest1 == null) {
sentimentHashMap.put(str1, SMX.getSentimentLongest1());
}
if (imwesF == null) {
imwesF = SMX.getImwesF();
}
if (imwes1 == null) {
imwesHashMap.put(str1, SMX.getImwes1());
}
if (InflectedCounterNegativeF == null) {
InflectedCounterNegativeF = SMX.getInflectedCounterNegativeF();
}
if (InflectedCounterNegative1 == null) {
InflectedCounterNegativeHashMap.put(str1, SMX.getInflectedCounterNegative1());
}
if (InflectedCounterPositiveF == null) {
InflectedCounterPositiveF = SMX.getInflectedCounterPositiveF();
}
if (InflectedCounterPositive1 == null) {
InflectedCounterPositiveHashMap.put(str1, SMX.getInflectedCounterPositive1());
}
if (tokenEntryF == null) {
tokenEntryF = SMX.getTokenEntryF();
}
if (tokenEntry1 == null) {
tokenEntryHashMap.put(str1, SMX.getTokenEntry1());
}
if (MarkedContinuousCounterF == null) {
MarkedContinuousCounterF = SMX.getMarkedContinuousCounterF();
}
if (MarkedContinuousCounter1 == null) {
MarkedContinuousCounterHashMap.put(str1, SMX.getMarkedContinuousCounter1());
}
if (UnmarkedPatternCounterF == null) {
UnmarkedPatternCounterF = SMX.getUnmarkedPatternCounterF();
}
if (UnmarkedPatternCounter1 == null) {
UnmarkedPatternCounterHashMap.put(str1, SMX.getUnmarkedPatternCounter1());
}
if (strTokensIpartFormF == null) {
strTokensIpartFormF = SMX.getStrTokensIpartFormF();
}
if (strTokensIpartForm1 == null) {
strTokensIpartFormHashMap.put(str1, SMX.getStrTokensIpartForm1());
}
if (tokenFormsF == null) {
tokenFormsF = SMX.getTokenFormsF();
}
if (tokenForms1 == null) {
tokenFormsHashMap.put(str1, SMX.getTokenForms1());
}
if (strTokenEntryGetPOSF == null) {
strTokenEntryGetPOSF = SMX.getStrTokenEntryGetPOSF();
}
if (strTokenEntryGetPOS1 == null) {
strTokenEntryGetPOSHashMap.put(str1, SMX.getStrTokenEntryGetPOS1());
}
if (intTokenEntyCountsF == null) {
intTokenEntyCountsF = SMX.getIntTokenEntyCountsF();
}
if (intTokenEntyCounts1 == null) {
intTokenEntyCountsHashMap.put(str1, SMX.getIntTokenEntyCounts1());
}
if (MarkedContinuousCounterF == null) {
MarkedContinuousCounterF = SMX.getMarkedContinuousCounterF();
}
if (ITokenTagsF == null) {
ITokenTagsF = SMX.getITokenTagsF();
}
if (ITokenTags1 == null) {
ITokenTagsHashMap.put(str1, SMX.getITokenTags1());
}
if (strTokenEntryGetPOSF == null) {
strTokenEntryGetPOSF = SMX.getStrTokenEntryGetPOSF();
}
if (strTokenStemsF == null) {
strTokenStemsF = SMX.getStrTokenStemsF();
}
if (strTokenStems1 == null) {
strTokenStemsHashMap.put(str1, SMX.getStrTokenStems1());
}
if (AnotatorcounterF == null) {
AnotatorcounterF = SMX.getAnotatorcounterF();
}
if (Anotatorcounter1 == null) {
AnotatorcounterHashMap.put(str1, SMX.getAnotatorcounter1());
}
if (TokensCounterF == null) {
TokensCounterF = SMX.getTokensCounterF();
}
if (TokensCounter1 == null) {
TokensCounterHashMap.put(str1, SMX.getTokensCounter1());
}
if (entityTokenTagsF == null) {
entityTokenTagsF = SMX.getEntityTokenTagsF();
}
if (entityTokenTags1 == null) {
entityTokenTagsHashMap.put(str1, SMX.getEntityTokenTags1());
}
if (nerEntitiesF == null) {
nerEntitiesF = SMX.getNerEntitiesF();
}
if (nerEntities1 == null) {
nerEntitiesHashMap.put(str1, SMX.getNerEntities1());
}
if (nerEntitiesTypeF == null) {
nerEntitiesTypeF = SMX.getNerEntitiesTypeF();
}
if (nerEntitiesType1 == null) {
nerEntitiesTypeHashMap.put(str1, SMX.getNerEntitiesType1());
}
if (stopWordTokenF == null) {
stopWordTokenF = SMX.getStopWordTokenF();
}
if (stopWordToken1 == null) {
stopWordTokenHashMap.put(str1, SMX.getStopWordToken1());
}
if (stopWordLemmaF == null) {
stopWordLemmaF = SMX.getStopWordLemmaF();
}
if (stopWordLemma1 == null) {
stopWordLemmaHashMap.put(str1, SMX.getStopWordLemma1());
}
if (retrieveTGWListF == null) {
retrieveTGWListF = SMX.getRetrieveTGWListF();
}
if (PairCounterF == null) {
PairCounterF = SMX.getPairCounterF();
}
if (PairCounter1 == null) {
PairCounterHashMap.put(str1, SMX.getPairCounter1());
}
if (TokensCounterF == null) {
TokensCounterF = SMX.getTokensCounterF();
}
if (stopWordLemmaF == null) {
stopWordLemmaF = SMX.getStopWordLemmaF();
}
if (nerEntitiesF == null) {
nerEntitiesF = SMX.getNerEntitiesF();
}
if (stopWordTokenF == null) {
stopWordTokenF = SMX.getStopWordTokenF();
}
if (entityTokenTagsF == null) {
entityTokenTagsF = SMX.getEntityTokenTagsF();
}
if (nerEntitiesTypeF == null) {
nerEntitiesTypeF = SMX.getNerEntitiesTypeF();
}
if (AnotatorcounterF == null) {
AnotatorcounterF = SMX.getAnotatorcounterF();
}
if (strTokenStemsF == null) {
strTokenStemsF = SMX.getStrTokenStemsF();
}
SimilarityMatrix getSMX = SMX.callSMX();
@@ -583,19 +579,41 @@ public class Datahandler {
}
}
}
- int cacheRequirement = 6500;
+ int cacheRequirement = 8500;
if (preRelationUserCounters > cacheRequirement && !ues_copy.contains(strF) && filterContent(strF)) {
DataMapper.InsertMYSQLStrings(strF);
DataMapper.checkStringsToDelete();
}
- double randomLenghtPermit = strF.length() * (Math.random() * Math.random() * Math.random() * (Math.random() * 10));
+ double randomLenghtPermit = strF.length() * (Math.random() * Math.random() * (Math.random() * 10));
Collections.reverse(concurrentRelations);
ArrayList<String> mysqlUpdateLastUsed = new ArrayList();
if (!concurrentRelations.isEmpty()) {
boolean passedFirst = false;
int lastIter = 1;
for (String secondaryRelation : concurrentRelations) {
if (SB.toString().length() > randomLenghtPermit && !SB.toString().isEmpty()) {
break;
}
if (passedFirst && lastIter < concurrentRelations.size()) {
String testSTR = SB.toString() + " " + secondaryRelation;
SentimentAnalyzerTest SMX = getReponseFuturesHelper(strF, testSTR, stanfordCoreNLP, stanfordCoreNLPSentiment,
coreMaps1, strAnno, strAnnoSentiment, coreDocument, tokenizeCountingF, taggedWordListF
, typedDependenciesF, rnnCoreAnnotationsPredictedF, simpleMatricesF, simpleMatricesNodevectorsF
, listF, longestF, sentencesF, sentencesSentimentF, treesF, grammaticalStructuresF, sentimentLongestF
, imwesF, InflectedCounterNegativeF, InflectedCounterPositiveF, tokenEntryF, UnmarkedPatternCounterF
, strTokensIpartFormF, tokenFormsF, intTokenEntyCountsF, MarkedContinuousCounterF, ITokenTagsF
, strTokenEntryGetPOSF, retrieveTGWListF, PairCounterF, TokensCounterF, stopWordLemmaF, nerEntitiesF
, stopWordTokenF, entityTokenTagsF, nerEntitiesTypeF, AnotatorcounterF, strTokenStemsF);
SimilarityMatrix getSMX = SMX.callSMX();
double scoreRelationLastUserMsg = getSMX.getDistance();
if (preRelationUserCounters > scoreRelationLastUserMsg) {
break;
}
}
passedFirst = true;
lastIter++;
SB.append(secondaryRelation).append(" ");
mysqlUpdateLastUsed.add(secondaryRelation);
}
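
(This loop is the "response concat can be scored" part of the commit message: from the second relation onward, the candidate concatenation is re-scored through getReponseFuturesHelper, and appending stops once the score would fall below the best per-relation score. Below is a stripped-down sketch of that greedy idea; the score function is a stand-in and the random length cap is omitted.)

import java.util.List;
import java.util.function.ToDoubleFunction;

public class ConcatSketch {
    // Greedily append fragments while the scored concatenation stays at or
    // above the score already reached by the best single relation.
    static String concatWhileImproving(List<String> fragments, double bestScore,
                                       ToDoubleFunction<String> score) {
        StringBuilder sb = new StringBuilder();
        boolean passedFirst = false;
        for (String fragment : fragments) {
            if (passedFirst) {
                double candidate = score.applyAsDouble(sb + " " + fragment);
                if (bestScore > candidate) {
                    break; // appending this fragment would lower the score
                }
            }
            passedFirst = true;
            sb.append(fragment).append(" ");
        }
        return sb.toString().trim();
    }
}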


@@ -89,7 +89,8 @@ public class DiscordHandler extends ListenerAdapter {
private static StanfordCoreNLP stanfordCoreNLP;
private static Datahandler datahandler;
private static StanfordCoreNLP stanfordCoreNLPSentiment;
- private static ExecutorService executorService = Executors.newFixedThreadPool(2);
+ private static ExecutorService executorService = Executors.newFixedThreadPool(1);
+ private static ExecutorService executorServiceIngame = Executors.newFixedThreadPool(1);
//TODO add python program that edits the java code. python program just adds test if statements on
//variables until the tests pass
@@ -113,14 +114,17 @@ public class DiscordHandler extends ListenerAdapter {
//make sure not to use ports that are already occupied.
for (int i = 0; i < autismbotCount; i++) {
final int j = i;
- new Thread(() -> {
- ArrayList<Integer> ports = new ArrayList<Integer>();
- ports.add(48475);
- ports.add(48476);
- ports.add(48477);
- ports.add(48478);
- handleUDPTraffic(ports.get(j), datahandler, stanfordCoreNLP, stanfordCoreNLPSentiment);
- }).start();
+ executorServiceIngame.execute(new Runnable() {
+ @Override
+ public void run() {
+ ArrayList<Integer> ports = new ArrayList<Integer>();
+ ports.add(48475);
+ ports.add(48476);
+ ports.add(48477);
+ ports.add(48478);
+ handleUDPTraffic(ports.get(j), datahandler, stanfordCoreNLP, stanfordCoreNLPSentiment);
+ }
+ });
}
}
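
(Submitting the four port listeners to executorServiceIngame gives them a managed lifecycle, though each handleUDPTraffic call presumably blocks on receive(), so a one-thread pool can only service the first submitted port. Below is a self-contained sketch with the pool sized to the port count; the echo handler is a stand-in, not the project's handleUDPTraffic.)

import java.net.DatagramPacket;
import java.net.DatagramSocket;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class UdpPoolSketch {
    public static void main(String[] args) {
        List<Integer> ports = List.of(48475, 48476, 48477, 48478);
        // One worker per port, since every listener blocks on receive().
        ExecutorService pool = Executors.newFixedThreadPool(ports.size());
        for (int port : ports) {
            pool.execute(() -> listen(port));
        }
    }

    // Stand-in handler: blocks for datagrams and logs the payload.
    static void listen(int port) {
        try (DatagramSocket socket = new DatagramSocket(port)) {
            byte[] buf = new byte[4096];
            while (!Thread.currentThread().isInterrupted()) {
                DatagramPacket packet = new DatagramPacket(buf, buf.length);
                socket.receive(packet);
                String msg = new String(packet.getData(), 0, packet.getLength(), StandardCharsets.UTF_8);
                System.out.println("port " + port + ": " + msg);
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}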
@@ -134,7 +138,8 @@ public class DiscordHandler extends ListenerAdapter {
content = content.replace(member.getId(), "");
}
}
- if (username != null && !event.getAuthor().isBot() && !content.isEmpty()) {
+ if (username != null && !event.getAuthor().isBot() && !content.isEmpty()
+ && event.getMessage().getCategory() != null) {
String channelName = event.getMessage().getChannel().getName().toLowerCase();
boolean channelpermissionsDenied = false;
if (channelName.contains("suggestion-box")) {