Updated dependencies, adapted the code to the updated APIs, fixed sentence removal, and added null checks.
parent 241135566a
commit 509cd2cbe9
pom.xml
@@ -1,5 +1,6 @@
 <?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <modelVersion>4.0.0</modelVersion>
     <groupId>com.mycompany</groupId>
     <artifactId>ArtificialAutism</artifactId>
@@ -32,28 +33,29 @@
         <artifactId>ws4j</artifactId>
         <version>1.0.1</version>
     </dependency>
     <dependency>
-        <groupId>StanfordParser</groupId>
-        <artifactId>StanfordParser</artifactId>
-        <version>1.0</version>
+        <groupId>stanford-corenlp-models-english</groupId>
+        <artifactId>stanford-corenlp-models-english</artifactId>
+        <version>4.3.1</version>
         <type>jar</type>
     </dependency>
     <dependency>
-        <groupId>StanfordParserModel</groupId>
-        <artifactId>StanfordParserModel</artifactId>
-        <version>1.0</version>
+        <groupId>ejml-simple</groupId>
+        <artifactId>ejml-simple</artifactId>
+        <version>0.39</version>
         <type>jar</type>
     </dependency>
     <dependency>
-        <groupId>srParser</groupId>
-        <artifactId>srParser</artifactId>
-        <version>1</version>
+        <groupId>ejml-core</groupId>
+        <artifactId>ejml-core</artifactId>
+        <version>0.39</version>
         <type>jar</type>
     </dependency>
     <dependency>
-        <groupId>ejml</groupId>
-        <artifactId>ejml</artifactId>
-        <version>0.2.3</version>
+        <groupId>ejml-ddense</groupId>
+        <artifactId>ejml-ddense</artifactId>
+        <version>0.39</version>
         <type>jar</type>
     </dependency>
     <dependency>
@@ -68,23 +70,28 @@
         <version>1.0.2</version>
         <type>jar</type>
     </dependency>
-    <dependency>
-        <groupId>CoreNLP</groupId>
-        <artifactId>CoreNLP</artifactId>
-        <version>1.0</version>
-        <type>jar</type>
-    </dependency>
     <dependency>
         <groupId>org.apache.lucene</groupId>
         <artifactId>lucene-analyzers-common</artifactId>
         <version>7.2.0</version>
         <type>jar</type>
     </dependency>
 
+    <dependency>
+        <groupId>edu.stanford.nlp</groupId>
+        <artifactId>stanford-corenlp</artifactId>
+        <version>4.3.1</version>
+    </dependency>
+    <dependency>
+        <groupId>edu.stanford.nlp</groupId>
+        <artifactId>stanford-corenlp</artifactId>
+        <version>4.3.1</version>
+        <classifier>models</classifier>
+    </dependency>
     <dependency>
         <groupId>com.discord4j</groupId>
         <artifactId>discord4j-core</artifactId>
-        <version>3.1.1</version>
+        <version>3.1.7</version>
     </dependency>
     <dependency>
         <groupId>org.jetbrains.kotlin</groupId>
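Note: the unversioned local jars (StanfordParser, StanfordParserModel, srParser, CoreNLP 1.0, ejml 0.2.3) give way to published coordinates: edu.stanford.nlp:stanford-corenlp:4.3.1 plus its models classifier, the split ejml 0.39 artifacts, and discord4j 3.1.7. A minimal smoke test (illustrative, not from the repo; class name and sentence are made up) that fails fast if the models jars did not land on the classpath:

    import edu.stanford.nlp.pipeline.StanfordCoreNLP;
    import java.util.Properties;

    public class PipelineSmokeTest {
        public static void main(String[] args) {
            Properties props = new Properties();
            // Loading pos/ner/parse forces model files from the 4.3.1 models
            // jars to resolve; a missing jar surfaces as a load exception here.
            props.setProperty("annotators", "tokenize,ssplit,pos,lemma,ner,parse");
            StanfordCoreNLP pipeline = new StanfordCoreNLP(props);
            pipeline.process("Smoke test sentence.");
            System.out.println("models resolved");
        }
    }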
DataMapper.java
@@ -85,7 +85,7 @@ public class DataMapper {
         Connection l_cCon = null;
         PreparedStatement l_pStatement = null;
         ResultSet l_rsSearch = null;
-        String l_sSQL = "delete from Sentences where last_used < NOW() - INTERVAL 4 WEEK LIMIT 55";
+        String l_sSQL = "delete from Sentences order by last_used asc LIMIT 15";
         try {
             l_cCon = DBCPDataSource.getConnection();
             l_pStatement = l_cCon.prepareStatement(l_sSQL);
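Note: the old statement aged rows out (last_used older than four weeks, at most 55 per run); the new one unconditionally evicts the 15 least-recently-used rows each run, so table size is now governed by how often the cleanup fires rather than by age. MySQL permits ORDER BY ... LIMIT on a single-table DELETE, so the statement is valid as written. A hedged sketch of the method around this statement, assuming it is the checkStringsToDelete() that the Datahandler hunk below starts calling; DBCPDataSource is the repo's own pool accessor, and try-with-resources stands in for the repo's manual cleanup:

    import java.sql.Connection;
    import java.sql.PreparedStatement;
    import java.sql.SQLException;

    public class DataMapperSketch {
        public static void checkStringsToDelete() {
            // Evict the 15 least-recently-used sentences.
            String sql = "delete from Sentences order by last_used asc LIMIT 15";
            try (Connection con = DBCPDataSource.getConnection();
                 PreparedStatement ps = con.prepareStatement(sql)) {
                ps.executeUpdate();
            } catch (SQLException e) {
                e.printStackTrace();
            }
        }
    }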
Datahandler.kt
@@ -42,8 +42,10 @@ public class Datahandler {
     private var pipelineSentimentAnnotationCache = HashMap<String, Annotation>()
     private var coreDocumentAnnotationCache: HashMap<String, CoreDocument>
     private var jmweAnnotationCache = HashMap<String, Annotation>()
-    private val stringCache = ArrayList<String>()
-    private val nerModel = "edu/stanford/nlp/models/ner/english.all.3class.caseless.distsim.crf.ser.gz"
+    private var stringCache = ArrayList<String>()
+
+    //private val nerModel = "edu/stanford/nlp/models/ner/english.all.3class.caseless.distsim.crf.ser.gz"
+    private val nerModel = "edu/stanford/nlp/models/ner/english.all.3class.distsim.crf.ser.gz"
     private var tagger: MaxentTagger = MaxentTagger()
     private var gsf: GrammaticalStructureFactory
     private var classifier: AbstractSequenceClassifier<CoreLabel>
@@ -99,8 +101,9 @@ public class Datahandler {
 
     fun initiateGrammaticalStructureFactory(): GrammaticalStructureFactory {
         val options = arrayOf("-maxLength", "100")
-        val lexParserEnglishRNN = "edu/stanford/nlp/models/lexparser/englishRNN.ser.gz"
-        val lp = LexicalizedParser.loadModel(lexParserEnglishRNN, *options)
+        //val lexParserEnglishRNN = "edu/stanford/nlp/models/lexparser/englishRNN.ser.gz"
+        val lexParserEnglishPCFG = "edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz"
+        val lp = LexicalizedParser.loadModel(lexParserEnglishPCFG, *options)
         val tlp = lp.getOp().langpack()
         return tlp.grammaticalStructureFactory()
     }
@@ -108,8 +111,10 @@ public class Datahandler {
     public fun pipeLineSetUp(): StanfordCoreNLP {
         val props = Properties()
         val shiftReduceParserPath = "edu/stanford/nlp/models/srparser/englishSR.ser.gz"
-        val nerModel2 = "edu/stanford/nlp/models/ner/english.conll.4class.caseless.distsim.crf.ser.gz"
-        val nerModel3 = "edu/stanford/nlp/models/ner/english.muc.7class.caseless.distsim.crf.ser.gz"
+        //val nerModel2 = "edu/stanford/nlp/models/ner/english.conll.4class.caseless.distsim.crf.ser.gz"
+        val nerModel2 = "edu/stanford/nlp/models/ner/english.conll.4class.distsim.crf.ser.gz"
+        //val nerModel3 = "edu/stanford/nlp/models/ner/english.muc.7class.caseless.distsim.crf.ser.gz"
+        val nerModel3 = "edu/stanford/nlp/models/ner/english.muc.7class.distsim.crf.ser.gz"
         props.setProperty("annotators", "tokenize,ssplit,pos,lemma,ner,parse")
         props.setProperty("parse.model", shiftReduceParserPath)
         props.setProperty("parse.maxlen", "90")
@@ -129,12 +134,14 @@ public class Datahandler {
 
     fun shiftReduceParserInitiate(): StanfordCoreNLP {
         val propsSentiment = Properties()
-        val lexParserEnglishRNN = "edu/stanford/nlp/models/lexparser/englishRNN.ser.gz"
+        //val lexParserEnglishRNN = "edu/stanford/nlp/models/lexparser/englishRNN.ser.gz"
+        val lexParserEnglishPCFG = "edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz"
         val sentimentModel = "edu/stanford/nlp/models/sentiment/sentiment.ser.gz"
-        val taggerPath = "edu/stanford/nlp/models/pos-tagger/english-left3words/english-left3words-distsim.tagger"
+        //val taggerPath = "edu/stanford/nlp/models/pos-tagger/english-left3words/english-left3words-distsim.tagger"
+        val taggerPath = "edu/stanford/nlp/models/pos-tagger/english-left3words-distsim.tagger"
         val customStopWordList = "start,starts,period,periods,a,an,and,are,as,at,be,but,by,for,if,in,into,is,it,no,not,of," +
                 "on,or,such,that,the,their,then,there,these,they,this,to,was,will,with"
-        propsSentiment.setProperty("parse.model", lexParserEnglishRNN)
+        propsSentiment.setProperty("parse.model", lexParserEnglishPCFG)
         propsSentiment.setProperty("sentiment.model", sentimentModel)
         propsSentiment.setProperty("parse.maxlen", "90")
         propsSentiment.setProperty("threads", "5")
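Note: both factory methods drop the englishRNN lexparser for englishPCFG and the caseless NER models for the cased defaults, and the tagger path loses its english-left3words subdirectory, matching the flattened layout of the CoreNLP 4.x model jars. The swaps line up with the new pom dependencies, presumably because the RNN and caseless variants are not present in the jars now on the classpath. A hedged Java rendering of the resulting sentiment-pipeline configuration (the annotator list is an assumption; the hunk cuts off before it):

    import edu.stanford.nlp.pipeline.StanfordCoreNLP;
    import java.util.Properties;

    public class SentimentPipelineSketch {
        public static StanfordCoreNLP build() {
            Properties props = new Properties();
            props.setProperty("annotators", "tokenize,ssplit,pos,parse,sentiment"); // assumed
            props.setProperty("parse.model", "edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz");
            props.setProperty("sentiment.model", "edu/stanford/nlp/models/sentiment/sentiment.ser.gz");
            props.setProperty("pos.model", "edu/stanford/nlp/models/pos-tagger/english-left3words-distsim.tagger");
            props.setProperty("parse.maxlen", "90");
            props.setProperty("threads", "5");
            return new StanfordCoreNLP(props);
        }
    }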
@@ -163,6 +170,8 @@ public class Datahandler {
             val arrayList = java.util.ArrayList<String>(stringCache)
             DataMapper.InsertMYSQLStrings(arrayList)
+            DataMapper.checkStringsToDelete();
+            stringCache = ArrayList<String>();
             initiateMYSQL();
         }
     }
 
DoStuff.java
@@ -81,9 +81,7 @@ public class DoStuff {
                     }
                 }
             }
-            new Thread(() -> {
-                datahandler.updateStringCache();
-            }).start();
+            datahandler.updateStringCache();
         }
     }
 }
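Note: updateStringCache() now runs on the caller's thread instead of a fire-and-forget Thread, so DoStuff finishes the refresh before returning and exceptions are no longer lost with a dying thread. If the call ever needs to go back off-thread, a bounded executor is a gentler shape than one Thread per invocation; an illustrative sketch, not repo code:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public class CacheRefresher {
        // Single shared worker: refreshes queue up in order instead of
        // spawning a fresh thread for every message event.
        private static final ExecutorService WORKER = Executors.newSingleThreadExecutor();

        public static void refreshAsync(Runnable updateStringCache) {
            WORKER.submit(updateStringCache);
        }
    }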
SentimentAnalyzerTest.java
@@ -1772,6 +1772,9 @@ public class SentimentAnalyzerTest {
     }
 
     private ArrayList<String> getnerEntities(CoreDocument coreDocument) {
+        if (coreDocument == null || coreDocument.entityMentions() == null) {
+            return new ArrayList<String>();
+        }
         ArrayList<String> arrs = new ArrayList<>();
         for (CoreEntityMention em : coreDocument.entityMentions()) {
             if (!arrs.contains(em.text())) {
@@ -1782,6 +1785,9 @@ public class SentimentAnalyzerTest {
     }
 
     private ArrayList<String> getnerEntitiesType(CoreDocument coreDocument) {
+        if (coreDocument == null || coreDocument.entityMentions() == null) {
+            return new ArrayList<String>();
+        }
         ArrayList<String> arrs = new ArrayList<>();
         for (CoreEntityMention em : coreDocument.entityMentions()) {
             if (!arrs.contains(em.entityType())) {
@@ -1841,22 +1847,27 @@ public class SentimentAnalyzerTest {
     }
 
     private ArrayList<String> getentityTokenTags(CoreDocument coreDocument) {
+        if (coreDocument == null || coreDocument.entityMentions() == null) {
+            return new ArrayList<String>();
+        }
         ArrayList<String> arrs = new ArrayList<>();
-        for (CoreEntityMention em : coreDocument.entityMentions()) {
-            List<CoreLabel> tokens = em.tokens();
-            String entityType = em.entityType();
-            Double EntityConfidences = 0.0;
-            Set<Map.Entry<String, Double>> entrySet = em.entityTypeConfidences().entrySet();
-            for (Map.Entry<String, Double> entries : entrySet) {
-                if (EntityConfidences < entries.getValue()) {
-                    EntityConfidences = entries.getValue();
-                }
-            }
-            for (CoreLabel token : tokens) {
-                if (token != null) {
-                    if (!arrs.contains(token.tag())) {
-                        if (entityType.equals("PERSON") && EntityConfidences > 0.80) {
-                            arrs.add(token.tag());
-                        }
-                    }
-                }
-            }
+        if (coreDocument != null) {
+            for (CoreEntityMention em : coreDocument.entityMentions()) {
+                List<CoreLabel> tokens = em.tokens();
+                String entityType = em.entityType();
+                Double EntityConfidences = 0.0;
+                Set<Map.Entry<String, Double>> entrySet = em.entityTypeConfidences().entrySet();
+                for (Map.Entry<String, Double> entries : entrySet) {
+                    if (EntityConfidences < entries.getValue()) {
+                        EntityConfidences = entries.getValue();
+                    }
+                }
+                for (CoreLabel token : tokens) {
+                    if (token != null) {
+                        if (!arrs.contains(token.tag())) {
+                            if (entityType.equals("PERSON") && EntityConfidences > 0.80) {
+                                arrs.add(token.tag());
+                            }
+                        }
+                    }
+                }
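Note: all three NER helpers now return an empty list when the document is null or entityMentions() is null, which CoreNLP can produce when annotation fails or the ner annotator is skipped. In getentityTokenTags the body is additionally wrapped in if (coreDocument != null), which is redundant once the early return is in place. A condensed illustration of the guard the commit adds (simplified, not the repo's exact code):

    import edu.stanford.nlp.pipeline.CoreDocument;
    import edu.stanford.nlp.pipeline.CoreEntityMention;
    import java.util.ArrayList;

    public class NerGuardSketch {
        static ArrayList<String> getnerEntities(CoreDocument coreDocument) {
            // Early return makes any later null re-check redundant.
            if (coreDocument == null || coreDocument.entityMentions() == null) {
                return new ArrayList<String>();
            }
            ArrayList<String> arrs = new ArrayList<>();
            for (CoreEntityMention em : coreDocument.entityMentions()) {
                if (!arrs.contains(em.text())) {
                    arrs.add(em.text());
                }
            }
            return arrs;
        }
    }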
DiscordHandler.java
@@ -3,7 +3,6 @@ package PresentationLayer;
 import DataLayer.settings;
 import FunctionLayer.Datahandler;
 import FunctionLayer.PipelineJMWESingleton;
-import com.sun.tools.javac.util.List;
 import discord4j.core.DiscordClient;
 import discord4j.core.GatewayDiscordClient;
 import discord4j.core.event.domain.message.MessageCreateEvent;
@@ -13,6 +12,7 @@ import java.io.IOException;
 import java.io.UnsupportedEncodingException;
 import java.net.*;
 import java.sql.SQLException;
+import java.util.ArrayList;
 
 
 /**
@@ -95,7 +95,11 @@ public class DiscordHandler {
         for (int i = 0; i < autismbotCount; i++) {
             final int j = i;
             new Thread(() -> {
-                List<Integer> ports = List.of(48475, 48476, 48477, 48478);
+                ArrayList<Integer> ports = new ArrayList<Integer>();
+                ports.add(48475);
+                ports.add(48476);
+                ports.add(48477);
+                ports.add(48478);
                 handleUDPTraffic(ports.get(j), datahandler, stanfordCoreNLP, stanfordCoreNLPSentiment);
             }).start();
         }
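Note: com.sun.tools.javac.util.List is an internal compiler class whose List.of happened to compile here but tied the bot to the jdk.compiler module; the commit swaps it for a plain ArrayList with java.util.ArrayList imported above. On Java 9+ the idiomatic equivalent would be java.util.List.of, which yields an immutable list; an illustrative alternative, not what the commit uses:

    import java.util.List;

    public class PortsSketch {
        static final List<Integer> UDP_PORTS = List.of(48475, 48476, 48477, 48478);

        public static void main(String[] args) {
            // Each bot thread j would pick its own UDP port, as in handleUDPTraffic.
            System.out.println(UDP_PORTS.get(0)); // 48475
        }
    }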