autism
This commit is contained in:
parent
fc01f7fb43
commit
57bbd5d55e
14
ArtificialAutism/NOTES.txt
Normal file
14
ArtificialAutism/NOTES.txt
Normal file
@ -0,0 +1,14 @@
|
||||
for running this you need to include stanford parser:
|
||||
https://nlp.stanford.edu/software/lex-parser.shtml#Download
|
||||
|
||||
if you want to run it on a remote machine with the current POM setup you need to include all jars which
|
||||
means also the stanford parser jars filling around 620 MB, this jar is currently not included in the uploaded /libs folder
|
||||
because its too large, download it from the link above and put it in the /libs folder of the remote machine to run it on
|
||||
|
||||
now also requires: https://nlp.stanford.edu/software/stanford-srparser-2014-10-23-models.jar
|
||||
kinda fucking huge
|
||||
|
||||
now requires about 4 GB ram atleast to run.
|
||||
also now requires nlp.stanford.edu/software/stanford-corenlp-full-2018-10-05.zip
|
||||
because it needs lexicalized parsers which shift reduce parser does not posses.
|
||||
also requires the regular stanford parser except of just shift reduce parser
|
46
ArtificialAutism/nbactions.xml
Normal file
46
ArtificialAutism/nbactions.xml
Normal file
@ -0,0 +1,46 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<actions>
|
||||
<action>
|
||||
<actionName>run</actionName>
|
||||
<packagings>
|
||||
<packaging>jar</packaging>
|
||||
</packagings>
|
||||
<goals>
|
||||
<goal>process-classes</goal>
|
||||
<goal>org.codehaus.mojo:exec-maven-plugin:1.2.1:exec</goal>
|
||||
</goals>
|
||||
<properties>
|
||||
<exec.args>-classpath %classpath PresentationLayer.DiscordHandler</exec.args>
|
||||
<exec.executable>java</exec.executable>
|
||||
</properties>
|
||||
</action>
|
||||
<action>
|
||||
<actionName>debug</actionName>
|
||||
<packagings>
|
||||
<packaging>jar</packaging>
|
||||
</packagings>
|
||||
<goals>
|
||||
<goal>process-classes</goal>
|
||||
<goal>org.codehaus.mojo:exec-maven-plugin:1.2.1:exec</goal>
|
||||
</goals>
|
||||
<properties>
|
||||
<exec.args>-Xdebug -Xrunjdwp:transport=dt_socket,server=n,address=${jpda.address} -classpath %classpath PresentationLayer.DiscordHandler</exec.args>
|
||||
<exec.executable>java</exec.executable>
|
||||
<jpda.listen>true</jpda.listen>
|
||||
</properties>
|
||||
</action>
|
||||
<action>
|
||||
<actionName>profile</actionName>
|
||||
<packagings>
|
||||
<packaging>jar</packaging>
|
||||
</packagings>
|
||||
<goals>
|
||||
<goal>process-classes</goal>
|
||||
<goal>org.codehaus.mojo:exec-maven-plugin:1.2.1:exec</goal>
|
||||
</goals>
|
||||
<properties>
|
||||
<exec.args>-classpath %classpath PresentationLayer.DiscordHandler</exec.args>
|
||||
<exec.executable>java</exec.executable>
|
||||
</properties>
|
||||
</action>
|
||||
</actions>
|
99
ArtificialAutism/pom.xml
Normal file
99
ArtificialAutism/pom.xml
Normal file
@ -0,0 +1,99 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<groupId>com.mycompany</groupId>
|
||||
<artifactId>ArtificialAutism</artifactId>
|
||||
<version>1.0</version>
|
||||
<packaging>jar</packaging>
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>org.apache.commons</groupId>
|
||||
<artifactId>commons-dbcp2</artifactId>
|
||||
<version>2.5.0</version>
|
||||
<type>jar</type>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>mysql</groupId>
|
||||
<artifactId>mysql-connector-java</artifactId>
|
||||
<version>8.0.13</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.javacord</groupId>
|
||||
<artifactId>javacord-api</artifactId>
|
||||
<version>3.0.1</version>
|
||||
<type>jar</type>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.javacord</groupId>
|
||||
<artifactId>javacord</artifactId>
|
||||
<version>3.0.1</version>
|
||||
<type>pom</type>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.google.guava</groupId>
|
||||
<artifactId>guava</artifactId>
|
||||
<version>26.0-jre</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.github.mpkorstanje</groupId>
|
||||
<artifactId>simmetrics-core</artifactId>
|
||||
<version>4.1.1</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>ws4j</groupId>
|
||||
<artifactId>ws4j</artifactId>
|
||||
<version>1.0.1</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>StanfordParser</groupId>
|
||||
<artifactId>StanfordParser</artifactId>
|
||||
<version>1.0</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>StanfordParserModel</groupId>
|
||||
<artifactId>StanfordParserModel</artifactId>
|
||||
<version>1.0</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>srParser</groupId>
|
||||
<artifactId>srParser</artifactId>
|
||||
<version>1</version>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
<properties>
|
||||
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
|
||||
<maven.compiler.source>1.8</maven.compiler.source>
|
||||
<maven.compiler.target>1.8</maven.compiler.target>
|
||||
<mainClass>PresentationLayer.DiscordHandler</mainClass>
|
||||
</properties>
|
||||
<build>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<artifactId>maven-dependency-plugin</artifactId>
|
||||
<executions>
|
||||
<execution>
|
||||
<phase>install</phase>
|
||||
<goals>
|
||||
<goal>copy-dependencies</goal>
|
||||
</goals>
|
||||
<configuration>
|
||||
<outputDirectory>${project.build.directory}/lib</outputDirectory>
|
||||
</configuration>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<artifactId>maven-jar-plugin</artifactId>
|
||||
<configuration>
|
||||
<archive>
|
||||
<manifest>
|
||||
<addClasspath>true</addClasspath>
|
||||
<classpathPrefix>lib/</classpathPrefix>
|
||||
<mainClass>${mainClass}</mainClass>
|
||||
</manifest>
|
||||
</archive>
|
||||
</configuration>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</build>
|
||||
</project>
|
42
ArtificialAutism/src/main/java/DataLayer/DBCPDataSource.java
Normal file
42
ArtificialAutism/src/main/java/DataLayer/DBCPDataSource.java
Normal file
@ -0,0 +1,42 @@
|
||||
/*
|
||||
* To change this license header, choose License Headers in Project Properties.
|
||||
* To change this template file, choose Tools | Templates
|
||||
* and open the template in the editor.
|
||||
*/
|
||||
package DataLayer;
|
||||
|
||||
import java.sql.Connection;
|
||||
import java.sql.SQLException;
|
||||
import java.util.logging.Level;
|
||||
import java.util.logging.Logger;
|
||||
import org.apache.commons.dbcp2.BasicDataSource;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author install1
|
||||
*/
|
||||
public class DBCPDataSource {
|
||||
private static BasicDataSource ds = new BasicDataSource();
|
||||
static {
|
||||
try {
|
||||
ds.setDriver(new com.mysql.cj.jdbc.Driver());
|
||||
ds.setUrl("jdbc:mysql://151.80.230.149:3306/ArtificialAutism?useLegacyDatetimeCode=false&serverTimezone=UTC");
|
||||
//ds.setUrl("jdbc:mysql://localhost:3306/ArtificialAutism?useLegacyDatetimeCode=false&serverTimezone=UTC");
|
||||
ds.setUsername("ArtificialAutism");
|
||||
ds.setPassword("b423b54bwbfb1340438fn");
|
||||
ds.setMaxTotal(-1);
|
||||
ds.setMinIdle(5);
|
||||
ds.setMaxIdle(-1);
|
||||
ds.setMaxOpenPreparedStatements(100);
|
||||
} catch (SQLException ex) {
|
||||
Logger.getLogger(DBCPDataSource.class.getName()).log(Level.SEVERE, null, ex);
|
||||
}
|
||||
}
|
||||
|
||||
public static Connection getConnection() throws SQLException {
|
||||
return ds.getConnection();
|
||||
}
|
||||
|
||||
private DBCPDataSource() {
|
||||
}
|
||||
}
|
194
ArtificialAutism/src/main/java/DataLayer/DataMapper.java
Normal file
194
ArtificialAutism/src/main/java/DataLayer/DataMapper.java
Normal file
@ -0,0 +1,194 @@
|
||||
/*
|
||||
* To change this license header, choose License Headers in Project Properties.
|
||||
* To change this template file, choose Tools | Templates
|
||||
* and open the template in the editor.
|
||||
*/
|
||||
package DataLayer;
|
||||
|
||||
import FunctionLayer.SimilarityMatrix;
|
||||
import FunctionLayer.CustomError;
|
||||
import java.sql.Connection;
|
||||
import java.sql.PreparedStatement;
|
||||
import java.sql.ResultSet;
|
||||
import java.sql.SQLException;
|
||||
import java.sql.Statement;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.logging.Level;
|
||||
import java.util.logging.Logger;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author install1
|
||||
*/
|
||||
public class DataMapper {
|
||||
|
||||
public static void createTables() throws CustomError {
|
||||
Connection l_cCon = null;
|
||||
PreparedStatement l_pStatement = null;
|
||||
ResultSet l_rsSearch = null;
|
||||
try {
|
||||
l_cCon = DBCPDataSource.getConnection();
|
||||
String l_sSQL = "CREATE TABLE IF NOT EXISTS `ArtificialAutism`.`Sentences` (`Strings` VARCHAR(256) NOT NULL, PRIMARY KEY (`Strings`))\n"
|
||||
+ "ENGINE = InnoDB;";
|
||||
l_pStatement = l_cCon.prepareStatement(l_sSQL);
|
||||
l_pStatement.execute();
|
||||
l_sSQL = "CREATE TABLE IF NOT EXISTS `ArtificialAutism`.`WordMatrix` (`Str1` VARCHAR(254) NOT NULL, `Str2` VARCHAR(254) NOT NULL,\n"
|
||||
+ " `Distance` DOUBLE NOT NULL, `ID` INT NOT NULL AUTO_INCREMENT,\n"
|
||||
+ " PRIMARY KEY (`ID`))\n"
|
||||
+ "ENGINE = InnoDB;";
|
||||
l_pStatement = l_cCon.prepareStatement(l_sSQL);
|
||||
l_pStatement.execute();
|
||||
} catch (SQLException ex) {
|
||||
throw new CustomError("failed in DataMapper " + ex.getMessage());
|
||||
} finally {
|
||||
CloseConnections(l_pStatement, l_rsSearch, l_cCon);
|
||||
}
|
||||
}
|
||||
|
||||
public static List<String> getAllStrings() throws CustomError {
|
||||
List<String> str = new ArrayList();
|
||||
Connection l_cCon = null;
|
||||
PreparedStatement l_pStatement = null;
|
||||
ResultSet l_rsSearch = null;
|
||||
try {
|
||||
l_cCon = DBCPDataSource.getConnection();
|
||||
String l_sSQL = "SELECT * FROM `Sentences`";
|
||||
l_pStatement = l_cCon.prepareStatement(l_sSQL, java.sql.ResultSet.TYPE_FORWARD_ONLY,
|
||||
java.sql.ResultSet.CONCUR_READ_ONLY);
|
||||
l_pStatement.setFetchSize(Integer.MIN_VALUE);
|
||||
l_rsSearch = l_pStatement.executeQuery();
|
||||
while (l_rsSearch.next()) {
|
||||
str.add(l_rsSearch.getString(1));
|
||||
}
|
||||
} catch (SQLException ex) {
|
||||
throw new CustomError("failed in DataMapper " + ex.getMessage());
|
||||
} finally {
|
||||
CloseConnections(l_pStatement, l_rsSearch, l_cCon);
|
||||
}
|
||||
return str;
|
||||
}
|
||||
|
||||
public static void InsertMYSQLStrings(List<String> str) throws CustomError {
|
||||
Connection l_cCon = null;
|
||||
PreparedStatement l_pStatement = null;
|
||||
ResultSet l_rsSearch = null;
|
||||
String l_sSQL = "INSERT IGNORE `Sentences` (`Strings`) VALUES (?)";
|
||||
try {
|
||||
if (str != null && str.size() > 0) {
|
||||
l_cCon = DBCPDataSource.getConnection();
|
||||
l_pStatement = l_cCon.prepareStatement(l_sSQL, java.sql.ResultSet.TYPE_FORWARD_ONLY,
|
||||
java.sql.ResultSet.CONCUR_READ_ONLY);
|
||||
l_pStatement.setFetchSize(Integer.MIN_VALUE);
|
||||
for (String str1 : str) {
|
||||
System.out.println("adding str1: " + str1 + "\n");
|
||||
l_pStatement.setString(1, str1);
|
||||
l_pStatement.addBatch();
|
||||
}
|
||||
l_pStatement.executeBatch();
|
||||
}
|
||||
} catch (SQLException ex) {
|
||||
throw new CustomError("failed in DataMapper " + ex.getMessage());
|
||||
} finally {
|
||||
CloseConnections(l_pStatement, l_rsSearch, l_cCon);
|
||||
}
|
||||
}
|
||||
|
||||
public static int getSementicsDBRows() {
|
||||
int count = 0;
|
||||
try (Connection l_cCon = DBCPDataSource.getConnection()) {
|
||||
try (Statement s = l_cCon.createStatement();
|
||||
ResultSet r = s.executeQuery("SELECT COUNT(*) AS rowcount FROM WordMatrix")) {
|
||||
r.next();
|
||||
count = r.getInt("rowcount");
|
||||
}
|
||||
} catch (SQLException ex) {
|
||||
Logger.getLogger(DataMapper.class.getName()).log(Level.SEVERE, null, ex);
|
||||
}
|
||||
return count;
|
||||
}
|
||||
|
||||
public static List<SimilarityMatrix> getAllSementicMatrixes() throws CustomError {
|
||||
//https://stackoverflow.com/questions/5157476/resultset-behavior-with-mysql-database-does-it-store-all-rows-in-memory/5159999#5159999
|
||||
//https://stackoverflow.com/questions/3682614/how-to-read-all-rows-from-huge-table
|
||||
int count = getSementicsDBRows();
|
||||
int counter2 = 0;
|
||||
int hardCapRetrieveCount = 500000;
|
||||
List<SimilarityMatrix> WS4JList = new ArrayList(count + 1);
|
||||
while (count > counter2) {
|
||||
try (Connection l_cCon = DBCPDataSource.getConnection()) {
|
||||
l_cCon.setAutoCommit(false);
|
||||
String l_sSQL = "SELECT * FROM `WordMatrix` WHERE ID > " + counter2 + " AND ID < " + (counter2 + hardCapRetrieveCount);
|
||||
try (PreparedStatement l_pStatement = l_cCon.prepareStatement(l_sSQL, java.sql.ResultSet.TYPE_FORWARD_ONLY,
|
||||
java.sql.ResultSet.CONCUR_READ_ONLY)) {
|
||||
l_pStatement.setFetchSize(Integer.MIN_VALUE);
|
||||
try (ResultSet l_rsSearch = l_pStatement.executeQuery()) {
|
||||
int i = 0;
|
||||
while (l_rsSearch.next() && i < hardCapRetrieveCount) {
|
||||
SimilarityMatrix ws4j = new SimilarityMatrix(l_rsSearch.getString(1), l_rsSearch.getString(2), l_rsSearch.getDouble(3));
|
||||
//find something cheaper than arraylist probably
|
||||
WS4JList.add(ws4j);
|
||||
System.out.println("i: " + i + "\n" + "free memory: " + Runtime.getRuntime().freeMemory() + "\ncounter2: " + counter2 + "\n");
|
||||
i++;
|
||||
counter2++;
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (SQLException ex) {
|
||||
Logger.getLogger(DataMapper.class.getName()).log(Level.SEVERE, null, ex);
|
||||
}
|
||||
}
|
||||
return WS4JList;
|
||||
}
|
||||
|
||||
public static void insertSementicMatrixes(List<SimilarityMatrix> WS4JListUpdate) throws CustomError {
|
||||
Connection l_cCon = null;
|
||||
PreparedStatement l_pStatement = null;
|
||||
ResultSet l_rsSearch = null;
|
||||
String l_sSQL = "INSERT IGNORE `WordMatrix` (`Str1`,`Str2`,`Distance`) VALUES (?, ?, ?)";
|
||||
try {
|
||||
l_cCon = DBCPDataSource.getConnection();
|
||||
l_pStatement = l_cCon.prepareStatement(l_sSQL, java.sql.ResultSet.TYPE_FORWARD_ONLY,
|
||||
java.sql.ResultSet.CONCUR_READ_ONLY);
|
||||
l_pStatement.setFetchSize(Integer.MIN_VALUE);
|
||||
System.out.println("Matrix update size: " + WS4JListUpdate.size());
|
||||
for (SimilarityMatrix ws4j : WS4JListUpdate) {
|
||||
l_pStatement.setString(1, ws4j.getPrimaryString());
|
||||
l_pStatement.setString(2, ws4j.getSecondaryString());
|
||||
l_pStatement.setDouble(3, ws4j.getDistance());
|
||||
l_pStatement.addBatch();
|
||||
}
|
||||
l_pStatement.executeBatch();
|
||||
} catch (SQLException ex) {
|
||||
throw new CustomError("failed in DataMapper " + ex.getMessage());
|
||||
} finally {
|
||||
CloseConnections(l_pStatement, l_rsSearch, l_cCon);
|
||||
}
|
||||
}
|
||||
|
||||
public static void CloseConnections(PreparedStatement ps, ResultSet rs, Connection con) {
|
||||
if (rs != null) {
|
||||
try {
|
||||
rs.close();
|
||||
} catch (SQLException ex) {
|
||||
Logger.getLogger(DataMapper.class.getName()).log(Level.SEVERE, null, ex);
|
||||
}
|
||||
}
|
||||
if (ps != null) {
|
||||
try {
|
||||
ps.close();
|
||||
} catch (SQLException ex) {
|
||||
Logger.getLogger(DataMapper.class.getName()).log(Level.SEVERE, null, ex);
|
||||
}
|
||||
}
|
||||
if (con != null) {
|
||||
try {
|
||||
con.close();
|
||||
} catch (SQLException ex) {
|
||||
Logger.getLogger(DataMapper.class.getName()).log(Level.SEVERE, null, ex);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -0,0 +1,17 @@
|
||||
/*
|
||||
* To change this license header, choose License Headers in Project Properties.
|
||||
* To change this template file, choose Tools | Templates
|
||||
* and open the template in the editor.
|
||||
*/
|
||||
package FunctionLayer;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author install1
|
||||
*/
|
||||
public class CustomError extends Exception {
|
||||
|
||||
public CustomError(String msg) {
|
||||
super(msg);
|
||||
}
|
||||
}
|
@ -0,0 +1,37 @@
|
||||
/*
|
||||
* To change this license header, choose License Headers in Project Properties.
|
||||
* To change this template file, choose Tools | Templates
|
||||
* and open the template in the editor.
|
||||
*/
|
||||
package FunctionLayer;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author install1
|
||||
*/
|
||||
public class LevenshteinDistance {
|
||||
|
||||
private static int minimum(int a, int b, int c) {
|
||||
return Math.min(Math.min(a, b), c);
|
||||
}
|
||||
|
||||
public static int computeLevenshteinDistance(CharSequence lhs, CharSequence rhs) {
|
||||
int[][] distance = new int[lhs.length() + 1][rhs.length() + 1];
|
||||
|
||||
for (int i = 0; i <= lhs.length(); i++) {
|
||||
distance[i][0] = i;
|
||||
}
|
||||
for (int j = 1; j <= rhs.length(); j++) {
|
||||
distance[0][j] = j;
|
||||
}
|
||||
for (int i = 1; i <= lhs.length(); i++) {
|
||||
for (int j = 1; j <= rhs.length(); j++) {
|
||||
distance[i][j] = minimum(
|
||||
distance[i - 1][j] + 1,
|
||||
distance[i][j - 1] + 1,
|
||||
distance[i - 1][j - 1] + ((lhs.charAt(i - 1) == rhs.charAt(j - 1)) ? 0 : 1));
|
||||
}
|
||||
}
|
||||
return distance[lhs.length()][rhs.length()];
|
||||
}
|
||||
}
|
@ -0,0 +1,656 @@
|
||||
/*
|
||||
* To change this license header, choose License Headers in Project Properties.
|
||||
* To change this template file, choose Tools | Templates
|
||||
* and open the template in the editor.
|
||||
//https://stackoverflow.com/questions/43935229/hashmap-with-8-million-entries-becomes-slow
|
||||
//http://www2.imm.dtu.dk/pubdb/views/edoc_download.php/6364/pdf/imm6364.pdf
|
||||
*/
|
||||
package FunctionLayer;
|
||||
|
||||
import DataLayer.DataMapper;
|
||||
import FunctionLayer.StanfordParser.SentimentAnalyzerTest;
|
||||
import com.google.common.base.Stopwatch;
|
||||
import com.google.common.collect.ArrayListMultimap;
|
||||
import com.google.common.collect.MapMaker;
|
||||
import com.google.common.collect.Multimap;
|
||||
import edu.stanford.nlp.ling.HasWord;
|
||||
import edu.stanford.nlp.ling.Label;
|
||||
import edu.stanford.nlp.ling.TaggedWord;
|
||||
import edu.stanford.nlp.ling.Word;
|
||||
import edu.stanford.nlp.parser.lexparser.LexicalizedParser;
|
||||
import edu.stanford.nlp.parser.shiftreduce.ShiftReduceParser;
|
||||
import edu.stanford.nlp.process.DocumentPreprocessor;
|
||||
import edu.stanford.nlp.tagger.maxent.MaxentTagger;
|
||||
import edu.stanford.nlp.trees.Tree;
|
||||
import edu.stanford.nlp.trees.TreebankLanguagePack;
|
||||
import java.io.IOException;
|
||||
import java.io.StringReader;
|
||||
import java.sql.SQLException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.Iterator;
|
||||
import java.util.LinkedHashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Random;
|
||||
import java.util.concurrent.ConcurrentMap;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.function.Predicate;
|
||||
import java.util.logging.Level;
|
||||
import java.util.logging.Logger;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author install1
|
||||
*/
|
||||
public class MYSQLDatahandler {
|
||||
|
||||
public static final long EXPIRE_TIME_IN_SECONDS = TimeUnit.SECONDS.convert(10, TimeUnit.MINUTES);
|
||||
public static final long EXPIRE_TIME_IN_SECONDS1 = TimeUnit.SECONDS.convert(10, TimeUnit.HOURS);
|
||||
public static MYSQLDatahandler instance = new MYSQLDatahandler();
|
||||
public static int SemeticsUpdateCount;
|
||||
public static int threadCounter = 0;
|
||||
private volatile boolean RefreshMatrixFromDB;
|
||||
private final ConcurrentMap<Integer, String> StringCache;
|
||||
private List<SimilarityMatrix> SimilaritySMXList = new ArrayList();
|
||||
private List<String> multiprocessCalculations = new ArrayList();
|
||||
private final Stopwatch stopwatch;
|
||||
private final Stopwatch stopwatch1;
|
||||
|
||||
public MYSQLDatahandler() {
|
||||
this.stopwatch = Stopwatch.createUnstarted();
|
||||
this.stopwatch1 = Stopwatch.createStarted();
|
||||
this.StringCache = new MapMaker().concurrencyLevel(2).makeMap();
|
||||
}
|
||||
|
||||
private Map<Integer, String> getCache() throws SQLException, IOException, CustomError {
|
||||
List<String> strlist;
|
||||
strlist = DataMapper.getAllStrings();
|
||||
LinkedHashMap<Integer, String> LHM = new LinkedHashMap();
|
||||
int i = 0;
|
||||
for (String str : strlist) {
|
||||
LHM.put(i, str);
|
||||
i++;
|
||||
}
|
||||
return LHM;
|
||||
}
|
||||
|
||||
public void initiateMYSQL() throws SQLException, IOException {
|
||||
try {
|
||||
DataMapper.createTables();
|
||||
StringCache.putAll(getCache());
|
||||
SimilaritySMXList = DataMapper.getAllSementicMatrixes();
|
||||
} catch (CustomError ex) {
|
||||
Logger.getLogger(MYSQLDatahandler.class
|
||||
.getName()).log(Level.SEVERE, null, ex);
|
||||
}
|
||||
}
|
||||
|
||||
public synchronized void checkIfUpdateMatrixes() {
|
||||
RefreshMatrixFromDB = false;
|
||||
int calculationBoundaries = 10;
|
||||
int updateBadgesInteger = 500;
|
||||
if (stopwatch1.elapsed(TimeUnit.SECONDS) >= EXPIRE_TIME_IN_SECONDS1) {
|
||||
RefreshMatrixFromDB = true;
|
||||
if (threadCounter == 0) {
|
||||
try {
|
||||
SimilaritySMXList = DataMapper.getAllSementicMatrixes();
|
||||
stopwatch1.reset();
|
||||
} catch (CustomError ex) {
|
||||
Logger.getLogger(MYSQLDatahandler.class.getName()).log(Level.SEVERE, null, ex);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (StringCache.values().size() > 10) {
|
||||
if (!RefreshMatrixFromDB && multiprocessCalculations.size() <= (calculationBoundaries * calculationBoundaries)) {
|
||||
threadCounter++;
|
||||
List<String> strList = new ArrayList(StringCache.values());
|
||||
SemeticsUpdateCount = new Random().nextInt(strList.size() - 6);
|
||||
int beginindex = SemeticsUpdateCount;
|
||||
SemeticsUpdateCount += calculationBoundaries / 2;
|
||||
int temp = SemeticsUpdateCount;
|
||||
List<String> strIndexNavigator = new ArrayList();
|
||||
strList.subList(beginindex, temp).forEach((str) -> {
|
||||
strIndexNavigator.add(str);
|
||||
multiprocessCalculations.add(str);
|
||||
});
|
||||
new Thread(() -> {
|
||||
List<String> strIndexNavigatorL = new ArrayList(strIndexNavigator);
|
||||
List<String> strIndexAll = new ArrayList(strList);
|
||||
List<String> randomIndexesToUpdate = new ArrayList();
|
||||
int indexes = updateBadgesInteger;
|
||||
if (indexes >= strIndexAll.size()) {
|
||||
indexes = strIndexAll.size() - 1;
|
||||
}
|
||||
int beginindexes = new Random().nextInt((strIndexAll.size()) - indexes);
|
||||
strIndexAll.subList(beginindexes, beginindexes + indexes).forEach((str) -> {
|
||||
randomIndexesToUpdate.add(str);
|
||||
});
|
||||
List<SimilarityMatrix> matrixUpdateList = new ArrayList();
|
||||
strIndexNavigatorL.forEach((str) -> {
|
||||
randomIndexesToUpdate.stream().filter((str1) -> (!str.equals(str1))).forEachOrdered((str1) -> {
|
||||
boolean present = false;
|
||||
if (multiprocessCalculations.contains(str1)) {
|
||||
present = true;
|
||||
} else {
|
||||
for (SimilarityMatrix SMX : SimilaritySMXList) {
|
||||
if (SMX.getPrimaryString().equals(str) && SMX.getSecondaryString().equals(str1)) {
|
||||
present = true;
|
||||
break;
|
||||
}
|
||||
if (SMX.getPrimaryString().equals(str1) && SMX.getSecondaryString().equals(str)) {
|
||||
present = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (!present) {
|
||||
SimilarityMatrix SMX = new SimilarityMatrix(str, str1);
|
||||
double score = SentimentAnalyzerTest.sentimentanalyzing(str, str1);
|
||||
SMX.setDistance(score);
|
||||
matrixUpdateList.add(SMX);
|
||||
SimilaritySMXList.add(SMX);
|
||||
}
|
||||
});
|
||||
});
|
||||
new Thread(() -> {
|
||||
try {
|
||||
if (!matrixUpdateList.isEmpty()) {
|
||||
DataMapper.insertSementicMatrixes(matrixUpdateList);
|
||||
System.out.println("finished datamapper semetic insert");
|
||||
}
|
||||
threadCounter--;
|
||||
} catch (CustomError ex) {
|
||||
Logger.getLogger(MYSQLDatahandler.class
|
||||
.getName()).log(Level.SEVERE, null, ex);
|
||||
}
|
||||
}).start();
|
||||
}).
|
||||
start();
|
||||
try {
|
||||
wait(800);
|
||||
} catch (InterruptedException ex) {
|
||||
Logger.getLogger(MYSQLDatahandler.class.getName()).log(Level.SEVERE, null, ex);
|
||||
}
|
||||
} else {
|
||||
if (threadCounter == 0) {
|
||||
List<String> strList = new ArrayList(StringCache.values());
|
||||
List<SimilarityMatrix> matrixUpdateList = new ArrayList();
|
||||
List<String> randomStrList = new ArrayList();
|
||||
int indexes = updateBadgesInteger;
|
||||
if (indexes >= strList.size()) {
|
||||
indexes = strList.size() - 1;
|
||||
}
|
||||
int beginindexes = new Random().nextInt((strList.size()) - indexes);
|
||||
strList.subList(beginindexes, beginindexes + indexes).forEach((str) -> {
|
||||
randomStrList.add(str);
|
||||
});
|
||||
multiprocessCalculations.forEach((str) -> {
|
||||
randomStrList.forEach((str1) -> {
|
||||
boolean present = false;
|
||||
for (SimilarityMatrix SMX : SimilaritySMXList) {
|
||||
if (SMX.getPrimaryString().equals(str) && SMX.getSecondaryString().equals(str1)) {
|
||||
present = true;
|
||||
break;
|
||||
}
|
||||
if (SMX.getPrimaryString().equals(str1) && SMX.getSecondaryString().equals(str)) {
|
||||
present = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!present) {
|
||||
SimilarityMatrix SMX = new SimilarityMatrix(str, str1);
|
||||
double score = SentimentAnalyzerTest.sentimentanalyzing(str, str1);
|
||||
SMX.setDistance(score);
|
||||
matrixUpdateList.add(SMX);
|
||||
SimilaritySMXList.add(SMX);
|
||||
}
|
||||
});
|
||||
});
|
||||
try {
|
||||
if (!matrixUpdateList.isEmpty()) {
|
||||
DataMapper.insertSementicMatrixes(matrixUpdateList);
|
||||
System.out.println("finished datamapper semetic insert");
|
||||
}
|
||||
} catch (CustomError ex) {
|
||||
Logger.getLogger(MYSQLDatahandler.class
|
||||
.getName()).log(Level.SEVERE, null, ex);
|
||||
}
|
||||
multiprocessCalculations = new ArrayList();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public synchronized void checkIfUpdateStrings() throws CustomError {
|
||||
if (stopwatch.elapsed(TimeUnit.SECONDS) >= EXPIRE_TIME_IN_SECONDS || !stopwatch.isRunning()) {
|
||||
new Thread(() -> {
|
||||
List<String> str = MessageResponseHandler.getStr();
|
||||
str = cutContent(str);
|
||||
str = filterContent(str);
|
||||
str = removeSlacks(str);
|
||||
List<String> strUpdate = new ArrayList();
|
||||
strUpdate.addAll(str);
|
||||
try {
|
||||
DataMapper.InsertMYSQLStrings(strUpdate);
|
||||
} catch (CustomError ex) {
|
||||
Logger.getLogger(MYSQLDatahandler.class
|
||||
.getName()).log(Level.SEVERE, null, ex);
|
||||
}
|
||||
MessageResponseHandler.setStr(new ArrayList());
|
||||
int j = StringCache.size() + 1;
|
||||
for (String str1 : strUpdate) {
|
||||
StringCache.put(j, str1);
|
||||
j++;
|
||||
}
|
||||
}).start();
|
||||
if (!stopwatch.isRunning()) {
|
||||
stopwatch.start();
|
||||
} else {
|
||||
stopwatch.reset();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public String getResponseMsg(String str) throws CustomError {
|
||||
double Score = -10000;
|
||||
SimilarityMatrix SMXreturn = null;
|
||||
List<String> strLocal = new ArrayList(StringCache.values());
|
||||
for (String str1 : strLocal) {
|
||||
if (str.equals(str1)) {
|
||||
Iterator<SimilarityMatrix> SMXITR = SimilaritySMXList.iterator();
|
||||
while (SMXITR.hasNext()) {
|
||||
SimilarityMatrix SMX = SMXITR.next();
|
||||
if (SMX.getPrimaryString().equals(str) || SMX.getSecondaryString().equals(str)) {
|
||||
double smxDistance = SMX.getDistance();
|
||||
if (smxDistance > Score) {
|
||||
Score = smxDistance;
|
||||
SMXreturn = SMX;
|
||||
}
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (SMXreturn != null) {
|
||||
if (SMXreturn.getPrimaryString().equals(str)) {
|
||||
return SMXreturn.getSecondaryString();
|
||||
} else {
|
||||
return SMXreturn.getPrimaryString();
|
||||
}
|
||||
}
|
||||
String[] toArray = strLocal.toArray(new String[strLocal.size()]);
|
||||
String mostSimilarSTR = mostSimilar(str, toArray);
|
||||
Iterator<SimilarityMatrix> SMXITR = SimilaritySMXList.iterator();
|
||||
while (SMXITR.hasNext()) {
|
||||
System.out.println("mostSimilarSTR; " + mostSimilarSTR + "\n");
|
||||
mostSimilarSTR = mostSimilarSTR.trim();
|
||||
SimilarityMatrix SMX = SMXITR.next();
|
||||
if (SMX.getPrimaryString().trim().equals(mostSimilarSTR) || SMX.getSecondaryString().trim().equals(mostSimilarSTR)) {
|
||||
double smxDistance = SMX.getDistance();
|
||||
if (smxDistance > Score) {
|
||||
Score = smxDistance;
|
||||
SMXreturn = SMX;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (SMXreturn != null) {
|
||||
if (SMXreturn.getPrimaryString().equals(str)) {
|
||||
return SMXreturn.getSecondaryString();
|
||||
} else {
|
||||
return SMXreturn.getPrimaryString();
|
||||
}
|
||||
}
|
||||
SMXITR = SimilaritySMXList.iterator();
|
||||
while (SMXITR.hasNext()) {
|
||||
SimilarityMatrix SMX = SMXITR.next();
|
||||
if (SMX.getPrimaryString().contains(mostSimilarSTR) || SMX.getSecondaryString().contains(mostSimilarSTR)) {
|
||||
double smxDistance = SMX.getDistance();
|
||||
if (smxDistance > Score) {
|
||||
Score = smxDistance;
|
||||
SMXreturn = SMX;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (SMXreturn != null) {
|
||||
if (SMXreturn.getPrimaryString().equals(str)) {
|
||||
return SMXreturn.getSecondaryString();
|
||||
} else {
|
||||
return SMXreturn.getPrimaryString();
|
||||
}
|
||||
}
|
||||
return "how did you manage to reach this, AAAAAAAAAAAA end my suffering";
|
||||
}
|
||||
|
||||
public String mostSimilar(String toBeCompared, String[] strings) {
|
||||
int minDistance = Integer.MAX_VALUE;
|
||||
String similar = "";
|
||||
for (String str : strings) {
|
||||
int d = LevenshteinDistance.computeLevenshteinDistance(str, toBeCompared);
|
||||
if (d < minDistance) {
|
||||
minDistance = d;
|
||||
similar = str;
|
||||
}
|
||||
}
|
||||
return similar;
|
||||
}
|
||||
|
||||
public static List<String> cutContent(List<String> str) {
|
||||
List<String> returnlist = new ArrayList();
|
||||
for (String str1 : str) {
|
||||
int iend = str1.indexOf("content: ");
|
||||
if (iend != -1) {
|
||||
String trs = str1.substring(iend + 9);
|
||||
returnlist.add(trs.substring(0, trs.length() - 1));
|
||||
}
|
||||
}
|
||||
return returnlist;
|
||||
}
|
||||
|
||||
public static List<String> filterContent(List<String> str) {
|
||||
List<String> strlistreturn = new ArrayList();
|
||||
for (String str1 : str) {
|
||||
if (str1.isEmpty() || str1.length() < 3) {
|
||||
continue;
|
||||
}
|
||||
str1 = str1.trim();
|
||||
if (str1.contains("PM*")) {
|
||||
str1 = str1.substring(str1.indexOf("PM*") + 5);
|
||||
}
|
||||
if (str1.contains("AM*")) {
|
||||
str1 = str1.substring(str1.indexOf("AM*") + 5);
|
||||
}
|
||||
for (Character c : str1.toCharArray()) {
|
||||
if (c == '?' || c == '°') {
|
||||
str1 = str1.replace("?", " <:wlenny:514861023002624001> ");
|
||||
str1 = str1.replace("°", " <:wlenny:514861023002624001> ");
|
||||
}
|
||||
}
|
||||
if (str1.contains("(Counter-Terrorist)")) {
|
||||
str1 = str1.replace("(Counter-Terrorist)", " ");
|
||||
}
|
||||
if (str1.contains("(Terrorist)")) {
|
||||
str1 = str1.replace("(Terrorist)", " ");
|
||||
}
|
||||
if (str1.contains("(Spectator)")) {
|
||||
str1 = str1.replace("(Spectator)", " ");
|
||||
}
|
||||
if (str1.contains("*DEAD*")) {
|
||||
str1 = str1.replace("*DEAD*", " ");
|
||||
}
|
||||
if (str1.contains("{red}")) {
|
||||
str1 = str1.replace("{red}", " ");
|
||||
}
|
||||
if (str1.contains("{orange}")) {
|
||||
str1 = str1.replace("{orange}", " ");
|
||||
}
|
||||
if (str1.contains("{yellow}")) {
|
||||
str1 = str1.replace("{yellow}", " ");
|
||||
}
|
||||
if (str1.contains("{green}")) {
|
||||
str1 = str1.replace("{green}", " ");
|
||||
}
|
||||
if (str1.contains("{lightblue}")) {
|
||||
str1 = str1.replace("{lightblue}", " ");
|
||||
}
|
||||
if (str1.contains("{blue}")) {
|
||||
str1 = str1.replace("{blue}", " ");
|
||||
}
|
||||
if (str1.contains("{purple}")) {
|
||||
str1 = str1.replace("{purple}", " ");
|
||||
}
|
||||
if (str1.contains("{white}")) {
|
||||
str1 = str1.replace("{white}", " ");
|
||||
}
|
||||
str1 = str1.trim();
|
||||
if (str1.length() > 2 && (!str1.startsWith("!"))) {
|
||||
strlistreturn.add(str1);
|
||||
}
|
||||
}
|
||||
return strlistreturn;
|
||||
}
|
||||
|
||||
/*
|
||||
public static List<String> cutLongsFromEmotes(List<String> str) {
|
||||
List<String> strreturn = new ArrayList();
|
||||
int emotesTraceLong = 17;
|
||||
for (String str1 : str) {
|
||||
StringBuilder SB = new StringBuilder();
|
||||
int counter = 0;
|
||||
int i = 0;
|
||||
for (Character c : str1.toCharArray()) {
|
||||
i++;
|
||||
if (Character.isDigit(c)) {
|
||||
counter++;
|
||||
if (counter > emotesTraceLong && str1.length() > i + 2) {
|
||||
SB.append(str1.substring(0, i - emotesTraceLong));
|
||||
SB.append(str1.substring(i + 1, str1.length()));
|
||||
}
|
||||
} else {
|
||||
counter = 0;
|
||||
}
|
||||
}
|
||||
if (SB.toString().isEmpty()) {
|
||||
strreturn.add(str1);
|
||||
} else {
|
||||
strreturn.add(SB.toString());
|
||||
}
|
||||
}
|
||||
return strreturn;
|
||||
}
|
||||
*/
|
||||
/**
 * Filters a list of candidate sentences, keeping only those that look
 * "substantial" enough to store: a sentence survives only if it yields more
 * than 3 distinct POS tags, more than 3 distinct words, more than 3 distinct
 * HasWord tokens, and is not within Levenshtein distance 5 of any string
 * already in StringCache (presumably the cache of already-stored sentences —
 * TODO confirm).
 *
 * Uses the shared shift-reduce parser and tagger from SentimentAnalyzerTest,
 * so shiftReduceParserInitiate() must have run first (not checked here —
 * NOTE(review): a null model/tagger would NPE).
 */
private List<String> removeSlacks(List<String> str) {
    ShiftReduceParser model = SentimentAnalyzerTest.getModel();
    MaxentTagger tagger = SentimentAnalyzerTest.getTagger();
    List<TaggedWord> taggedWords;
    List<String> strreturn = new ArrayList();
    for (String str1 : str) {
        // counter = number of distinct, non-punctuation POS tags seen so far.
        int counter = 0;
        List<String> TGWList = new ArrayList();
        DocumentPreprocessor tokenizer = new DocumentPreprocessor(new StringReader(str1));
        for (List<HasWord> sentence : tokenizer) {
            List<TaggedWord> tagged1 = tagger.tagSentence(sentence);
            Tree tree = model.apply(tagged1);
            taggedWords = tree.taggedYield();
            for (TaggedWord TGW : taggedWords) {
                // Count a tag only once; skip ":" and tokens whose word equals
                // its own tag (symbols/punctuation tagged as themselves).
                if (!TGWList.contains(TGW.tag()) && !TGW.tag().equals(":") && !TGW.word().equals(TGW.tag())) {
                    TGWList.add(TGW.tag());
                    counter++;
                }
                if (counter > 3) {
                    // Second gate: more than 3 distinct words in the parse yield.
                    int addCounter = 0;
                    List<Word> wordList = new ArrayList();
                    for (Word lab : tree.yieldWords()) {
                        if (lab != null && lab.word() != null) {
                            if (!wordList.contains(lab) && !lab.equals(":")) {
                                wordList.add(lab);
                                addCounter++;
                            }
                        }
                    }
                    if (addCounter > 3) {
                        // Third gate: more than 3 distinct HasWord tokens.
                        addCounter = 0;
                        List<HasWord> HWlist = new ArrayList();
                        for (HasWord HW : tree.yieldHasWord()) {
                            if (HW != null && HW.word() != null && !HWlist.contains(HW)) {
                                addCounter++;
                                HWlist.add(HW);
                            }
                        }
                        if (addCounter > 3) {
                            // Final gate: reject near-duplicates of cached strings.
                            boolean tooclosematch = false;
                            for (String strVals : StringCache.values()) {
                                double Distance = LevenshteinDistance.computeLevenshteinDistance(strVals, str1);
                                int maxpermittedDistance = 5;
                                if (Distance < maxpermittedDistance) {
                                    tooclosematch = true;
                                    break;
                                }
                            }
                            if (!tooclosematch) {
                                strreturn.add(str1);
                            }
                        }
                    }
                    // Decision made for this sentence; stop scanning its tags.
                    break;
                }
            }
            if (counter > 3) {
                // Sentence already accepted/rejected; skip remaining sentences
                // of the same input string.
                break;
            }
        }
    }
    return strreturn;
}
|
||||
|
||||
/**
|
||||
*
|
||||
* @throws CustomError
|
||||
* @deprecated
|
||||
*/
|
||||
public synchronized void calculateStrings() throws CustomError {
|
||||
//linkedhashmap?
|
||||
int initiallimit = 5;
|
||||
int listindex = 0;
|
||||
List<SimilarityMatrix> WS4JList = DataMapper.getAllSementicMatrixes();
|
||||
List<SimilarityMatrix> WS4JListUpdate = new ArrayList();
|
||||
List<String> sentencesList = DataMapper.getAllStrings();
|
||||
for (int i = 1; i < initiallimit; i++) {
|
||||
if (!sentencesList.get(i).isEmpty()) {
|
||||
//System.out.print("i: " + i + "\n sentencesList i: " + sentencesList.get(i) + "\n");
|
||||
String[] words1 = sentencesList.get(i).split(" ");
|
||||
for (String words11 : words1) {
|
||||
if (!words11.isEmpty() && words11.length() > 3) {
|
||||
String str = words11;
|
||||
if (!str.isEmpty() && str.length() > 3) {
|
||||
//SecondaryPredicate, no secondary key present with word
|
||||
Predicate<SimilarityMatrix> SecondaryPredicate = e -> str.equals(e.getSecondaryString());
|
||||
//no primary key present with the word
|
||||
if (!WS4JList.stream().filter(e -> str.equals(e.getPrimaryString())).findAny().isPresent()) {
|
||||
WS4JListUpdate = createWS4JWordMatrix(str, sentencesList, WS4JListUpdate, WS4JList, SecondaryPredicate);
|
||||
for (; listindex < WS4JListUpdate.size(); listindex++) {
|
||||
WS4JList.add(WS4JListUpdate.get(listindex));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
//System.out.println("\nWS4JListUpdate size: " + WS4JListUpdate.size());
|
||||
DataMapper.insertSementicMatrixes(WS4JListUpdate);
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
* @param str
|
||||
* @param strlist
|
||||
* @param ws4jlist
|
||||
* @param oldlist
|
||||
* @param SecondaryPredicate
|
||||
* @return
|
||||
* @deprecated
|
||||
*/
|
||||
public List<SimilarityMatrix> createWS4JWordMatrix(String str, List<String> strlist, List<SimilarityMatrix> ws4jlist,
|
||||
List<SimilarityMatrix> oldlist, Predicate<SimilarityMatrix> SecondaryPredicate) {
|
||||
for (String str1 : strlist) {
|
||||
if (!str1.isEmpty()) {
|
||||
String[] words1 = str1.split(" ");
|
||||
for (int j = 0; j < words1.length; j++) {
|
||||
if (!words1[j].isEmpty() && words1[j].length() > 3) {
|
||||
String strword = words1[j];
|
||||
if (!strword.isEmpty() && strword.length() > 3 && !strword.equals(str)) {
|
||||
Predicate<SimilarityMatrix> PrimaryPredicate = e -> strword.equals(e.getPrimaryString());
|
||||
if (!oldlist.stream().filter(PrimaryPredicate.and(SecondaryPredicate)).findAny().isPresent()) {
|
||||
//System.out.println("creating SimilarityMatrix with STR: " + str + "\n strword: " + strword + "\n");
|
||||
SimilarityMatrix ws4j = new SimilarityMatrix(str, strword);
|
||||
/*
|
||||
double addingDistance = ws4j.getDistanceCalculations();
|
||||
if (addingDistance > 0.00) {
|
||||
//System.out.println("added! \n");
|
||||
ws4j.setDistance(addingDistance);
|
||||
ws4jlist.add(ws4j);
|
||||
}
|
||||
*/
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return ws4jlist;
|
||||
}
|
||||
|
||||
/**
 * Stanford Parser method to update calculations to the DB.
 * <p>
 * NOTE(review): the loop bound is {@code size() - 3000}, so nothing at all is
 * processed unless more than 3000 sentences are stored, and the last 3000 are
 * always skipped — looks like a throttling hack, TODO confirm intent.
 * The matrix-building call inside the primary-key check is commented out, so
 * WS4JListUpdate stays empty and the final insert is effectively a no-op.
 *
 * @deprecated
 * @throws FunctionLayer.CustomError propagated from the data layer
 */
public void sentimentAnalyzingStringsToDB() throws CustomError {

    List<String> sentencesList = DataMapper.getAllStrings();
    List<SimilarityMatrix> WS4JList = DataMapper.getAllSementicMatrixes();
    List<SimilarityMatrix> WS4JListUpdate = new ArrayList();
    int listindex = 0;
    for (int i = 0; i < sentencesList.size() - 3000; i++) {
        String str = sentencesList.get(i);
        if (!str.isEmpty() && str.length() > 3) {
            // Matches entries that already use this sentence as secondary key.
            Predicate<SimilarityMatrix> SecondaryPredicate = e -> str.equals(e.getSecondaryString());
            // Only process sentences not yet present as a primary key.
            if (!WS4JList.stream().filter(e -> str.equals(e.getPrimaryString())).findAny().isPresent()) {
                //WS4JListUpdate = addStringMatrixes(str, sentencesList, SecondaryPredicate, WS4JListUpdate, WS4JList);
                // Mirror any new entries into WS4JList; listindex persists
                // across iterations so each entry is copied exactly once.
                for (; listindex < WS4JListUpdate.size(); listindex++) {
                    WS4JList.add(WS4JListUpdate.get(listindex));
                }
            }
        }
    }
    DataMapper.insertSementicMatrixes(WS4JListUpdate);
}
|
||||
|
||||
/**
 * @deprecated @param str Primary string which is checked, the filter
 * ensures primary string has not been calculated yet
 * @param sentencesList the full string list where each unique primary has
 * to calculate its value to the rest of the DB
 * @param SecondaryPredicate ensures primary string is not already
 * calculated somewhere with another string
 * @param WS4JListUpdate Matrix list to update in DB with new Sentences
 * @param OldWS4JList Check if str1 already has primary entry with
 * co-responding SecondaryPredicate
 * @return Updated List which has to be inserted to the DB
 *
 * NOTE(review): the actual score computation was removed — {@code s} is fixed
 * at -100.0 and {@code valuerange} is also -100.0, so {@code s > valuerange}
 * is always false and nothing is ever added; lp/tlp parameters are unused.
 * The stream filtering still runs for every sentence pairing.
 */
private List<SimilarityMatrix> addStringMatrixes(String str, List<String> sentencesList,
        Predicate<SimilarityMatrix> SecondaryPredicate, List<SimilarityMatrix> WS4JListUpdate,
        List<SimilarityMatrix> OldWS4JList, LexicalizedParser lp, TreebankLanguagePack tlp) {
    double valuerange = -100.0;
    for (int i = 0; i < sentencesList.size(); i++) {
        String str1 = sentencesList.get(i);
        if (!str1.isEmpty() && str1.length() > 3) {
            Predicate<SimilarityMatrix> PrimaryPredicate = e -> str1.equals(e.getPrimaryString());
            if (!OldWS4JList.stream().filter(PrimaryPredicate.and(SecondaryPredicate)).findAny().isPresent()) {
                // Placeholder for the removed score calculation — dead branch below.
                double s = -100.0;
                if (s > valuerange) {
                    SimilarityMatrix SMX = new SimilarityMatrix(str, str1);
                    SMX.setDistance(s);
                    WS4JListUpdate.add(SMX);
                }
            }
        }
    }
    /*
    str parameter is primary and not used as primary if reaching here
    secondary predicate ensures primary does not already exist as secondary with co-responding strlist primary
    */
    return WS4JListUpdate;
}
|
||||
|
||||
}
|
@ -0,0 +1,47 @@
|
||||
/*
|
||||
* To change this license header, choose License Headers in Project Properties.
|
||||
* To change this template file, choose Tools | Templates
|
||||
* and open the template in the editor.
|
||||
*/
|
||||
package FunctionLayer;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
//https://www.programcreek.com/java-api-examples/index.php?source_dir=simmetrics-master/simmetrics-core/src/main/java/org/simmetrics/metrics/JaroWinkler.java#
|
||||
//https://stackoverflow.com/questions/36032958/semantic-matching-in-ws4j-at-sentence-level
|
||||
|
||||
/**
|
||||
*
|
||||
* @author install1
|
||||
*/
|
||||
public class MessageResponseHandler {
|
||||
|
||||
private static List<String> str = new ArrayList();
|
||||
|
||||
public static List<String> getStr() {
|
||||
return str;
|
||||
}
|
||||
|
||||
public static void setStr(List<String> str) {
|
||||
MessageResponseHandler.str = str;
|
||||
}
|
||||
|
||||
public static void getMessage(String message) {
|
||||
if (message != null && !message.isEmpty()) {
|
||||
message = message.replace("@", "");
|
||||
if (message.startsWith("[ *")) {
|
||||
message = message.substring(message.indexOf("]"));
|
||||
}
|
||||
str.add(message);
|
||||
}
|
||||
}
|
||||
|
||||
public static String selectReponseMessage(String toString) throws CustomError {
|
||||
List<String> str1 = new ArrayList();
|
||||
str1.add(toString);
|
||||
str1 = MYSQLDatahandler.cutContent(str1);
|
||||
String strreturn = str1.get(0);
|
||||
String getResponseMsg = MYSQLDatahandler.instance.getResponseMsg(strreturn);
|
||||
return getResponseMsg;
|
||||
}
|
||||
}
|
@ -0,0 +1,104 @@
|
||||
/*
|
||||
* To change this license header, choose License Headers in Project Properties.
|
||||
* To change this template file, choose Tools | Templates
|
||||
* and open the template in the editor.
|
||||
*/
|
||||
package FunctionLayer;
|
||||
|
||||
import com.google.common.collect.Multimap;
|
||||
import com.google.common.collect.Multiset;
|
||||
import java.util.Collection;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
|
||||
/**
 * Value object pairing two strings with a similarity score ("distance").
 * Plain data holder; the WS4J-based scoring lives in
 * FunctionLayer.StanfordParser.SimilarityMatrix.
 *
 * @author install1
 */
public class SimilarityMatrix {

    private String PrimaryString;   // first string of the pair
    private String SecondaryString; // second string of the pair
    private double distance;        // similarity score; 0.0 until set

    /** Creates a pair with an unset (0.0) distance. */
    public SimilarityMatrix(String str1, String str2) {
        PrimaryString = str1;
        SecondaryString = str2;
    }

    /** Creates a pair with a precomputed distance. */
    public SimilarityMatrix(String str1, String str2, double result) {
        PrimaryString = str1;
        SecondaryString = str2;
        distance = result;
    }

    public double getDistance() {
        return distance;
    }

    public void setDistance(double distance) {
        this.distance = distance;
    }

    public String getPrimaryString() {
        return PrimaryString;
    }

    public void setPrimaryString(String PrimaryString) {
        this.PrimaryString = PrimaryString;
    }

    public String getSecondaryString() {
        return SecondaryString;
    }

    public void setSecondaryString(String SecondaryString) {
        this.SecondaryString = SecondaryString;
    }
}
|
@ -0,0 +1,259 @@
|
||||
package FunctionLayer.StanfordParser;
|
||||
|
||||
import FunctionLayer.LevenshteinDistance;
|
||||
import edu.stanford.nlp.ling.CoreAnnotations;
|
||||
import edu.stanford.nlp.ling.CoreLabel;
|
||||
import edu.stanford.nlp.ling.HasWord;
|
||||
import edu.stanford.nlp.ling.IndexedWord;
|
||||
import edu.stanford.nlp.ling.Label;
|
||||
import edu.stanford.nlp.ling.TaggedWord;
|
||||
import edu.stanford.nlp.neural.rnn.RNNCoreAnnotations;
|
||||
import edu.stanford.nlp.parser.lexparser.LexicalizedParser;
|
||||
import edu.stanford.nlp.parser.shiftreduce.ShiftReduceParser;
|
||||
import edu.stanford.nlp.pipeline.Annotation;
|
||||
import edu.stanford.nlp.pipeline.StanfordCoreNLP;
|
||||
import edu.stanford.nlp.process.DocumentPreprocessor;
|
||||
import edu.stanford.nlp.process.Tokenizer;
|
||||
import edu.stanford.nlp.sentiment.SentimentCoreAnnotations;
|
||||
import edu.stanford.nlp.tagger.maxent.MaxentTagger;
|
||||
import edu.stanford.nlp.trees.Constituent;
|
||||
import edu.stanford.nlp.trees.GrammaticalRelation;
|
||||
import edu.stanford.nlp.trees.GrammaticalStructure;
|
||||
import edu.stanford.nlp.trees.GrammaticalStructureFactory;
|
||||
import edu.stanford.nlp.trees.Tree;
|
||||
import edu.stanford.nlp.trees.TreeCoreAnnotations;
|
||||
import edu.stanford.nlp.trees.TreebankLanguagePack;
|
||||
import edu.stanford.nlp.trees.TypedDependency;
|
||||
import edu.stanford.nlp.trees.tregex.gui.Tdiff;
|
||||
import edu.stanford.nlp.util.CoreMap;
|
||||
import java.io.StringReader;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.List;
|
||||
import java.util.Properties;
|
||||
import java.util.Set;
|
||||
import org.ejml.simple.SimpleMatrix;
|
||||
|
||||
/*
|
||||
* To change this license header, choose License Headers in Project Properties.
|
||||
* To change this template file, choose Tools | Templates
|
||||
* and open the template in the editor.
|
||||
*/
|
||||
/**
 * Holds the shared Stanford NLP machinery (shift-reduce parser, POS tagger,
 * lexicalized parser, two CoreNLP pipelines) and scores how well two strings
 * "match" by accumulating bonuses/penalties across POS tags, constituency
 * diffs, grammatical relations, lemmas, sentiment-RNN matrices, and
 * Levenshtein distance.
 *
 * NOTE(review): all state is static and initialized by
 * shiftReduceParserInitiate(); calling sentimentanalyzing() before that would
 * NPE. The scoring constants (64/256/900/1300/1424/1500, the *16/*25/*15
 * multipliers) are hand-tuned magic numbers with no grounding visible here.
 *
 * @author install1
 */
public class SentimentAnalyzerTest {

    // Classpath locations of the Stanford models.
    private static String modelPath = "edu/stanford/nlp/models/srparser/englishSR.ser.gz";
    private static String sentimentModel = "edu/stanford/nlp/models/sentiment/sentiment.ser.gz"; // unused here
    private static String parserModelPathUD = "edu/stanford/nlp/models/parser/nndep/english_UD.gz"; // unused here
    private static String lexParserEnglishRNN = "edu/stanford/nlp/models/lexparser/englishRNN.ser.gz";
    private static String taggerPath = "edu/stanford/nlp/models/pos-tagger/english-left3words/english-left3words-distsim.tagger";
    private static MaxentTagger tagger;
    private static ShiftReduceParser model;
    private static String[] options = {"-maxLength", "100"};
    private static LexicalizedParser lp;
    private static TreebankLanguagePack tlp;
    private static Properties props = new Properties();
    private static Properties propsSentiment = new Properties();
    private static GrammaticalStructureFactory gsf;
    private static StanfordCoreNLP pipeline;          // parse pipeline (shift-reduce model)
    private static StanfordCoreNLP pipelineSentiment; // sentiment pipeline (RNN lex parser)

    /**
     * Loads all models and builds both CoreNLP pipelines. Must run once before
     * any scoring; loading is expensive (several GB of models — see NOTES.txt).
     */
    public static void shiftReduceParserInitiate() {
        model = ShiftReduceParser.loadModel(modelPath, options);
        tagger = new MaxentTagger(taggerPath);
        lp = LexicalizedParser.loadModel(lexParserEnglishRNN, options);
        tlp = lp.getOp().langpack();
        gsf = tlp.grammaticalStructureFactory();
        props.setProperty("annotators", "tokenize,ssplit,pos,lemma,parse");
        // use faster shift reduce parser for the parse pipeline
        props.setProperty("parse.model", modelPath);
        props.setProperty("parse.maxlen", "100");
        props.setProperty("parse.binaryTrees", "true");
        propsSentiment.setProperty("annotators", "tokenize, ssplit, parse, sentiment");
        propsSentiment.setProperty("parse.model", lexParserEnglishRNN);
        propsSentiment.setProperty("parse.maxlen", "100");
        pipeline = new StanfordCoreNLP(props);
        pipelineSentiment = new StanfordCoreNLP(propsSentiment);
    }

    /** @return the shared shift-reduce parser (null before initiate). */
    public static ShiftReduceParser getModel() {
        return model;
    }

    /** @return the shared POS tagger (null before initiate). */
    public static MaxentTagger getTagger() {
        return tagger;
    }

    /**
     * Scores the similarity of {@code str} and {@code str1}, starting at
     * -100.0 and accumulating bonuses/penalties. Higher means more similar.
     * Side effects: prints progress to stdout.
     */
    public static double sentimentanalyzing(String str, String str1) {
        double score = -100.0;
        List<List<TaggedWord>> taggedwordlist1 = new ArrayList();
        List<List<TaggedWord>> taggedwordlist2 = new ArrayList();
        // Tag + parse each sentence of str1, collecting tagged yields.
        DocumentPreprocessor tokenizer = new DocumentPreprocessor(new StringReader(str1));
        for (List<HasWord> sentence : tokenizer) {
            List<TaggedWord> tagged1 = tagger.tagSentence(sentence);
            Tree tree = model.apply(tagged1);
            ArrayList<TaggedWord> taggedYield = tree.taggedYield();
            taggedwordlist1.add(taggedYield);
        }
        // Same for str.
        tokenizer = new DocumentPreprocessor(new StringReader(str));
        for (List<HasWord> sentence : tokenizer) {
            List<TaggedWord> tagged1 = tagger.tagSentence(sentence);
            Tree tree = model.apply(tagged1);
            ArrayList<TaggedWord> taggedYield = tree.taggedYield();
            taggedwordlist2.add(taggedYield);
        }
        // Penalize token-count mismatch: -16 per token of difference.
        int counter = 0;
        int counter1 = 0;
        for (List<TaggedWord> taggedlist2 : taggedwordlist2) {
            counter += taggedlist2.size();
        }
        for (List<TaggedWord> taggedlist1 : taggedwordlist1) {
            counter1 += taggedlist1.size();
        }
        int overValue = counter >= counter1 ? counter - counter1 : counter1 - counter;
        overValue *= 16;
        while (overValue > 0) {
            overValue--;
            score--;
        }
        System.out.println("Score Post overValue: " + score + "\n");
        // +64 for each shared POS tag (deduped per str1-token via tgwlist1;
        // the added tag equals TGW1's tag inside the matching branch).
        for (List<TaggedWord> TGWList : taggedwordlist1) {
            for (TaggedWord TGW : TGWList) {
                List<String> tgwlist1 = new ArrayList();
                for (List<TaggedWord> taggedlist2 : taggedwordlist2) {
                    for (TaggedWord TGW1 : taggedlist2) {
                        if (TGW.tag().equals(TGW1.tag()) && !TGW.tag().equals(":") && !tgwlist1.contains(TGW1.tag())) {
                            score += 64;
                            tgwlist1.add(TGW.tag());
                        }
                    }
                }
            }
        }
        // Constituency parses of str1 via the parse pipeline.
        Annotation annotation = new Annotation(str1);
        pipeline.annotate(annotation);
        List<Tree> sentenceConstituencyParseList = new ArrayList();
        for (CoreMap sentence : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
            Tree sentenceConstituencyParse = sentence.get(TreeCoreAnnotations.TreeAnnotation.class);
            sentenceConstituencyParseList.add(sentenceConstituencyParse);
        }
        Annotation annotation1 = new Annotation(str);
        pipeline.annotate(annotation1);
        // Compare every sentence parse of str against every parse of str1.
        for (CoreMap sentence : annotation1.get(CoreAnnotations.SentencesAnnotation.class)) {
            Tree sentenceConstituencyParse = sentence.get(TreeCoreAnnotations.TreeAnnotation.class);
            GrammaticalStructure gs = gsf.newGrammaticalStructure(sentenceConstituencyParse);
            Collection<TypedDependency> allTypedDependencies = gs.allTypedDependencies();
            List<String> filerTreeContent = new ArrayList();
            for (Tree sentenceConstituencyParse1 : sentenceConstituencyParseList) {
                // +64 per constituent label appearing in both tree diffs.
                Set<Constituent> inT1notT2 = Tdiff.markDiff(sentenceConstituencyParse, sentenceConstituencyParse1);
                Set<Constituent> inT2notT1 = Tdiff.markDiff(sentenceConstituencyParse1, sentenceConstituencyParse);
                List<String> constiLabels = new ArrayList();
                for (Constituent consti : inT1notT2) {
                    for (Constituent consti1 : inT2notT1) {
                        if (consti.value().equals(consti1.value()) && !constiLabels.contains(consti.value())) {
                            score += 64; //256
                            constiLabels.add(consti.value());
                        }
                    }
                }
                // Cross-check typed dependencies of each parse against the
                // other tree: +900 when the cross-structure relation applies,
                // +256 when the dependency's own relation applies.
                GrammaticalStructure gs1 = gsf.newGrammaticalStructure(sentenceConstituencyParse1);
                Collection<TypedDependency> allTypedDependencies1 = gs1.allTypedDependencies();
                for (TypedDependency TDY1 : allTypedDependencies1) {
                    IndexedWord dep = TDY1.dep();
                    IndexedWord gov = TDY1.gov();
                    GrammaticalRelation grammaticalRelation = gs.getGrammaticalRelation(gov, dep);
                    if (grammaticalRelation.isApplicable(sentenceConstituencyParse)) {
                        score += 900;
                    }
                    GrammaticalRelation reln = TDY1.reln();
                    if (reln.isApplicable(sentenceConstituencyParse)) {
                        score += 256;
                    }
                }
                for (TypedDependency TDY : allTypedDependencies) {
                    IndexedWord dep = TDY.dep();
                    IndexedWord gov = TDY.gov();
                    GrammaticalRelation grammaticalRelation = gs1.getGrammaticalRelation(gov, dep);
                    if (grammaticalRelation.isApplicable(sentenceConstituencyParse)) {
                        score += 900;
                    }
                    GrammaticalRelation reln = TDY.reln();
                    if (reln.isApplicable(sentenceConstituencyParse1)) {
                        score += 256;
                    }
                }
                // +1500 per shared lemma, deduped across this str-sentence.
                for (CoreLabel LBW : sentenceConstituencyParse.taggedLabeledYield()) {
                    for (CoreLabel LBW1 : sentenceConstituencyParse1.taggedLabeledYield()) {
                        if (LBW.lemma().equals(LBW1.lemma()) && !filerTreeContent.contains(LBW.lemma())) {
                            filerTreeContent.add(LBW.lemma());
                            score += 1500;
                        }
                    }
                }
            }
        }
        // Sentiment-RNN comparison: collect prediction and node-vector
        // matrices for str, then compare each sentence of str1 against them.
        Annotation annotationSentiment1 = pipelineSentiment.process(str);
        List<SimpleMatrix> simpleSMXlist = new ArrayList();
        List<SimpleMatrix> simpleSMXlistVector = new ArrayList();
        for (CoreMap sentence : annotationSentiment1.get(CoreAnnotations.SentencesAnnotation.class)) {
            Tree tree = sentence.get(SentimentCoreAnnotations.SentimentAnnotatedTree.class);
            SimpleMatrix predictions = RNNCoreAnnotations.getPredictions(tree);
            SimpleMatrix nodeVector = RNNCoreAnnotations.getNodeVector(tree);
            simpleSMXlist.add(predictions);
            simpleSMXlistVector.add(nodeVector);
        }
        annotationSentiment1 = pipelineSentiment.process(str1);
        for (CoreMap sentence : annotationSentiment1.get(CoreAnnotations.SentencesAnnotation.class)) {
            Tree tree = sentence.get(SentimentCoreAnnotations.SentimentAnnotatedTree.class);
            SimpleMatrix predictions = RNNCoreAnnotations.getPredictions(tree);
            SimpleMatrix nodeVector = RNNCoreAnnotations.getNodeVector(tree);
            for (SimpleMatrix simpleSMX : simpleSMXlist) {
                // Penalize prediction divergence: 25 points per unit of
                // |dot*100 - 50|.
                double dot = predictions.dot(simpleSMX) * 100;
                double subtracter = dot > 50 ? dot - 50 : 50 - dot;
                System.out.println("score pre dot: " + score + "\nsubtracter: " + subtracter + "\n");
                subtracter *= 25;
                while (subtracter > 0) {
                    subtracter--;
                    score--;
                }
                System.out.println("score post dot: " + score + "\n");
            }
            for (SimpleMatrix simpleSMX : simpleSMXlistVector) {
                double dot = nodeVector.dot(simpleSMX);
                // Kronecker element sum rounded to 2 decimals (long / double
                // division, so the fraction survives).
                double elementSum = nodeVector.kron(simpleSMX).elementSum();
                elementSum = Math.round(elementSum * 100.0) / 100.0;
                System.out.println("kron SMX elementSum: " + elementSum + "\n");
                if (dot < 0.1) {
                    score += 256;
                }
                if (elementSum < 0.1 && elementSum > 0.0) {
                    score += 1300;
                } else if (elementSum > 0.1 && elementSum < 1.0) {
                    score -= 1100;
                } else {
                    score -= 1424;
                }
            }
        }
        // Final penalty: 15 points per unit of Levenshtein distance.
        int SentenceScoreDiff = LevenshteinDistance.computeLevenshteinDistance(str, str1);
        SentenceScoreDiff *= 15;
        while (SentenceScoreDiff > 0) {
            SentenceScoreDiff--;
            score--;
        }
        System.out.println("Final current score: " + score + "\nSentences: " + str + "\n" + str1 + "\n\n\n");
        return score;
    }
}
|
@ -0,0 +1,114 @@
|
||||
/*
|
||||
* To change this license header, choose License Headers in Project Properties.
|
||||
* To change this template file, choose Tools | Templates
|
||||
* and open the template in the editor.
|
||||
*/
|
||||
package FunctionLayer.StanfordParser;
|
||||
|
||||
import edu.cmu.lti.lexical_db.ILexicalDatabase;
|
||||
import edu.cmu.lti.lexical_db.NictWordNet;
|
||||
import edu.cmu.lti.ws4j.RelatednessCalculator;
|
||||
import edu.cmu.lti.ws4j.impl.HirstStOnge;
|
||||
import edu.cmu.lti.ws4j.impl.JiangConrath;
|
||||
import edu.cmu.lti.ws4j.impl.LeacockChodorow;
|
||||
import edu.cmu.lti.ws4j.impl.Lesk;
|
||||
import edu.cmu.lti.ws4j.impl.Lin;
|
||||
import edu.cmu.lti.ws4j.impl.Path;
|
||||
import edu.cmu.lti.ws4j.impl.Resnik;
|
||||
import edu.cmu.lti.ws4j.impl.WuPalmer;
|
||||
import edu.cmu.lti.ws4j.util.WS4JConfiguration;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author install1
|
||||
*/
|
||||
public class SimilarityMatrix {
|
||||
|
||||
private String PrimaryString;
|
||||
private String SecondaryString;
|
||||
private double distance;
|
||||
|
||||
public double getDistance() {
|
||||
return distance;
|
||||
}
|
||||
|
||||
public void setDistance(double distance) {
|
||||
this.distance = distance;
|
||||
}
|
||||
|
||||
public SimilarityMatrix(String str1, String str2) {
|
||||
this.PrimaryString = str1;
|
||||
this.SecondaryString = str2;
|
||||
}
|
||||
|
||||
public SimilarityMatrix(String str1, String str2, double result) {
|
||||
this.PrimaryString = str1;
|
||||
this.SecondaryString = str2;
|
||||
this.distance = result;
|
||||
}
|
||||
|
||||
/**
|
||||
* @deprecated
|
||||
* @return ws4j distance caluclation add infinitum absurdum
|
||||
*/
|
||||
public double getDistanceCalculations() {
|
||||
ILexicalDatabase db = new NictWordNet();
|
||||
WS4JConfiguration.getInstance().setMFS(true);
|
||||
RelatednessCalculator rc1 = new WuPalmer(db);
|
||||
RelatednessCalculator rc2 = new Resnik(db);
|
||||
RelatednessCalculator rc3 = new JiangConrath(db);
|
||||
RelatednessCalculator rc4 = new Lin(db);
|
||||
RelatednessCalculator rc5 = new LeacockChodorow(db);
|
||||
RelatednessCalculator rc6 = new Path(db);
|
||||
RelatednessCalculator rc7 = new Lesk(db);
|
||||
RelatednessCalculator rc8 = new HirstStOnge(db);
|
||||
double maxScore = -1D;
|
||||
List<RelatednessCalculator> RCList = new ArrayList();
|
||||
RCList.add(rc1);
|
||||
RCList.add(rc2);
|
||||
RCList.add(rc3);
|
||||
RCList.add(rc4);
|
||||
RCList.add(rc5);
|
||||
RCList.add(rc6);
|
||||
RCList.add(rc7);
|
||||
RCList.add(rc8);
|
||||
for (RelatednessCalculator rc : RCList) {
|
||||
double s = rc.calcRelatednessOfWords(PrimaryString, SecondaryString);
|
||||
s /= 1000;
|
||||
if (s > 0.000000) {
|
||||
System.out.println("s: " + s + "\n" + " PrimaryString: " + PrimaryString + "\n" + " SecondaryString: " + SecondaryString + "\n"
|
||||
+ " rc: " + rc.toString() + "\n");
|
||||
}
|
||||
String str = String.format("%.12f", s);
|
||||
if (str.contains(",")) {
|
||||
str = str.substring(0, str.indexOf(","));
|
||||
}
|
||||
int strend = str.length() > 6 ? 6 : str.length();
|
||||
str = str.substring(0, strend);
|
||||
double score = Double.valueOf(str);
|
||||
if (score > maxScore) {
|
||||
maxScore = score;
|
||||
}
|
||||
}
|
||||
return maxScore == -1D ? 0.00 : maxScore;
|
||||
}
|
||||
|
||||
public String getPrimaryString() {
|
||||
return PrimaryString;
|
||||
}
|
||||
|
||||
public void setPrimaryString(String PrimaryString) {
|
||||
this.PrimaryString = PrimaryString;
|
||||
}
|
||||
|
||||
public String getSecondaryString() {
|
||||
return SecondaryString;
|
||||
}
|
||||
|
||||
public void setSecondaryString(String SecondaryString) {
|
||||
this.SecondaryString = SecondaryString;
|
||||
}
|
||||
|
||||
}
|
@ -0,0 +1,73 @@
|
||||
package FunctionLayer.misc;
|
||||
|
||||
import edu.stanford.nlp.ling.HasWord;
|
||||
import edu.stanford.nlp.ling.IndexedWord;
|
||||
import edu.stanford.nlp.ling.TaggedWord;
|
||||
import edu.stanford.nlp.ling.Word;
|
||||
import edu.stanford.nlp.parser.lexparser.LexicalizedParser;
|
||||
import edu.stanford.nlp.process.DocumentPreprocessor;
|
||||
import edu.stanford.nlp.process.Tokenizer;
|
||||
import edu.stanford.nlp.trees.GrammaticalStructure;
|
||||
import edu.stanford.nlp.trees.GrammaticalStructureFactory;
|
||||
import edu.stanford.nlp.trees.Tree;
|
||||
import edu.stanford.nlp.trees.TreebankLanguagePack;
|
||||
import edu.stanford.nlp.trees.TypedDependency;
|
||||
import java.io.StringReader;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
/*
|
||||
* To change this license header, choose License Headers in Project Properties.
|
||||
* To change this template file, choose Tools | Templates
|
||||
* and open the template in the editor.
|
||||
*/
|
||||
/**
 * Legacy lexicalized-parser scorer: parses two inputs and returns the best
 * parse log-score seen, compared against a running maximum.
 *
 * NOTE(review): str1 is wrapped whole into a single-element array, so only
 * tag3[0] ("PRP") is ever used — the entire second string is treated as one
 * PRP-tagged token; the remaining tags are dead data. The GrammaticalStructure
 * built per sentence is never read.
 *
 * @author install1
 */
public class SentimentAnalyzerTest {

    public static SentimentAnalyzerTest instance = new SentimentAnalyzerTest();
    // Classpath location of the PCFG model.
    public static String grammar = "edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz";
    public static String[] options = {"-maxLength", "80", "-retainTmpSubcategories"};

    /** Loads the English PCFG lexicalized parser with the shared options. */
    public static LexicalizedParser initiateLexicalizedParser() {
        LexicalizedParser lp = LexicalizedParser.loadModel(grammar, options);
        return lp;
    }

    /** Returns the language pack for the given parser's options. */
    public static TreebankLanguagePack initiateTreebankLanguagePack(LexicalizedParser lp) {
        TreebankLanguagePack tlp = lp.getOp().langpack();
        return tlp;
    }

    /**
     * Parses {@code str} (tokenized) and {@code str1} (as one pre-tagged PRP
     * token) and returns the highest parse score seen, or {@code sreturn} if
     * neither parse beats it.
     */
    public double sentimentanalyzing(String str, String str1, double sreturn, LexicalizedParser lp, TreebankLanguagePack tlp) {
        Iterable<List<? extends HasWord>> sentences;
        Tokenizer<? extends HasWord> toke
                = tlp.getTokenizerFactory().getTokenizer(new StringReader(str));
        List<? extends HasWord> sentence = toke.tokenize();
        // Whole str1 becomes a single token; only tag3[0] is ever consumed.
        String[] sent3 = {str1};
        String[] tag3 = {"PRP", "MD", "VB", "PRP", "."}; // Parser gets second "can" wrong without help
        List<TaggedWord> sentence2 = new ArrayList<>();
        for (int i = 0; i < sent3.length; i++) {
            sentence2.add(new TaggedWord(sent3[i], tag3[i]));
        }
        List<List<? extends HasWord>> tmp
                = new ArrayList<>();
        tmp.add(sentence2);
        tmp.add(sentence);
        sentences = tmp;
        for (List<? extends HasWord> sentence1 : sentences) {
            Tree parse1 = lp.parse(sentence1);
            // gs is built but never used — NOTE(review): dead computation.
            GrammaticalStructureFactory gsf = tlp.grammaticalStructureFactory();
            GrammaticalStructure gs = gsf.newGrammaticalStructure(parse1);
            double score = parse1.score();
            if (score > sreturn) {
                sreturn = score;
            }
        }
        return sreturn;
    }
}
|
@ -0,0 +1,66 @@
|
||||
/*
|
||||
* To change this license header, choose License Headers in Project Properties.
|
||||
* To change this template file, choose Tools | Templates
|
||||
* and open the template in the editor.
|
||||
*/
|
||||
package FunctionLayer.misc;
|
||||
|
||||
import DataLayer.DataMapper;
|
||||
import FunctionLayer.CustomError;
|
||||
import FunctionLayer.SimilarityMatrix;
|
||||
import static FunctionLayer.MYSQLDatahandler.EXPIRE_TIME_IN_SECONDS;
|
||||
import com.google.common.base.Stopwatch;
|
||||
import com.google.common.collect.ArrayListMultimap;
|
||||
import com.google.common.collect.MapMaker;
|
||||
import com.google.common.collect.Multimap;
|
||||
import java.io.IOException;
|
||||
import java.sql.SQLException;
|
||||
import java.util.LinkedHashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.ConcurrentMap;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author install1
|
||||
*/
|
||||
public class SentimentSimilarityCacheObsolete {

    /** Cache expiry interval: 5 hours, expressed in seconds. */
    public static final long EXPIRE_TIME_IN_SECONDS = TimeUnit.SECONDS.convert(5, TimeUnit.HOURS);
    // NOTE(review): both fields are static yet are (re)assigned in the instance
    // constructor below — every new instance silently resets shared state.
    private static ConcurrentMap<String, List<SimilarityMatrix>> SimilarityMatrixCache;
    private static Stopwatch stopwatch;

    /**
     * Initializes the shared cache and stopwatch.
     *
     * NOTE(review): both parameters are ignored; a fresh unstarted stopwatch and a
     * fresh map are created instead. Writing static fields through {@code this.} is
     * misleading. Class name says Obsolete — presumably retained for reference only.
     */
    public SentimentSimilarityCacheObsolete(ConcurrentMap<Integer, String> StringCache, Stopwatch stopwatch) {
        this.stopwatch = Stopwatch.createUnstarted();
        this.SimilarityMatrixCache = new MapMaker().concurrencyLevel(2).makeMap();
    }

    /** Empties the shared similarity-matrix cache. */
    public void clearConCurrentMaps() {
        SimilarityMatrixCache.clear();
    }

    /**
     * Loads every similarity matrix from the database and groups them by primary string.
     *
     * @return a multimap keyed by each matrix's primary string
     * @throws SQLException / IOException / CustomError propagated from the data layer
     */
    private Multimap<String, SimilarityMatrix> getCache() throws SQLException, IOException, CustomError {
        List<SimilarityMatrix> matrixlist;
        matrixlist = DataMapper.getAllSementicMatrixes();
        Multimap<String, SimilarityMatrix> LHM = ArrayListMultimap.create();
        for (int i = 0; i < matrixlist.size(); i++) {
            LHM.put(matrixlist.get(i).getPrimaryString(), matrixlist.get(i));
        }
        return LHM;
    }

    // NOTE(review): always returns "" — `str` is never reassigned and `strreturn`
    // is never read; the body only manages the stopwatch. Also, after expiry the
    // `else` branch calls reset() without start(), leaving the stopwatch stopped
    // until the *next* call re-enters via !isRunning(). Looks unfinished — consistent
    // with the Obsolete suffix; verify before reuse.
    public static String getResponseStr(String strreturn) {
        String str = "";
        if (stopwatch.elapsed(TimeUnit.SECONDS) >= EXPIRE_TIME_IN_SECONDS || !stopwatch.isRunning()) {

            if (!stopwatch.isRunning()) {
                stopwatch.start();
            } else {
                stopwatch.reset();
            }
        }
        return str;
    }

}
|
@ -0,0 +1,152 @@
|
||||
/*
|
||||
* To change this license header, choose License Headers in Project Properties.
|
||||
* To change this template file, choose Tools | Templates
|
||||
* and open the template in the editor.
|
||||
https://github.com/DonatoMeoli/WS4J
|
||||
*/
|
||||
package FunctionLayer.misc;
|
||||
|
||||
import edu.cmu.lti.jawjaw.pobj.POS;
|
||||
import edu.cmu.lti.lexical_db.ILexicalDatabase;
|
||||
import edu.cmu.lti.lexical_db.NictWordNet;
|
||||
import edu.cmu.lti.lexical_db.data.Concept;
|
||||
import edu.cmu.lti.ws4j.Relatedness;
|
||||
import edu.cmu.lti.ws4j.RelatednessCalculator;
|
||||
import edu.cmu.lti.ws4j.impl.HirstStOnge;
|
||||
import edu.cmu.lti.ws4j.impl.JiangConrath;
|
||||
import edu.cmu.lti.ws4j.impl.LeacockChodorow;
|
||||
import edu.cmu.lti.ws4j.impl.Lesk;
|
||||
import edu.cmu.lti.ws4j.impl.Lin;
|
||||
import edu.cmu.lti.ws4j.impl.Path;
|
||||
import edu.cmu.lti.ws4j.impl.Resnik;
|
||||
import edu.cmu.lti.ws4j.impl.WuPalmer;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author install1
|
||||
* https://www.programcreek.com/java-api-examples/?api=edu.cmu.lti.ws4j.RelatednessCalculator
|
||||
* https://stackoverflow.com/questions/36300485/how-to-resolve-the-difference-between-the-values-attained-in-the-web-api-and-the
|
||||
*/
|
||||
public class WordNetSimalarityObsolete {
|
||||
|
||||
private static ILexicalDatabase db = new NictWordNet();
|
||||
private static RelatednessCalculator rc1 = new WuPalmer(db);
|
||||
private static RelatednessCalculator rc2 = new Resnik(db);
|
||||
private static RelatednessCalculator rc3 = new JiangConrath(db);
|
||||
private static RelatednessCalculator rc4 = new Lin(db);
|
||||
private static RelatednessCalculator rc5 = new LeacockChodorow(db);
|
||||
private static RelatednessCalculator rc6 = new Path(db);
|
||||
private static RelatednessCalculator rc7 = new Lesk(db);
|
||||
private static RelatednessCalculator rc8 = new HirstStOnge(db);
|
||||
|
||||
public static double SentenceMatcherSimilarityMatrix(String[] words1, String[] words2, double maxScore) {
|
||||
{
|
||||
double[][] s1 = getSimilarityMatrix(words1, words2, rc1);
|
||||
for (int i = 0; i < words1.length; i++) {
|
||||
for (int j = 0; j < words2.length; j++) {
|
||||
if (s1[i][j] < maxScore && s1[i][j] > 0.0) {
|
||||
System.out.print(s1[i][j] + "\t");
|
||||
System.out.println("WuPalmer");
|
||||
maxScore = s1[i][j];
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
{
|
||||
double[][] s2 = getSimilarityMatrix(words1, words2, rc2);
|
||||
for (int i = 0; i < words1.length; i++) {
|
||||
for (int j = 0; j < words2.length; j++) {
|
||||
if (s2[i][j] < maxScore && s2[i][j] > 0.0) {
|
||||
System.out.println("Resnik");
|
||||
System.out.print(s2[i][j] + "\t");
|
||||
maxScore = s2[i][j];
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
{
|
||||
double[][] s2 = getSimilarityMatrix(words1, words2, rc3);
|
||||
for (int i = 0; i < words1.length; i++) {
|
||||
for (int j = 0; j < words2.length; j++) {
|
||||
if (s2[i][j] < maxScore && s2[i][j] > 0.0) {
|
||||
System.out.println("JiangConrath");
|
||||
System.out.print(s2[i][j] + "\t");
|
||||
maxScore = s2[i][j];
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
{
|
||||
double[][] s2 = getSimilarityMatrix(words1, words2, rc4);
|
||||
for (int i = 0; i < words1.length; i++) {
|
||||
for (int j = 0; j < words2.length; j++) {
|
||||
if (s2[i][j] < maxScore && s2[i][j] > 0.0) {
|
||||
System.out.println("Lin");
|
||||
System.out.print(s2[i][j] + "\t");
|
||||
maxScore = s2[i][j];
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
{
|
||||
double[][] s2 = getSimilarityMatrix(words1, words2, rc5);
|
||||
for (int i = 0; i < words1.length; i++) {
|
||||
for (int j = 0; j < words2.length; j++) {
|
||||
if (s2[i][j] < maxScore && s2[i][j] > 0.0) {
|
||||
System.out.print(s2[i][j] + "\t");
|
||||
System.out.println("LeacockChodrow");
|
||||
maxScore = s2[i][j];
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
{
|
||||
double[][] s2 = getSimilarityMatrix(words1, words2, rc6);
|
||||
for (int i = 0; i < words1.length; i++) {
|
||||
for (int j = 0; j < words2.length; j++) {
|
||||
if (s2[i][j] < maxScore && s2[i][j] > 0.0) {
|
||||
System.out.println("Path");
|
||||
System.out.print(s2[i][j] + "\t");
|
||||
maxScore = s2[i][j];
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
{
|
||||
double[][] s2 = getSimilarityMatrix(words1, words2, rc7);
|
||||
for (int i = 0; i < words1.length; i++) {
|
||||
for (int j = 0; j < words2.length; j++) {
|
||||
if (s2[i][j] < maxScore && s2[i][j] > 0.0) {
|
||||
System.out.println("Lesk");
|
||||
System.out.print(s2[i][j] + "\t");
|
||||
maxScore = s2[i][j];
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
{
|
||||
double[][] s2 = getSimilarityMatrix(words1, words2, rc8);
|
||||
for (int i = 0; i < words1.length; i++) {
|
||||
for (int j = 0; j < words2.length; j++) {
|
||||
if (s2[i][j] < maxScore && s2[i][j] > 0.0) {
|
||||
System.out.println("HirstStOnge");
|
||||
System.out.print(s2[i][j] + "\t");
|
||||
maxScore = s2[i][j];
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return maxScore;
|
||||
}
|
||||
|
||||
public static double[][] getSimilarityMatrix(String[] words1, String[] words2, RelatednessCalculator rc) {
|
||||
double[][] result = new double[words1.length][words2.length];
|
||||
for (int i = 0; i < words1.length; i++) {
|
||||
for (int j = 0; j < words2.length; j++) {
|
||||
double score = rc.calcRelatednessOfWords(words1[i], words2[j]);
|
||||
result[i][j] = score;
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
}
|
@ -0,0 +1,74 @@
|
||||
/*
|
||||
* To change this license header, choose License Headers in Project Properties.
|
||||
* To change this template file, choose Tools | Templates
|
||||
* and open the template in the editor.
|
||||
*/
|
||||
package FunctionLayer.misc;
|
||||
|
||||
import edu.cmu.lti.lexical_db.ILexicalDatabase;
|
||||
import edu.cmu.lti.lexical_db.NictWordNet;
|
||||
import edu.cmu.lti.ws4j.RelatednessCalculator;
|
||||
import edu.cmu.lti.ws4j.impl.HirstStOnge;
|
||||
import edu.cmu.lti.ws4j.impl.JiangConrath;
|
||||
import edu.cmu.lti.ws4j.impl.LeacockChodorow;
|
||||
import edu.cmu.lti.ws4j.impl.Lesk;
|
||||
import edu.cmu.lti.ws4j.impl.Lin;
|
||||
import edu.cmu.lti.ws4j.impl.Path;
|
||||
import edu.cmu.lti.ws4j.impl.Resnik;
|
||||
import edu.cmu.lti.ws4j.impl.WuPalmer;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author install1
|
||||
* http://ws4jdemo.appspot.com/?mode=s&s1=something+like+a+sentence&s2=should+not+be+like+the+first+one
|
||||
*/
|
||||
public class WordNetSimalarityTestObsolete {
|
||||
|
||||
private static ILexicalDatabase db = new NictWordNet();
|
||||
private static RelatednessCalculator rc1 = new WuPalmer(db);
|
||||
private static RelatednessCalculator rc2 = new Resnik(db);
|
||||
private static RelatednessCalculator rc3 = new JiangConrath(db);
|
||||
private static RelatednessCalculator rc4 = new Lin(db);
|
||||
private static RelatednessCalculator rc5 = new LeacockChodorow(db);
|
||||
private static RelatednessCalculator rc6 = new Path(db);
|
||||
private static RelatednessCalculator rc7 = new Lesk(db);
|
||||
private static RelatednessCalculator rc8 = new HirstStOnge(db);
|
||||
|
||||
public static double SentenceMatcherSimilarityMatrix(String[] words1, String[] words2, double maxScore) {
|
||||
boolean initial = maxScore == 0.0;
|
||||
List<RelatednessCalculator> RCList = new ArrayList();
|
||||
RCList.add(rc1);
|
||||
RCList.add(rc2);
|
||||
RCList.add(rc3);
|
||||
RCList.add(rc4);
|
||||
RCList.add(rc5);
|
||||
RCList.add(rc6);
|
||||
RCList.add(rc7);
|
||||
RCList.add(rc8);
|
||||
for (int h = 0; h < RCList.size(); h++) {
|
||||
double s1 = getSimilarityMatrix(words1, words2, RCList.get(h), maxScore, initial);
|
||||
System.out.println("s1: " + String.format("%.0f", s1) + " \nmaxScore: " + maxScore);
|
||||
if (s1 > 0.01 && (s1 < maxScore || initial)) {
|
||||
maxScore = s1;
|
||||
}
|
||||
}
|
||||
return maxScore;
|
||||
}
|
||||
|
||||
public static double getSimilarityMatrix(String[] words1, String[] words2, RelatednessCalculator rc, double maxScore, boolean initial) {
|
||||
double rtndouble = 0.01;
|
||||
for (int i = 0; i < words1.length; i++) {
|
||||
for (int j = 0; j < words2.length; j++) {
|
||||
if (maxScore > rtndouble / words2.length || initial) {
|
||||
rtndouble += (rc.calcRelatednessOfWords(words1[i], words2[j]));
|
||||
//System.out.println("RelatednessCalculator: " + rc.toString());
|
||||
} else {
|
||||
return rtndouble;
|
||||
}
|
||||
}
|
||||
}
|
||||
return rtndouble;
|
||||
}
|
||||
}
|
58
ArtificialAutism/src/main/java/FunctionLayer/misc/notes.java
Normal file
58
ArtificialAutism/src/main/java/FunctionLayer/misc/notes.java
Normal file
@ -0,0 +1,58 @@
|
||||
/*
|
||||
* To change this license header, choose License Headers in Project Properties.
|
||||
* To change this template file, choose Tools | Templates
|
||||
* and open the template in the editor.
|
||||
*/
|
||||
package FunctionLayer.misc;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author install1
|
||||
*/
|
||||
public class notes {
    // NOTE(review): scratchpad class holding commented-out WS4J reference snippets;
    // it contains no executable members. Consider moving this to a text/markdown
    // file — dead code in a .java file compiles into an empty class for no benefit.
    // (The inner "/*" below is plain text: Java block comments do not nest, so the
    // first "*/" closes the whole block.)
    /*

    /*
    ILexicalDatabase db = new NictWordNet();
    RelatednessCalculator lesk = new Lesk(db);
    POS posWord1 = POS.n;
    POS posWord2 = POS.n;
    double maxScore = 0;
    WS4JConfiguration.getInstance().setMFS(true);
    List<Concept> synsets1 = (List<Concept>) db.getAllConcepts(strreturn, posWord1.name());
    for (int i = 0; i < allStringValuesPresent.size(); i++) {
        List<Concept> synsets2 = (List<Concept>) db.getAllConcepts(allStringValuesPresent.get(i), posWord2.name());
        for (Concept synset1 : synsets1) {
            for (Concept synset2 : synsets2) {
                Relatedness relatedness = lesk.calcRelatednessOfSynset(synset1, synset2);
                double score = relatedness.getScore();
                if (score > maxScore) {
                    maxScore = score;
                    index = i;
                }
            }
        }

    }

    private static RelatednessCalculator[] rcs;

    static {
        WS4JConfiguration.getInstance().setMemoryDB(false);
        WS4JConfiguration.getInstance().setMFS(true);
        ILexicalDatabase db = new MITWordNet();
        rcs = new RelatednessCalculator[]{
            new HirstStOnge(db), new LeacockChodorow(db), new Lesk(db), new WuPalmer(db),
            new Resnik(db), new JiangConrath(db), new Lin(db), new Path(db)
        };
    }
    https://github.com/DonatoMeoli/WS4J
    https://www.programcreek.com/2014/01/calculate-words-similarity-using-wordnet-in-java/
    */
    /*
    //available options of metrics
    private static RelatednessCalculator[] rcs = { new HirstStOnge(db),
    new LeacockChodorow(db), new Lesk(db), new WuPalmer(db),
    new Resnik(db), new JiangConrath(db), new Lin(db), new Path(db) };
    */
}
|
@ -0,0 +1,108 @@
|
||||
/*
|
||||
* To change this license header, choose License Headers in Project Properties.
|
||||
* To change this template file, choose Tools | Templates
|
||||
* and open the template in the editor.
|
||||
|
||||
ps ax | grep EventNotfierDiscordBot-1.0
|
||||
kill $pid (number)
|
||||
|
||||
nohup screen -d -m -S nonRoot java -Xmx5048M -jar /home/Artificial_Autism/ArtificialAutism-1.0.jar
|
||||
nohup screen -d -m -S nonRoot java -Xmx4048M -jar /home/Artificial_Autism/ArtificialAutism-1.0.jar
|
||||
|
||||
nohup screen -d -m -S gameservers java -Xmx2450M -jar /home/gameservers/ArtificialAutism/ArtificialAutism-1.0.jar
|
||||
screen -ls (number1)
|
||||
screen -X -S (number1) quit
|
||||
*/
|
||||
//https://discordapp.com/developers/applications/
|
||||
//https://github.com/Javacord/Javacord
|
||||
package PresentationLayer;
|
||||
|
||||
import FunctionLayer.CustomError;
|
||||
import FunctionLayer.MYSQLDatahandler;
|
||||
import FunctionLayer.MessageResponseHandler;
|
||||
import FunctionLayer.StanfordParser.SentimentAnalyzerTest;
|
||||
import java.io.IOException;
|
||||
import java.sql.SQLException;
|
||||
import java.util.List;
|
||||
import java.util.logging.Level;
|
||||
import java.util.logging.Logger;
|
||||
import org.javacord.api.DiscordApi;
|
||||
import org.javacord.api.DiscordApiBuilder;
|
||||
import org.javacord.api.entity.user.User;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author install1
|
||||
*/
|
||||
public class DiscordHandler {

    /**
     * Entry point: initializes the parser and MYSQL layer, logs the Discord bot in,
     * and registers a message listener that filters by channel category, strips user
     * mentions, feeds the text to the response handler, and replies when the bot is
     * mentioned (or in the "minor-test" channel).
     */
    public static void main(String[] args) {
        // NOTE(review): this imports FunctionLayer.StanfordParser.SentimentAnalyzerTest
        // (not the FunctionLayer.misc one) — presumably that class defines
        // shiftReduceParserInitiate(); confirm against that file.
        SentimentAnalyzerTest.shiftReduceParserInitiate();
        // DB initialization runs on a background thread so login is not blocked.
        new Thread(() -> {
            try {
                MYSQLDatahandler.instance.initiateMYSQL();
                System.out.println("finished initiating MYSQL");
            } catch (SQLException | IOException ex) {
                Logger.getLogger(DiscordHandler.class.getName()).log(Level.SEVERE, null, ex);
            }
        }).start();
        // SECURITY(review): bot token hard-coded and committed to source control —
        // it must be revoked and loaded from an environment variable or config file.
        String token = "NTI5NzAxNTk5NjAyMjc4NDAx.Dw0vDg.7-aMjVWdQMYPl8qVNyvTCPS5F_A";
        DiscordApi api = new DiscordApiBuilder().setToken(token).login().join();
        api.addMessageCreateListener(event -> {
            // Ignore the bot's own messages to avoid feedback loops.
            if (!event.getMessage().getAuthor().isYourself()) {
                StringBuilder sb = new StringBuilder(); // NOTE(review): unused
                // NOTE(review): Optional.get() without isPresent() — throws for
                // messages with no server text channel (e.g. DMs); verify.
                String strtest = event.getServerTextChannel().get().getCategory().toString();
                // Presumably strips the "Optional[" prefix and trailing "]" from
                // Optional.toString() to get the bare category name — TODO confirm.
                strtest = strtest.substring(9, strtest.length() - 1);
                boolean channelpermissionsDenied = false;
                // Only specific category names are allowed; everything else is denied.
                switch (strtest) {
                    case "Server Area": {
                        // In "Server Area" only chat-live channels are allowed.
                        if (!event.getServerTextChannel().get().toString().contains("chat-live")) {
                            channelpermissionsDenied = true;
                        }
                        break;
                    }
                    case "Public Area": {
                        break;
                    }
                    case "Information Area": {
                        break;
                    }
                    default: {
                        channelpermissionsDenied = true;
                        break;
                    }
                }
                if (!channelpermissionsDenied) {
                    // Remove mentioned users' IDs from the raw message text before storing.
                    List<User> userlist = event.getMessage().getMentionedUsers();
                    String strresult = event.getMessage().toString();
                    if (userlist != null) {
                        for (int i = 0; i < userlist.size(); i++) {
                            strresult = strresult.replace(userlist.get(i).getIdAsString(), "");
                        }
                    }
                    MessageResponseHandler.getMessage(strresult);
                    try {
                        MYSQLDatahandler.instance.checkIfUpdateStrings();
                        MYSQLDatahandler.instance.checkIfUpdateMatrixes();
                    } catch (CustomError ex) {
                        Logger.getLogger(DiscordHandler.class.getName()).log(Level.SEVERE, null, ex);
                    }
                }
                //contains to specify one channel where bot may always type
                if (event.getMessage().getMentionedUsers().contains(api.getYourself())
                        || event.getServerTextChannel().get().toString().contains("minor-test")) {
                    String ResponseStr;
                    try {
                        ResponseStr = MessageResponseHandler.selectReponseMessage(event.getMessage().toString());
                        if (!ResponseStr.isEmpty()) {
                            System.out.print("\nResponseStr3: " + ResponseStr);
                            event.getChannel().sendMessage(ResponseStr);
                        }
                    } catch (CustomError ex) {
                        Logger.getLogger(DiscordHandler.class.getName()).log(Level.SEVERE, null, ex);
                    }
                }
            }
        });
    }
}
|
BIN
ArtificialAutism/target/ArtificialAutism-1.0.jar
Normal file
BIN
ArtificialAutism/target/ArtificialAutism-1.0.jar
Normal file
Binary file not shown.
BIN
ArtificialAutism/target/classes/DataLayer/DBCPDataSource.class
Normal file
BIN
ArtificialAutism/target/classes/DataLayer/DBCPDataSource.class
Normal file
Binary file not shown.
BIN
ArtificialAutism/target/classes/DataLayer/DataMapper.class
Normal file
BIN
ArtificialAutism/target/classes/DataLayer/DataMapper.class
Normal file
Binary file not shown.
BIN
ArtificialAutism/target/classes/FunctionLayer/CustomError.class
Normal file
BIN
ArtificialAutism/target/classes/FunctionLayer/CustomError.class
Normal file
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
BIN
ArtificialAutism/target/classes/FunctionLayer/misc/notes.class
Normal file
BIN
ArtificialAutism/target/classes/FunctionLayer/misc/notes.class
Normal file
Binary file not shown.
Binary file not shown.
BIN
ArtificialAutism/target/lib/StanfordParser-1.0.jar
Normal file
BIN
ArtificialAutism/target/lib/StanfordParser-1.0.jar
Normal file
Binary file not shown.
BIN
ArtificialAutism/target/lib/animal-sniffer-annotations-1.14.jar
Normal file
BIN
ArtificialAutism/target/lib/animal-sniffer-annotations-1.14.jar
Normal file
Binary file not shown.
BIN
ArtificialAutism/target/lib/checker-qual-2.5.2.jar
Normal file
BIN
ArtificialAutism/target/lib/checker-qual-2.5.2.jar
Normal file
Binary file not shown.
BIN
ArtificialAutism/target/lib/commons-codec-1.10.jar
Normal file
BIN
ArtificialAutism/target/lib/commons-codec-1.10.jar
Normal file
Binary file not shown.
BIN
ArtificialAutism/target/lib/commons-dbcp2-2.5.0.jar
Normal file
BIN
ArtificialAutism/target/lib/commons-dbcp2-2.5.0.jar
Normal file
Binary file not shown.
BIN
ArtificialAutism/target/lib/commons-logging-1.2.jar
Normal file
BIN
ArtificialAutism/target/lib/commons-logging-1.2.jar
Normal file
Binary file not shown.
BIN
ArtificialAutism/target/lib/commons-pool2-2.6.0.jar
Normal file
BIN
ArtificialAutism/target/lib/commons-pool2-2.6.0.jar
Normal file
Binary file not shown.
BIN
ArtificialAutism/target/lib/error_prone_annotations-2.1.3.jar
Normal file
BIN
ArtificialAutism/target/lib/error_prone_annotations-2.1.3.jar
Normal file
Binary file not shown.
BIN
ArtificialAutism/target/lib/guava-26.0-jre.jar
Normal file
BIN
ArtificialAutism/target/lib/guava-26.0-jre.jar
Normal file
Binary file not shown.
BIN
ArtificialAutism/target/lib/j2objc-annotations-1.1.jar
Normal file
BIN
ArtificialAutism/target/lib/j2objc-annotations-1.1.jar
Normal file
Binary file not shown.
BIN
ArtificialAutism/target/lib/jackson-annotations-2.9.0.jar
Normal file
BIN
ArtificialAutism/target/lib/jackson-annotations-2.9.0.jar
Normal file
Binary file not shown.
BIN
ArtificialAutism/target/lib/jackson-core-2.9.3.jar
Normal file
BIN
ArtificialAutism/target/lib/jackson-core-2.9.3.jar
Normal file
Binary file not shown.
BIN
ArtificialAutism/target/lib/jackson-databind-2.9.3.jar
Normal file
BIN
ArtificialAutism/target/lib/jackson-databind-2.9.3.jar
Normal file
Binary file not shown.
68
ArtificialAutism/target/lib/javacord-3.0.1.pom
Normal file
68
ArtificialAutism/target/lib/javacord-3.0.1.pom
Normal file
@ -0,0 +1,68 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
<project xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd" xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
  <modelVersion>4.0.0</modelVersion>
  <groupId>org.javacord</groupId>
  <artifactId>javacord</artifactId>
  <version>3.0.1</version>
  <packaging>pom</packaging>
  <name>Javacord</name>
  <description>An easy to use multithreaded library for creating Discord bots in Java</description>
  <url>https://www.javacord.org</url>
  <inceptionYear>2015</inceptionYear>
  <licenses>
    <license>
      <name>Apache License, Version 2.0</name>
      <url>https://www.apache.org/licenses/LICENSE-2.0.txt</url>
      <distribution>repo</distribution>
      <comments>A business-friendly OSS license</comments>
    </license>
  </licenses>
  <developers>
    <developer>
      <id>BtoBastian</id>
      <name>Bastian Oppermann</name>
      <email>bastianoppermann1997@gmail.com</email>
      <url>https://github.com/BtoBastian</url>
      <timezone>Europe/Berlin</timezone>
    </developer>
  </developers>
  <contributors>
    <contributor>
      <name>Björn Kautler</name>
      <email>Bjoern@Kautler.net</email>
      <url>https://github.com/Vampire</url>
      <timezone>Europe/Berlin</timezone>
    </contributor>
  </contributors>
  <scm>
    <connection>scm:git:https://github.com/Javacord/Javacord.git</connection>
    <developerConnection>scm:git:git@github.com:Javacord/Javacord.git</developerConnection>
    <url>https://github.com/Javacord/Javacord</url>
  </scm>
  <issueManagement>
    <system>GitHub</system>
    <url>https://github.com/Javacord/Javacord/issues</url>
  </issueManagement>
  <ciManagement>
    <system>TeamCity</system>
    <!-- Raw '&' is not well-formed XML; escaped as &amp; (resolves to the same URL). -->
    <url>https://ci.javacord.org/project.html?projectId=Javacord&amp;guest=1</url>
  </ciManagement>
  <distributionManagement>
    <downloadUrl>https://github.com/Javacord/Javacord/releases</downloadUrl>
  </distributionManagement>
  <dependencies>
    <dependency>
      <groupId>org.javacord</groupId>
      <artifactId>javacord-api</artifactId>
      <version>3.0.1</version>
      <scope>compile</scope>
    </dependency>
    <dependency>
      <groupId>org.javacord</groupId>
      <artifactId>javacord-core</artifactId>
      <version>3.0.1</version>
      <scope>runtime</scope>
    </dependency>
  </dependencies>
</project>
BIN
ArtificialAutism/target/lib/javacord-api-3.0.1.jar
Normal file
BIN
ArtificialAutism/target/lib/javacord-api-3.0.1.jar
Normal file
Binary file not shown.
BIN
ArtificialAutism/target/lib/javacord-core-3.0.1.jar
Normal file
BIN
ArtificialAutism/target/lib/javacord-core-3.0.1.jar
Normal file
Binary file not shown.
BIN
ArtificialAutism/target/lib/jsr305-3.0.2.jar
Normal file
BIN
ArtificialAutism/target/lib/jsr305-3.0.2.jar
Normal file
Binary file not shown.
BIN
ArtificialAutism/target/lib/log4j-api-2.11.0.jar
Normal file
BIN
ArtificialAutism/target/lib/log4j-api-2.11.0.jar
Normal file
Binary file not shown.
BIN
ArtificialAutism/target/lib/logging-interceptor-3.9.1.jar
Normal file
BIN
ArtificialAutism/target/lib/logging-interceptor-3.9.1.jar
Normal file
Binary file not shown.
BIN
ArtificialAutism/target/lib/mysql-connector-java-8.0.13.jar
Normal file
BIN
ArtificialAutism/target/lib/mysql-connector-java-8.0.13.jar
Normal file
Binary file not shown.
BIN
ArtificialAutism/target/lib/nv-websocket-client-1.31.jar
Normal file
BIN
ArtificialAutism/target/lib/nv-websocket-client-1.31.jar
Normal file
Binary file not shown.
BIN
ArtificialAutism/target/lib/okhttp-3.9.1.jar
Normal file
BIN
ArtificialAutism/target/lib/okhttp-3.9.1.jar
Normal file
Binary file not shown.
BIN
ArtificialAutism/target/lib/okio-1.13.0.jar
Normal file
BIN
ArtificialAutism/target/lib/okio-1.13.0.jar
Normal file
Binary file not shown.
BIN
ArtificialAutism/target/lib/protobuf-java-3.6.1.jar
Normal file
BIN
ArtificialAutism/target/lib/protobuf-java-3.6.1.jar
Normal file
Binary file not shown.
BIN
ArtificialAutism/target/lib/simmetrics-core-4.1.1.jar
Normal file
BIN
ArtificialAutism/target/lib/simmetrics-core-4.1.1.jar
Normal file
Binary file not shown.
BIN
ArtificialAutism/target/lib/ws4j-1.0.1.jar
Normal file
BIN
ArtificialAutism/target/lib/ws4j-1.0.1.jar
Normal file
Binary file not shown.
5
ArtificialAutism/target/maven-archiver/pom.properties
Normal file
5
ArtificialAutism/target/maven-archiver/pom.properties
Normal file
@ -0,0 +1,5 @@
|
||||
#Generated by Maven
|
||||
#Sat Feb 02 20:31:02 CET 2019
|
||||
version=1.0
|
||||
groupId=com.mycompany
|
||||
artifactId=ArtificialAutism
|
Loading…
Reference in New Issue
Block a user