@@ -0,0 +1,560 @@
+package com.gqy.common.utils.hanlp;
+
+import javafx.util.Pair; // requires JavaFX on the classpath (bundled with JDK 8, a separate module from JDK 11 on)
+import org.deeplearning4j.models.word2vec.Word2Vec;
+import org.deeplearning4j.models.word2vec.VocabWord;
+import org.deeplearning4j.models.word2vec.wordstore.inmemory.AbstractCache;
+import org.deeplearning4j.models.word2vec.wordstore.VocabCache;
+import org.nd4j.linalg.api.ndarray.INDArray;
+import org.nd4j.linalg.factory.Nd4j;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import com.hankcs.hanlp.HanLP;
+import com.hankcs.hanlp.seg.common.Term;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.nio.charset.StandardCharsets;
+import java.util.*;
+import java.util.stream.Collectors;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * Word2Vec word similarity utility.
+ * Loads pre-trained FastText vectors and compares texts via weighted
+ * average word vectors and cosine similarity.
+ */
+public class Word2VecWordSimilarity {
+    private static final Logger log = LoggerFactory.getLogger(Word2VecWordSimilarity.class);
+    private static Word2Vec vec;
+    private static final String FASTTEXT_PATH = "D:\\word2\\wiki.zh.vec"; // change to your actual path
+
+    // Static map holding the word vectors
+    static Map<String, INDArray> wordVectorsMap = new HashMap<>();
+
+    static {
+        try {
+            initializeModel();
+        } catch (Exception e) {
+            log.error("FastText model initialization failed", e);
+            throw new RuntimeException("FastText model initialization failed", e);
+        }
+    }
+
+    /**
+     * Initialize the FastText model from a .vec text file.
+     */
+    private static void initializeModel() throws IOException {
+        log.info("Loading FastText model...");
+        File vectorFile = new File(FASTTEXT_PATH);
+        if (!vectorFile.exists()) {
+            throw new FileNotFoundException("FastText vector file not found: " + FASTTEXT_PATH);
+        }
+
+        // .vec files are UTF-8; read with an explicit charset so Chinese tokens are not garbled
+        try (BufferedReader reader = new BufferedReader(
+                new InputStreamReader(new FileInputStream(vectorFile), StandardCharsets.UTF_8), 8 * 1024 * 1024)) {
+            log.info("File opened, reading...");
+
+            // The first line holds the vocabulary size and vector dimension
+            String firstLine = reader.readLine();
+            String[] dims = firstLine.split(" ");
+            int vocabSize = Integer.parseInt(dims[0]);
+            int vectorSize = Integer.parseInt(dims[1]);
+            log.info("Vocabulary size: {}, vector dimension: {}", vocabSize, vectorSize);
+
+            // Pre-size the map
+            wordVectorsMap = new ConcurrentHashMap<>(vocabSize);
+
+            // Read all vectors into memory
+            log.info("Reading word vectors...");
+            String line;
+            int lineCount = 0;
+            int successCount = 0;
+
+            while ((line = reader.readLine()) != null) {
+                lineCount++;
+                try {
+                    String[] tokens = line.trim().split("\\s+");
+                    if (tokens.length != vectorSize + 1) {
+                        continue; // skip malformed lines (e.g. tokens containing spaces)
+                    }
+
+                    float[] vector = new float[vectorSize];
+                    for (int j = 0; j < vectorSize; j++) {
+                        vector[j] = Float.parseFloat(tokens[j + 1]);
+                    }
+                    wordVectorsMap.put(tokens[0], Nd4j.create(vector));
+                    successCount++;
+
+                    if (lineCount % 10000 == 0) {
+                        log.info("Processed {} lines, loaded {} word vectors", lineCount, successCount);
+                    }
+                } catch (Exception e) {
+                    log.warn("Error processing line {}: {}", lineCount, e.getMessage());
+                }
+            }
+
+            log.info("FastText model loaded; total lines: {}, vectors loaded: {}", lineCount, successCount);
+
+            // Create a Word2Vec instance (vocabulary only; vectors are served from wordVectorsMap)
+            vec = new Word2Vec.Builder()
+                    .layerSize(vectorSize)
+                    .minWordFrequency(1)
+                    .iterations(1)
+                    .epochs(1)
+                    .learningRate(0.025)
+                    .windowSize(5)
+                    .build();
+
+            // Populate the vocabulary
+            VocabCache<VocabWord> vocabCache = new AbstractCache<>();
+            for (String word : wordVectorsMap.keySet()) {
+                VocabWord vocabWord = new VocabWord(1.0, word);
+                vocabCache.addToken(vocabWord);
+                vocabCache.addWordToIndex(vocabCache.numWords(), word);
+            }
+            vec.setVocab(vocabCache);
+
+        } catch (Exception e) {
+            log.error("Failed to load FastText model", e);
+            throw new RuntimeException("Failed to load FastText model", e);
+        }
+    }
+
+    /**
+     * Compute the similarity of two texts.
+     */
+    public static double calculateSimilarity(String text1, String text2) {
+        try {
+            log.info("Computing text similarity: '{}' vs '{}'", text1, text2);
+
+            // Segment the input texts, filtering out punctuation and stop words
+            List<Term> terms1 = HanLP.segment(text1).stream()
+                    .filter(term -> !term.nature.startsWith("w") // drop punctuation
+                            && !isStopWord(term.word)) // drop stop words
+                    .collect(Collectors.toList());
+
+            List<Term> terms2 = HanLP.segment(text2).stream()
+                    .filter(term -> !term.nature.startsWith("w")
+                            && !isStopWord(term.word))
+                    .collect(Collectors.toList());
+
+            log.info("Segmentation (filtered): text1={}, text2={}", terms1, terms2);
+
+            // Build a weighted average vector for each text
+            INDArray vector1 = getWeightedAverageVector(terms1);
+            INDArray vector2 = getWeightedAverageVector(terms2);
+
+            if (vector1 == null || vector2 == null) {
+                log.warn("Could not build a valid vector representation: text1={}, text2={}",
+                        vector1 == null ? "null" : "valid",
+                        vector2 == null ? "null" : "valid");
+                return 0.0;
+            }
+
+            // Cosine similarity
+            double rawSimilarity = calculateCosineSimilarity(vector1, vector2);
+
+            // Apply a length penalty factor
+            double lengthPenalty = calculateLengthPenalty(terms1.size(), terms2.size());
+            double similarity = rawSimilarity * lengthPenalty;
+
+            log.info("Similarity computed: rawSimilarity={}, lengthPenalty={}, finalSimilarity={}",
+                    rawSimilarity, lengthPenalty, similarity);
+            return similarity;
+        } catch (Exception e) {
+            log.error("Failed to compute text similarity: {} vs {}", text1, text2, e);
+            return 0.0;
+        }
+    }
+
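+    /**
+     * Extended stop-word list used to filter segmentation output before
+     * vector averaging (particles, pronouns, and other low-content words).
+     */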
+    // Built once instead of on every call, since isStopWord runs per term
+    private static final Set<String> STOP_WORDS = new HashSet<>(Arrays.asList(
+            "的", "了", "和", "是", "就", "都", "而", "及", "与", "着",
+            "之", "用", "其", "中", "你", "我", "他", "她", "它", "要",
+            "把", "被", "让", "在", "有", "个", "好", "这", "那", "什么",
+            "啊", "哦", "呢", "吧", "啦", "么", "呀", "嘛", "哪", "那么",
+            "这么", "怎么", "为", "以", "到", "得", "过", "很", "对",
+            "真", "的话", "所以", "因为", "但是", "不过", "可以", "现在"
+    ));
+
+    private static boolean isStopWord(String word) {
+        return STOP_WORDS.contains(word);
+    }
+
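+    /**
+     * Average the vectors of the given terms, weighting each by part of speech.
+     * Returns null when none of the terms has a vector.
+     */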
+    private static INDArray getWeightedAverageVector(List<Term> terms) {
+        try {
+            log.debug("Computing weighted average vector for {} terms", terms.size());
+            List<Pair<INDArray, Double>> vectorsWithWeights = new ArrayList<>();
+            double totalWeight = 0.0;
+
+            for (Term term : terms) {
+                String word = term.word;
+                INDArray vector = wordVectorsMap.get(word);
+                if (vector != null) {
+                    // Weight each word by its part of speech
+                    double weight = getTermWeight(term);
+                    vectorsWithWeights.add(new Pair<>(vector, weight));
+                    totalWeight += weight;
+                    log.debug("Found vector for '{}', weight: {}", word, weight);
+                } else {
+                    log.debug("'{}' is not in the vector table", word);
+                }
+            }
+
+            if (vectorsWithWeights.isEmpty()) {
+                log.warn("No valid word vectors found for terms: {}", terms);
+                return null;
+            }
+
+            // Weighted average: sum(w_i * v_i) / sum(w_i)
+            INDArray weightedSum = Nd4j.zeros(vectorsWithWeights.get(0).getKey().shape());
+            for (Pair<INDArray, Double> pair : vectorsWithWeights) {
+                weightedSum.addi(pair.getKey().mul(pair.getValue()));
+            }
+            return weightedSum.divi(totalWeight);
+        } catch (Exception e) {
+            log.error("Failed to compute weighted average vector for terms: {}", terms, e);
+            return null;
+        }
+    }
+
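+    /**
+     * Part-of-speech weight: content words (nouns, verbs, adjectives) count
+     * fully, time and place words less, everything else least.
+     */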
+    private static double getTermWeight(Term term) {
+        switch (term.nature.toString()) {
+            case "n": // noun
+            case "v": // verb
+            case "a": // adjective
+                return 1.0;
+            case "t": // time word
+            case "s": // place word
+                return 0.8;
+            case "f": // locative word
+            case "b": // distinguishing word
+                return 0.6;
+            default:
+                return 0.4;
+        }
+    }
+
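+    /**
+     * Cosine similarity: dot(v1, v2) / (||v1|| * ||v2||).
+     */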
+    private static double calculateCosineSimilarity(INDArray vector1, INDArray vector2) {
+        double dotProduct = vector1.mul(vector2).sumNumber().doubleValue();
+        double norm1 = vector1.norm2Number().doubleValue();
+        double norm2 = vector2.norm2Number().doubleValue();
+        if (norm1 == 0.0 || norm2 == 0.0) {
+            return 0.0; // avoid division by zero for degenerate vectors
+        }
+        return dotProduct / (norm1 * norm2);
+    }
+
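+    /**
+     * Length penalty: sqrt(min/max) of the two term counts, so texts of very
+     * different lengths score lower. E.g. lengths 3 and 12 give ratio 0.25
+     * and penalty sqrt(0.25) = 0.5.
+     */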
+    private static double calculateLengthPenalty(int len1, int len2) {
+        int max = Math.max(len1, len2);
+        if (max == 0) {
+            return 0.0; // both texts empty after filtering
+        }
+        double ratio = Math.min(len1, len2) / (double) max;
+        return Math.pow(ratio, 0.5); // square root softens the penalty
+    }
+
+    /**
+     * Decide whether two texts are similar.
+     */
+    public static boolean areWordsSimilar(String word1, String word2, double threshold) {
+        try {
+            log.info("Checking similarity: word1='{}', word2='{}', threshold={}", word1, word2, threshold);
+
+            if (word1 == null || word2 == null) {
+                log.warn("Null input: word1={}, word2={}", word1, word2);
+                return false;
+            }
+
+            if (word1.equals(word2)) {
+                log.info("Texts are identical, returning true");
+                return true;
+            }
+
+            // Preprocessing: strip punctuation and symbols
+            word1 = word1.replaceAll("[\\p{P}\\p{S}]", "");
+            word2 = word2.replaceAll("[\\p{P}\\p{S}]", "");
+
+            if (word1.isEmpty() || word2.isEmpty()) {
+                log.warn("Text empty after stripping punctuation: word1='{}', word2='{}'", word1, word2);
+                return false;
+            }
+
+            // Segment to get parts of speech
+            List<Term> terms1 = HanLP.segment(word1);
+            List<Term> terms2 = HanLP.segment(word2);
+
+            log.info("Segmentation: terms1={}, terms2={}", terms1, terms2);
+
+            // Extract the main verb and core nouns
+            Map<String, String> keyTerms1 = extractKeyTerms(terms1);
+            Map<String, String> keyTerms2 = extractKeyTerms(terms2);
+
+            log.info("Key terms: keyTerms1={}, keyTerms2={}", keyTerms1, keyTerms2);
+
+            // Check verb similarity
+            boolean verbSimilar = checkVerbSimilarity(keyTerms1.get("verb"), keyTerms2.get("verb"));
+
+            // Check core-noun overlap
+            Set<String> nouns1 = Arrays.stream(keyTerms1.getOrDefault("noun", "").split(","))
+                    .filter(s -> !s.isEmpty())
+                    .collect(Collectors.toSet());
+            Set<String> nouns2 = Arrays.stream(keyTerms2.getOrDefault("noun", "").split(","))
+                    .filter(s -> !s.isEmpty())
+                    .collect(Collectors.toSet());
+            Set<String> commonNouns = new HashSet<>(nouns1);
+            commonNouns.retainAll(nouns2);
+
+            // Vector similarity
+            double similarity = calculateSimilarity(word1, word2);
+
+            // Adjust the threshold dynamically
+            double adjustedThreshold = threshold;
+
+            // Question vs. statement handling
+            boolean isQuestion1 = isQuestion(word1);
+            boolean isQuestion2 = isQuestion(word2);
+
+            // One question, one statement, and dissimilar verbs: raise the threshold
+            if (isQuestion1 != isQuestion2 && !verbSimilar) {
+                adjustedThreshold *= 1.3;
+            }
+
+            // Dissimilar verbs alone also raise the threshold
+            if (!verbSimilar) {
+                adjustedThreshold *= 1.2;
+            }
+
+            // Length-difference adjustment
+            int lenDiff = Math.abs(terms1.size() - terms2.size());
+            if (lenDiff > 2) {
+                adjustedThreshold *= (1.0 + lenDiff * 0.1);
+            }
+
+            boolean isSimilar = similarity >= adjustedThreshold &&
+                    (verbSimilar || !commonNouns.isEmpty());
+
+            log.info("Similarity decision: rawSimilarity={}, adjustedThreshold={}, verbSimilar={}, " +
+                    "commonNouns={}, isQuestion1={}, isQuestion2={}, isSimilar={}",
+                    similarity, adjustedThreshold, verbSimilar,
+                    commonNouns, isQuestion1, isQuestion2, isSimilar);
+
+            return isSimilar;
+        } catch (Exception e) {
+            log.error("Failed to check similarity: {} vs {}", word1, word2, e);
+            return false;
+        }
+    }
+
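+    /**
+     * Pull the first verb (treated as the main verb) and all nouns out of a
+     * segmentation result.
+     */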
+    private static Map<String, String> extractKeyTerms(List<Term> terms) {
+        Map<String, String> keyTerms = new HashMap<>();
+        List<String> nouns = new ArrayList<>();
+        String mainVerb = null;
+
+        for (Term term : terms) {
+            String nature = term.nature.toString();
+            if (nature.startsWith("v") && mainVerb == null) {
+                mainVerb = term.word;
+            } else if (nature.startsWith("n")) {
+                nouns.add(term.word);
+            }
+        }
+
+        keyTerms.put("verb", mainVerb);
+        keyTerms.put("noun", String.join(",", nouns));
+        return keyTerms;
+    }
+
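+    /**
+     * Hand-maintained groups of synonymous verbs; verbs in the same group are
+     * treated as similar by checkVerbSimilarity.
+     */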
+    // Built once instead of on every call
+    private static final Set<Set<String>> VERB_SYNONYM_GROUPS = new HashSet<>(Arrays.asList(
+            new HashSet<>(Arrays.asList("穿", "戴", "套")),
+            new HashSet<>(Arrays.asList("买", "购买", "选购")),
+            new HashSet<>(Arrays.asList("洗", "清洗", "淋"))
+    ));
+
+    private static boolean checkVerbSimilarity(String verb1, String verb2) {
+        if (verb1 == null || verb2 == null) {
+            return false;
+        }
+
+        // Similar if equal, or if both verbs sit in the same synonym group
+        return verb1.equals(verb2) ||
+                VERB_SYNONYM_GROUPS.stream().anyMatch(group -> group.contains(verb1) && group.contains(verb2));
+    }
+
+    /**
+     * Find the N words most similar to the given text.
+     */
+    public static Collection<String> findSimilarWords(String text, int n) {
+        try {
+            log.info("Finding {} words similar to '{}'", n, text);
+
+            List<Term> terms = HanLP.segment(text);
+            log.info("Segmentation: {}", terms);
+
+            INDArray queryVector = getWeightedAverageVector(terms);
+            if (queryVector == null) {
+                log.warn("Could not build a valid vector for '{}'", text);
+                return Collections.emptyList();
+            }
+            log.info("Query vector built");
+
+            // Parallel similarity computation into a ConcurrentHashMap
+            log.info("Computing similarities in parallel...");
+            ConcurrentHashMap<String, Double> similarities = new ConcurrentHashMap<>(wordVectorsMap.size());
+            AtomicInteger processedCount = new AtomicInteger(0);
+
+            // Split the vector table into batches
+            int batchSize = 1000;
+            List<List<Map.Entry<String, INDArray>>> batches = new ArrayList<>();
+            List<Map.Entry<String, INDArray>> currentBatch = new ArrayList<>();
+
+            for (Map.Entry<String, INDArray> entry : wordVectorsMap.entrySet()) {
+                currentBatch.add(entry);
+                if (currentBatch.size() == batchSize) {
+                    batches.add(new ArrayList<>(currentBatch));
+                    currentBatch.clear();
+                }
+            }
+            if (!currentBatch.isEmpty()) {
+                batches.add(currentBatch);
+            }
+
+            // Process batches in parallel
+            batches.parallelStream().forEach(batch -> {
+                for (Map.Entry<String, INDArray> entry : batch) {
+                    String word = entry.getKey();
+                    INDArray vector = entry.getValue();
+
+                    // Reuse the cosine helper (includes the zero-norm guard)
+                    double similarity = calculateCosineSimilarity(queryVector, vector);
+                    similarities.put(word, similarity);
+
+                    int count = processedCount.incrementAndGet();
+                    if (count % 10000 == 0) {
+                        log.info("Processed {} / {} word vectors", count, wordVectorsMap.size());
+                    }
+                }
+            });
+
+            log.info("Similarities computed, selecting top {}...", n);
+
+            // Min-heap of size n picks the top N without a full sort
+            PriorityQueue<Map.Entry<String, Double>> topN = new PriorityQueue<>(
+                    n + 1, Map.Entry.<String, Double>comparingByValue()
+            );
+
+            for (Map.Entry<String, Double> entry : similarities.entrySet()) {
+                topN.offer(entry);
+                if (topN.size() > n) {
+                    topN.poll();
+                }
+            }
+
+            // Drain the heap; the smallest comes out first, so prepend
+            List<String> result = new ArrayList<>(n);
+            while (!topN.isEmpty()) {
+                result.add(0, topN.poll().getKey());
+            }
+
+            log.info("Found {} similar words: {}", result.size(), result);
+            return result;
+        } catch (Exception e) {
+            log.error("Failed to find similar words for: {}", text, e);
+            return Collections.emptyList();
+        }
+    }
+
+    /**
+     * Check whether a word is in the vocabulary.
+     */
+    public static boolean hasWord(String word) {
+        // Look up the vector map directly; the Word2Vec instance only carries
+        // the vocabulary, not the vectors themselves
+        return wordVectorsMap.containsKey(word);
+    }
+
+    /**
+     * Get the raw vector for a word, or null if it is unknown.
+     */
+    public static double[] getWordVector(String word) {
+        try {
+            INDArray vector = wordVectorsMap.get(word);
+            return vector == null ? null : vector.toDoubleVector();
+        } catch (Exception e) {
+            log.error("Failed to get word vector: {}", word, e);
+            return null;
+        }
+    }
+
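+    /**
+     * Heuristic question detection: question marks, interrogative words,
+     * sentence-final particles, and A-not-A patterns.
+     */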
+    private static boolean isQuestion(String text) {
+        // Question mark check
+        if (text.contains("?") || text.contains("?")) {
+            return true;
+        }
+
+        // Interrogative-word check
+        Set<String> questionWords = new HashSet<>(Arrays.asList(
+                "什么", "怎么", "怎样", "如何", "哪", "谁", "为什么", "几",
+                "多少", "是否", "能否", "可否", "吗", "呢", "吧", "啊",
+                "嘛", "呀", "哪里", "哪儿", "何时", "为何", "多久"
+        ));
+
+        // Segment and look for interrogative words
+        List<Term> terms = HanLP.segment(text);
+        for (Term term : terms) {
+            if (questionWords.contains(term.word)) {
+                log.debug("Interrogative word found: {}", term.word);
+                return true;
+            }
+        }
+
+        // Sentence-final particle check
+        if (terms.size() > 0) {
+            String lastWord = terms.get(terms.size() - 1).word;
+            Set<String> questionTones = new HashSet<>(Arrays.asList(
+                    "吗", "呢", "吧", "啊", "嘛", "呀", "么"
+            ));
+            if (questionTones.contains(lastWord)) {
+                log.debug("Sentence-final question particle found: {}", lastWord);
+                return true;
+            }
+        }
+
+        // A-not-A and tag-question patterns
+        String[] questionPatterns = {
+                "是不是", "对不对", "行不行", "要不要", "能不能", "可不可以",
+                "有没有", "对吧", "是吧", "好吧"
+        };
+        for (String pattern : questionPatterns) {
+            if (text.contains(pattern)) {
+                log.debug("Question pattern found: {}", pattern);
+                return true;
+            }
+        }
+
+        log.debug("No question features detected");
+        return false;
+    }
+
+    // Usage example
+    public static void main(String[] args) {
+        try {
+            // Compute similarity
+            String word1 = "你要选择什么产品?";
+            String word2 = "产品名称";
+            // double similarity = calculateSimilarity(word1, word2);
+            // System.out.println("Similarity: " + similarity);
+
+            // Find similar words
+            // Collection<String> similarWords = findSimilarWords(word1, 5);
+            // System.out.println("Similar words: " + similarWords);
+
+            // Similarity decision
+            boolean isSimilar = areWordsSimilar(word1, word2, 0.7);
+            System.out.println("Similar: " + isSimilar);
+
+        } catch (Exception e) {
+            e.printStackTrace();
+        }
+    }
+}