# nlp_Processor.py

import re
import unittest
from abc import ABC, abstractmethod

import nltk
import spacy
class NLPInterface(ABC):
    """
    Abstract base class for natural language processing (NLP) tasks.
    """

    @abstractmethod
    def __init__(self):
        """
        Initializer; subclasses must implement this.
        """
        pass

    @abstractmethod
    def split_sentences(self, text):
        """
        Split the input text into a list of sentences.

        Args:
            text (str): The text to split.

        Returns:
            list: The list of sentences after splitting.
        """
        pass

    @staticmethod
    def _print_sentences(sentences, count=(20, 20)):
        """
        Filter the split sentences by length, handling English and Chinese
        sentences differently.

        Args:
            sentences (list): The list of sentences after splitting.
            count (tuple): Thresholds as (minimum English word count,
                minimum Chinese character count).

        Returns:
            list: The sentences that pass the length filter.
        """
        filtered_sentences = []
        for sentence in sentences:
            # Strip tabs and newlines before measuring length.
            sentence = re.sub(r'[\t\n]', '', str(sentence))
            if re.match(r'^[a-zA-Z]', sentence):
                # English sentence: keep only if it has more than count[0] words.
                if len(sentence.split()) > count[0]:
                    filtered_sentences.append(sentence)
            elif re.match(r'^[\u4e00-\u9fa5]', sentence):
                # Chinese sentence: keep only if it has at least count[1] characters.
                if len(sentence) >= count[1]:
                    filtered_sentences.append(sentence)
        return filtered_sentences
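# Illustrative behavior of the filter above (an added sketch, not part of the
# original module): with count=(5, 5), an English sentence survives only if it
# has more than five words, and a Chinese sentence only if it has at least
# five characters.
#
#     NLPInterface._print_sentences(
#         ["Too short.", "This sentence has more than five words in it."],
#         (5, 5),
#     )
#     # -> ["This sentence has more than five words in it."]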
class SpacyNLP(NLPInterface):
    """
    NLP processing backed by spaCy.
    """

    def __init__(self):
        """
        Load the spaCy model. Requires the 'zh_core_web_sm' model to be
        installed (e.g. via `python -m spacy download zh_core_web_sm`).
        """
        self.nlp = spacy.load('zh_core_web_sm')

    def split_sentences(self, text):
        """
        Split text into a list of sentences with spaCy.

        Args:
            text (str): The text to split.

        Returns:
            list: The list of sentences after splitting and filtering.
        """
        doc = self.nlp(text)
        sentences = [sent.text for sent in doc.sents]
        sentences = self._print_sentences(sentences, (5, 5))
        return sentences
class NLTKNLP(NLPInterface):
    """
    NLP processing backed by NLTK.
    """

    def __init__(self):
        """
        Initialize the NLTK sentence tokenizer. nltk.sent_tokenize relies on
        the 'punkt' tokenizer data, so fetch it here if it is missing.
        """
        nltk.download('punkt', quiet=True)
        self.nlp = nltk.sent_tokenize

    def split_sentences(self, text):
        """
        Split text into a list of sentences with NLTK.

        Args:
            text (str): The text to split.

        Returns:
            list: The list of sentences after splitting and filtering.
        """
        sentences = [sent for sent in self.nlp(text) if sent]
        sentences = self._print_sentences(sentences, (5, 5))
        return sentences
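# Minimal usage sketch (an added helper, not in the original module; the name
# `_demo_split_sentences` is hypothetical). It assumes the NLTK 'punkt' data
# is available and shows how the (5, 5) thresholds drop short sentences.
def _demo_split_sentences():
    processor = NLTKNLP()
    sample = (
        "Natural language processing splits long passages into sentences. "
        "Each sentence can then be filtered by length. Too short."
    )
    # The two long sentences survive; "Too short." (two words) is dropped.
    print(processor.split_sentences(sample))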
class TestNLPProcessor(unittest.TestCase):
    def setUp(self):
        self.spacy_nlp = SpacyNLP()
        self.nltk_nlp = NLTKNLP()

    def test_spacy_split_sentences(self):
        text = "This is a test sentence. Another sentence for testing."
        expected_sentences = ["This is a test sentence.", "Another sentence for testing."]
        # The (5, 5) thresholds filter out sentences this short, so the strict
        # comparison stays disabled for now.
        # self.assertEqual(self.spacy_nlp.split_sentences(text), expected_sentences)
        print(self.spacy_nlp.split_sentences(text))

    def test_nltk_split_sentences(self):
        text = "This is a test sentence. Another sentence for testing."
        expected_sentences = ["This is a test sentence.", "Another sentence for testing."]
        # self.assertEqual(self.nltk_nlp.split_sentences(text), expected_sentences)
        print(self.nltk_nlp.split_sentences(text))


if __name__ == '__main__':
    unittest.main()