- # Import everything from backend/__init__.py
- from .. import *
- # Models
- from backend.models import Knowledgebase, DocumentKbm, File2document, File, Task, TaskSublist, KbmDocumentType
- from django.db.models import Count, Case, When, IntegerField, Q, Max
- from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
- from django.shortcuts import get_object_or_404
- from django.db import transaction
- from django.core.exceptions import ObjectDoesNotExist
- from backend.Service.MinioService import MinioService
- from tabulate import tabulate
- from scipy.spatial.distance import cosine
- from minio import Minio
- from django.conf import settings
- import pytesseract
- from DCbackend.utils.common import success, fail
- from DCbackend.settings import MILVUS_HOST, MILVUS_PORT, VECTOR_DIMENSION, IPINFO, MILVUS_USER, MILVUS_PASSWORD,MINIO_ENDPOINT,MINIO_ACCESS_KEY,MINIO_SECRET_KEY,MINIO_SECURE
- # Connections, collections, schemas, data types and utilities from pymilvus
- from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType, utility
- from bs4 import BeautifulSoup
- # Import the rabbitmq, pdf_utils and ocr_utils helpers from the service_utils module
- from .service_utils import rabbitmq as rabbitmq_Process, pdf_utils, ocr_utils
- import chardet
- import threading # thread-based parallelism for multi-threaded processing
- os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'
- class DocumentQueue:
- QUEUE_KEY = "document_process_queue"
- minio_client = Minio(
- MINIO_ENDPOINT,
- access_key=MINIO_ACCESS_KEY,
- secret_key=MINIO_SECRET_KEY,
- secure=MINIO_SECURE # True or False depending on your MinIO setup
- )
- if os.name == 'nt': # Windows
- pytesseract.pytesseract.tesseract_cmd = r'D:\Program Files\OCR\tesseract.exe'
- else: # macOS or Linux
- pytesseract.pytesseract.tesseract_cmd = r'/usr/bin/tesseract'
- class KbmService:
- NLP_ZH = spacy.load('zh_core_web_sm')
- bert_model = None
- bert_tokenizer = None
- # Embedding model API endpoint
- API_URL = f"http://{IPINFO}:11434/api/embeddings"
- semaphore = threading.Semaphore(4)
-
- # Generic RabbitMQ helper: push a message onto a queue
- @staticmethod
- def send_to_rabbitmq(queue_name, message):
- """
- Send a message to the given RabbitMQ queue.
- Args:
- queue_name (str): name of the target RabbitMQ queue
- message (dict): message to send
- Returns:
- bool: True if the message was sent successfully, False otherwise
- """
- return rabbitmq_Process.send_to_rabbitmq(queue_name, message)
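- # Illustrative usage sketch (message fields mirror what KbmService.callback expects; values are examples):
- # KbmService.send_to_rabbitmq(settings.RABBITMQ_QUEUE_NAME, {"document_id": 1, "start_page": 1, "end_page": -1, "max_tokens": 2048})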
- @staticmethod
- def selectBucketInfo(request):
- """
- Fetch knowledge base info, including per-type document counts.
- Args:
- request (HttpRequest): request object carrying the query conditions
- Returns:
- dict: on success, all knowledge bases with their document counts; on failure, an error message
- """
- #user_id = request.POST.get("user_id")
- knowledgebases = Knowledgebase.objects.filter().exclude(status=4).order_by('-create_time').values('id', 'create_time', 'name', 'doc_num', 'description')
- result = [] # initialize the result list
- for kb in knowledgebases:
- # Count the documents of each type in this knowledge base
- counts = DocumentKbm.objects.filter(kb_id=kb['id']).aggregate(
- word_count=Count(Case(When(type__in=['doc', 'docx'], then=1), output_field=IntegerField())),
- pdf_count=Count(Case(When(type='pdf', then=1), output_field=IntegerField())),
- excel_count=Count(Case(When(type__in=['xls', 'xlsx'], then=1), output_field=IntegerField()))
- )
- # Copy the knowledge base row so the original data is not mutated
- kb_data = kb.copy()
- # Merge the per-type counts into the copy
- kb_data.update(counts)
- # Append the enriched row to the result list
- result.append(kb_data)
- # Return all knowledge bases with their document counts
- return success(result)
-
- @staticmethod
- def getFileInfo(request):
- """
- Fetch document info.
- Reads bucket_id, page, pageSize, object_name, run, type and doc_type_id from the POST data
- and uses them to query and paginate documents. Filters cover knowledge base ID, document name,
- run flag, document type and document type ID. The response contains the documents plus pagination info.
- Args:
- request (HttpRequest): request object carrying the query conditions.
- Returns:
- dict: on success, the documents and pagination info; on failure, an error message.
- """
- try:
- bucket_id = request.POST.get("bucket_id")
- page = request.POST.get("page", 1)
- per_page = request.POST.get("pageSize", 10)
- object_name = request.POST.get("object_name", "")
- run = request.POST.get("run", "")
- type = request.POST.get("type", "")
- doc_type_id = request.POST.get("doc_type_id")
- if not bucket_id:
- return fail("bucket_id为空")
- # Make sure page and per_page are integers
- page = int(page)
- per_page = int(per_page)
- # Build the base query
- query = Q(kb_id=bucket_id) & Q(name__icontains=object_name) & Q(run__icontains=run) & Q(type__icontains=type) & ~Q(status=4)
- # Only filter by doc_type_id when it is provided
- if doc_type_id:
- query &= Q(doc_type_id=doc_type_id)
- # Query and sort the documents
- documents = DocumentKbm.objects.filter(query).order_by('-create_time')
- # Build the paginator
- paginator = Paginator(documents, per_page)
- try:
- # Fetch the requested page
- documents_page = paginator.page(page)
- except PageNotAnInteger:
- # Page is not an integer: fall back to the first page
- documents_page = paginator.page(1)
- except EmptyPage:
- # Page is out of range: fall back to the last page
- documents_page = paginator.page(paginator.num_pages)
- # Materialize the page as a list of dicts
- result = list(documents_page.object_list.values())
-
- # This per-row loop issues one extra MySQL query per document
- # (should later be changed to load the data into memory and write it back in one pass)
- # reported by qman
- for info in result:
- document_id = info['id']
- max_page = TaskSublist.objects.filter(doc_id=document_id).aggregate(Max('page_number'))['page_number__max']
- info['max_page'] = max_page if max_page is not None else 0
- pagination_info = {
- 'total_count': paginator.count,
- 'total_pages': paginator.num_pages,
- 'total_size': per_page,
- 'current_page': documents_page.number,
- 'has_next': documents_page.has_next(),
- 'has_previous': documents_page.has_previous()
- }
- data = {
- 'pagination': pagination_info,
- 'documents': result
- }
- return success(data)
- except Exception as e:
- return fail("获取信息失败")
-
- @staticmethod
- @transaction.atomic
- def updateName(request):
- """
- Rename a document.
- Reads the new name and the document ID from the POST data, then updates the matching
- DocumentKbm object and its associated File object. If the new name has no extension,
- the original file's extension is appended.
- Args:
- request (HttpRequest): request object carrying the new name and the document ID.
- Returns:
- dict: success or failure response.
- """
- try:
- new_name = request.POST.get("new_name")
- document_id = request.POST.get("document_id")
- if not new_name or not document_id:
- return fail("新名称和文件ID不能为空")
- # Fetch and update the DocumentKbm instance
- document = get_object_or_404(DocumentKbm, id=document_id)
- location = document.location
- # Extension of the original file
- _, original_extension = os.path.splitext(location)
- # If the new name has no extension, reuse the original one
- _, new_extension = os.path.splitext(new_name)
- if not new_extension:
- new_name = f"{new_name}{original_extension}"
- document.name = new_name
- document.save()
- # Fetch the associated File2document and File
- file2doc = File2document.objects.filter(document_id=document_id).first()
- if file2doc:
- file = get_object_or_404(File, id=file2doc.file_id)
- file.name = new_name
- file.save()
- else:
- # Log a warning: no associated File was found
- logger.warning(f"Warning: No associated File found for DocumentKbm with id {document_id}")
- return success("文件名更新成功")
- except ObjectDoesNotExist:
- return fail("指定的文件或关联文件不存在")
- except Exception as e:
- return fail(f"更新文件名失败: {str(e)}")
-
- @staticmethod
- @transaction.atomic
- def deleteDocument(request):
- """
- Delete a document.
- Reads the document ID from the POST data, marks the document and its associated file as
- deleted, then refreshes the knowledge base's document count. Finally, cleans up the related
- data in Milvus and disconnects from Milvus.
- Args:
- request (HttpRequest): request object carrying the document ID.
- Returns:
- dict: success or failure response.
- """
- document_id = request.POST.get("document_id")
- document = get_object_or_404(DocumentKbm, id=document_id)
- document.status = 4
- document.save()
- file2doc = File2document.objects.filter(document_id=document_id).first()
- if file2doc:
- file = get_object_or_404(File, id=file2doc.file_id)
- file.status = 4
- file.save()
- kb_id = document.kb_id
- new_count = DocumentKbm.objects.filter(kb_id=kb_id).exclude(status=4).count()
- Knowledgebase.objects.filter(id=kb_id).update(doc_num=new_count)
- try:
- # TODO: split into separate helpers (Shifeng):
- # 1. delete from MinIO
- # 2. delete from Milvus
- # Connect to Milvus
- connections.connect("default", host=MILVUS_HOST, port=MILVUS_PORT, user=MILVUS_USER, password=MILVUS_PASSWORD)
- kmb = Knowledgebase.objects.filter(id=kb_id).first()
- # clearPreviousData expects the bucket/collection name, i.e. the knowledge base location
- KbmService.clearPreviousData(document_id, kmb.location)
- return success("删除成功")
- except Exception as e:
- return fail(f"删除milvus集合时发生错误: {str(e)}")
- finally:
- # Disconnect from Milvus
- connections.disconnect("default")
-
- @staticmethod
- def getUrl(request):
- """
- Get the URL for a document.
- Reads the document ID from the POST data, fetches the matching DocumentKbm object and its
- Knowledgebase, then builds the document URL via MinioService.geturl.
- Args:
- request (HttpRequest): request object carrying the document ID.
- Returns:
- dict: on success, the document URL; on failure, an error message.
- """
- try:
- document_id = request.POST.get("document_id")
- if not document_id:
- return fail("文档ID不能为空")
- # Fetch the DocumentKbm object
- document = get_object_or_404(DocumentKbm, id=document_id)
- object_name = document.location
- # Fetch the matching Knowledgebase object
- knowledgebase = get_object_or_404(Knowledgebase, id=document.kb_id)
- bucket_name = knowledgebase.location
- return MinioService.geturl(object_name, bucket_name)
- except ObjectDoesNotExist:
- return fail("指定的文档或知识库不存在")
- except Exception as e:
- return fail(f"获取URL失败: {str(e)}")
-
- # New RabbitMQ queue entry point
- @staticmethod
- def analysis(request):
- """
- Parse the request and enqueue it on the RabbitMQ queue for processing.
- Args:
- request (object): the request to analyze.
- Returns:
- the response produced by rabbitmq_Process.analysis.
- """
- return rabbitmq_Process.analysis(request, KbmService)
- @staticmethod
- def check_and_process_queue():
- """
- Check the RabbitMQ queue and process its messages, using multiple threads.
- Returns:
- None
- """
- KbmService.should_stop = False
- while not KbmService.should_stop:
- try:
- if KbmService.queue_has_messages():
- KbmService.process_queue()
- else:
- # logger.info("队列为空,等待下一次检查...")
- time.sleep(60) # queue is empty: wait 60 seconds before checking again
- except Exception as e:
- logger.error(f"检查队列时发生错误: {str(e)}")
- time.sleep(60) # on error, wait 60 seconds and retry
- @staticmethod
- def queue_has_messages():
- """
- Check whether the RabbitMQ queue has any messages.
- Returns:
- bool: True if the queue has messages, False otherwise.
- """
- try:
- connection = KbmService.create_connection()
- channel = connection.channel()
- queue = channel.queue_declare(queue=settings.RABBITMQ_QUEUE_NAME, passive=True)
- message_count = queue.method.message_count
- connection.close()
- return message_count > 0
- except Exception as e:
- logger.error(f"检查队列消息数量时发生错误: {str(e)}")
- return False
-
- @staticmethod
- def create_connection():
- """
- Create a connection to the RabbitMQ server.
- Returns:
- pika.BlockingConnection: the connection object.
- """
- return pika.BlockingConnection(pika.ConnectionParameters(
- host=settings.RABBITMQ_HOST,
- port=settings.RABBITMQ_PORT,
- credentials=pika.PlainCredentials(
- settings.RABBITMQ_USER,
- settings.RABBITMQ_PASSWORD
- )
- ))
-
- @staticmethod
- def process_queue():
- logger.info("队列中有消息,开始处理...")
- KbmService.connection = KbmService.create_connection()
- KbmService.channel = KbmService.connection.channel()
- KbmService.channel.queue_declare(queue=settings.RABBITMQ_QUEUE_NAME, durable=True)
- KbmService.channel.basic_qos(prefetch_count=4)
- KbmService.channel.basic_consume(queue=settings.RABBITMQ_QUEUE_NAME, on_message_callback=KbmService.callback)
- try:
- KbmService.channel.start_consuming()
- except KeyboardInterrupt:
- KbmService.should_stop = True
- finally:
- KbmService.close_connection()
- @staticmethod
- def callback(ch, method, properties, body):
- """
- Handle one message from the RabbitMQ queue.
- Args:
- ch (pika.channel.Channel): RabbitMQ channel object.
- method (pika.spec.Basic.Deliver): delivery information.
- properties (pika.spec.BasicProperties): message properties.
- body (bytes): message body.
- Raises:
- Exception: any exception raised while handling the message.
- """
- with KbmService.semaphore:
- try:
- job = json.loads(body)
- document_id = job['document_id']
- start_page = job['start_page']
- end_page = job['end_page']
- max_tokens = job['max_tokens']
- logger.info(f"开始执行解析文档 {document_id}")
- KbmService.async_analysis(document_id, start_page, end_page, max_tokens)
- ch.basic_ack(delivery_tag=method.delivery_tag)
- except Exception as e:
- logger.error(f"处理队列消息时发生错误: {str(e)}")
- ch.basic_nack(delivery_tag=method.delivery_tag, requeue=True)
- # Stop consuming once the queue has been drained
- if not KbmService.queue_has_messages():
- logger.info("队列处理完毕,停止消费...")
- ch.stop_consuming()
- @staticmethod
- def close_connection():
- if KbmService.channel:
- try:
- KbmService.channel.close()
- except Exception:
- pass
- if KbmService.connection:
- try:
- KbmService.connection.close()
- except Exception:
- pass
- KbmService.channel = None
- KbmService.connection = None
- @staticmethod
- def stop_service():
- KbmService.should_stop = True
- if KbmService.channel:
- KbmService.channel.stop_consuming()
- KbmService.close_connection()
- @staticmethod
- def get_embedding_excel(text, target_dim=768):
- """
- Get the embedding vector for a text.
- Calls the embeddings API and interpolates the result to the target dimension when needed.
- Args:
- text (str): text to embed.
- target_dim (int, optional): target dimension. Defaults to 768.
- Returns:
- list: embedding vector of length target_dim.
- Raises:
- Exception: any exception raised while embedding the text.
- """
- try:
- if not text or not text.strip():
- logger.warning("Empty text provided for embedding. Returning zero vector.")
- return np.zeros(target_dim).tolist()
- # Make sure the text is valid UTF-8
- encoded_text = text.encode('utf-8').decode('utf-8')
- payload = {
- "model": "nomic-embed-text:latest",
- "prompt": encoded_text
- }
- headers = {"Content-Type": "application/json"}
- response = requests.post(KbmService.API_URL, json=payload, headers=headers)
- logger.info(f"response::::{response}")
- response.raise_for_status()
- embedding_data = response.json()
- if 'embedding' not in embedding_data:
- raise ValueError(f"API 响应中没有找到嵌入向量. 响应内容: {embedding_data}")
- embedding = embedding_data['embedding']
- original_embedding = np.array(embedding)
- if len(original_embedding) == target_dim:
- return original_embedding.tolist()
- # Interpolate when the original dimension differs from the target
- original_indices = np.arange(len(original_embedding))
- new_indices = np.linspace(0, len(original_embedding) - 1, target_dim)
- f = interpolate.interp1d(original_indices, original_embedding)
- extended_embedding = f(new_indices)
- return extended_embedding.tolist()
- except requests.exceptions.RequestException as e:
- logger.error(f"API 请求错误: {str(e)}")
- raise
- except ValueError as e:
- logger.error(f"值错误: {str(e)}")
- raise
- except Exception as e:
- logger.error(f"获取文本嵌入时发生意外错误: {str(e)}")
- raise
- @classmethod
- def get_embedding_pdf(cls, text, target_dim=768, max_retries=3, backoff_factor=0.3):
- """
- Get the embedding vector for a text, padded or truncated to the target dimension, with retries.
- Args:
- text (str): text to embed.
- target_dim (int, optional): target dimension. Defaults to 768.
- max_retries (int, optional): maximum number of retries. Defaults to 3.
- backoff_factor (float, optional): backoff factor per retry. Defaults to 0.3.
- Returns:
- np.ndarray: padded or truncated embedding vector.
- """
- # Strip junk characters first
- text = KbmService.post_process_text(text)
- for attempt in range(max_retries):
- try:
- payload = {
- "model": "nomic-embed-text:latest",
- "prompt": text
- }
- headers = {
- "Content-Type": "application/json"
- }
- response = requests.post(cls.API_URL, json=payload, headers=headers, timeout=30)
- sleep(0.3)
- response.raise_for_status()
- result = response.json()
- embedding = result.get('embedding')
-
- if embedding is None:
- raise ValueError("API 响应中没有找到嵌入向量")
- embedding_array = np.array(embedding)
- current_dim = embedding_array.shape[0]
- if current_dim < target_dim:
- padded_embedding = np.pad(embedding_array, (0, target_dim - current_dim), 'constant')
- logger.info(f"向量已从 {current_dim} 维填充到 {target_dim} 维")
- return padded_embedding
- elif current_dim > target_dim:
- truncated_embedding = embedding_array[:target_dim]
- logger.info(f"向量已从 {current_dim} 维截断到 {target_dim} 维")
- return truncated_embedding
- else:
- return embedding_array
- except RequestException as e:
- logger.error(f"API 请求错误 (尝试 {attempt + 1}/{max_retries}): {e}")
- if attempt == max_retries - 1:
- raise
- time.sleep(backoff_factor * (2 ** attempt))
- except ValueError as e:
- logger.error(f"解析响应错误: {e}")
- raise
- except Exception as e:
- logger.error(f"获取文本嵌入时发生未知错误: {e}")
- raise
- raise Exception("达到最大重试次数,无法获取嵌入")
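- # Illustrative usage sketch (assumes the embeddings endpoint at API_URL is reachable):
- # vec = KbmService.get_embedding_pdf("测试文本", target_dim=768)
- # assert vec.shape == (768,) # always padded or truncated to exactly target_dim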
- @staticmethod
- def split_text_by_semantic(sentences, max_tokens, bucket_name, similarity_threshold=0.5, batch_size=1000):
- logger.info("开始分割文本并保存到向量数据库")
- chunks = []
- if not sentences:
- return chunks
- try:
- # object1/object2 are reserved for future fields (column names cannot be changed in place)
- source = "知识库"
- object1 = "some_object1"
- object2 = "some_object2"
- # Connect to Milvus
- connections.connect("default", host=MILVUS_HOST, port=MILVUS_PORT, user=MILVUS_USER, password=MILVUS_PASSWORD)
- collection_name = f"{bucket_name}"
- collection = KbmService._get_or_create_collection(collection_name)
- current_chunk = sentences[0]
- current_embedding = KbmService.get_embedding_pdf(current_chunk, target_dim=VECTOR_DIMENSION)
- batch_data = []
- for sentence in sentences[1:]:
- if len(sentence) > 10:
- sentence_embedding = KbmService.get_embedding_pdf(sentence, target_dim=VECTOR_DIMENSION)
- similarity = 1 - cosine(current_embedding, sentence_embedding)
- if len(current_chunk) + len(sentence) <= max_tokens and similarity >= similarity_threshold:
- current_chunk += sentence
- current_embedding = (current_embedding + sentence_embedding) / 2
- else:
- batch_data.append((current_chunk, current_embedding))
- if len(batch_data) >= batch_size:
- ids = KbmService._insert_batch(collection, batch_data,source,object1,object2)
- sleep(1)
- logger.info("减少milvus压力睡眠1秒")
- if ids is not None:
- chunks.extend([{'content': chunk, 'milvus_id': id} for (chunk, _), id in zip(batch_data, ids)])
- else:
- logger.error("向 Milvus 插入批量数据失败,这批数据将被跳过")
- batch_data = []
- current_chunk = sentence
- current_embedding = sentence_embedding
- # Handle the final chunk and any remaining batched data
- if current_chunk:
- batch_data.append((current_chunk, current_embedding))
- if batch_data:
- ids = KbmService._insert_batch(collection, batch_data,source,object1,object2)
- sleep(1)
- logger.info("减少milvus压力睡眠1秒")
- if ids is not None:
- chunks.extend([{'content': chunk, 'milvus_id': id} for (chunk, _), id in zip(batch_data, ids)])
- else:
- logger.error("向 Milvus 插入批量数据失败,这批数据将被跳过")
- KbmService._create_index_and_load(collection)
- logger.info(f"成功将{len(chunks)}个文本块分割并保存到Milvus")
- return chunks
- except Exception as e:
- logger.error(f"处理文本时发生错误: {str(e)}")
- raise
- finally:
- connections.disconnect("default")
- @staticmethod
- def _get_or_create_collection(collection_name):
- if not utility.has_collection(collection_name):
- fields = [
- FieldSchema(name="id", dtype=DataType.INT64, is_primary=True, auto_id=True),
- FieldSchema(name="source", dtype=DataType.VARCHAR, max_length=65000),
- FieldSchema(name="object1", dtype=DataType.VARCHAR, max_length=65000),
- FieldSchema(name="object2", dtype=DataType.VARCHAR, max_length=65000),
- FieldSchema(name="content", dtype=DataType.VARCHAR, max_length=65000),
- FieldSchema(name="embedding", dtype=DataType.FLOAT_VECTOR, dim=VECTOR_DIMENSION)
- ]
- schema = CollectionSchema(fields, "Semantic text chunks collection")
- return Collection(name=collection_name, schema=schema)
- return Collection(name=collection_name)
- @staticmethod
- def _split_sentences(text):
- sentences = re.split('([。!?])', text)
- sentences = [''.join(i) for i in zip(sentences[0::2], sentences[1::2] + [''])]
- return [s.strip() for s in sentences if s.strip()]
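- # Worked example of the splitting above:
- # KbmService._split_sentences("今天天气好。你好吗?出发!")
- # -> ["今天天气好。", "你好吗?", "出发!"] (the delimiters 。!? stay attached to their sentence)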
-
- @staticmethod
- def _insert_batch(collection, batch_data, source, object1, object2):
- try:
- entities = [
- [source] * len(batch_data), # source
- [object1] * len(batch_data), # object1
- [object2] * len(batch_data), # object2
- [chunk for chunk, _ in batch_data],
- [embedding.tolist() for _, embedding in batch_data]
- ]
- # Insert the batch and capture the insert result
- insert_result = collection.insert(entities)
- # IDs assigned to the inserted rows
- inserted_ids = insert_result.primary_keys
- logger.info(f"成功插入{len(batch_data)}个文本块到Milvus")
- return inserted_ids
- except Exception as e:
- logger.error(f"批量插入数据时发生错误: {str(e)}")
- @staticmethod
- def _create_index_and_load(collection):
- index_params = {
- "index_type": "IVF_FLAT",
- "metric_type": "L2",
- "params": {"nlist": 768}
- }
- collection.create_index("embedding", index_params)
- collection.load()
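- # Note: the index above is IVF_FLAT over L2 distance with nlist=768 buckets (this project's choice).
- # A query against the loaded collection would then look roughly like this sketch (nprobe=16 is a typical value, not fixed by this code):
- # collection.search(data=[query_vec], anns_field="embedding", param={"metric_type": "L2", "params": {"nprobe": 16}}, limit=5)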
- @staticmethod
- def async_analysis(document_id, start_page, end_page, max_tokens):
- """
- Analyze a document's content asynchronously.
- :param document_id: unique identifier of the document.
- :param start_page: first page to analyze.
- :param end_page: last page to analyze.
- :param max_tokens: maximum number of tokens per chunk.
- run status codes: 1: parsed, 2: unknown, 3: success, 4: failed, 5: pending
- """
- start_time = time.time()
- excel_status = 1
- result = []
- logger.info(f"开始处理文档 {document_id}")
- try:
- DocumentKbm.objects.filter(id=document_id).update(run=1) # mark the document as being processed
- document = get_object_or_404(DocumentKbm, id=document_id)
- object_name = document.location
- file_extension = object_name.split('.')[-1].lower()
-
- knowledgebase = get_object_or_404(Knowledgebase, id=document.kb_id)
- bucket_name = knowledgebase.location
-
- logger.info(f"文档信息: object_name:{object_name}, file_extension:{file_extension}, bucket_name:{bucket_name}")
-
-
- KbmService.clearPreviousData(document_id, bucket_name)
-
- response = minio_client.get_object(bucket_name, object_name)
- if not response:
- raise ValueError(f"MinIO响应为空。行号:{sys._getframe().f_lineno}")
-
- response_content = response.read()
- if not response_content:
- raise ValueError(f"从MinIO读取的文件内容为空。行号:{sys._getframe().f_lineno}")
-
- file_content = BytesIO(response_content)
-
- if file_content.getbuffer().nbytes == 0:
- raise ValueError(f"文件内容为空。行号:{sys._getframe().f_lineno}")
-
- if file_extension in ['xls', 'xlsx']:
- logger.info(f"开始处理Excel文件: {document_id}")
- result, excel_status = KbmService.process_excel(file_content, document_id, max_tokens, bucket_name)
- elif file_extension == 'pdf':
- logger.info(f"开始处理PDF文件: {document_id}")
- result = KbmService.process_pdf(file_content, document_id, max_tokens, bucket_name)
- elif file_extension == 'md':
- logger.info(f"开始处理MD文件: {document_id}")
- result = KbmService.process_markdown(file_content, document_id, max_tokens, bucket_name)
- elif file_extension in ['doc', 'docx']:
- logger.info(f"开始将DOC/DOCX文件转换为PDF: {document_id}")
- pdf_content = KbmService.convert_doc_to_pdf(file_content)
- result = KbmService.process_pdf(pdf_content, document_id, max_tokens, bucket_name)
-
- if not result:
- raise ValueError("处理结果为空。")
-
- KbmService.saveTask(document_id, len(result))
- end_time = time.time()
- execution_time = round(end_time - start_time, 2)
- KbmService.updateDocument(max_tokens, len(result), document_id, execution_time)
-
- if excel_status == 6:
- DocumentKbm.objects.filter(id=document_id).update(run=6) # Excel special case
- logger.info(f"文档 {document_id} 更新完成,状态设置为6(Excel特殊情况)")
- else:
- DocumentKbm.objects.filter(id=document_id).update(run=3) # 3 means success
- logger.info(f"文档 {document_id} 更新完成,状态设置为3(成功)")
- 861
- logger.info(f"文档 {document_id} 处理完成")
-
- except Exception as e:
- logger.error(f"异常堆栈跟踪:{str(e)}")
- traceback.print_exc()
- DocumentKbm.objects.filter(id=document_id).update(run=4) # mark the document as failed
-
- @staticmethod
- def convert_doc_to_pdf(file_content):
- temp_input_path = None
- temp_output_dir = None
- try:
- # Write the upload to a temporary file
- with tempfile.NamedTemporaryFile(delete=False, suffix='.docx') as temp_input:
- temp_input.write(file_content.getvalue())
- temp_input_path = temp_input.name
- temp_output_dir = tempfile.mkdtemp()
- # Locate the LibreOffice executable
- libreoffice_path = KbmService.get_libreoffice_path()
- if not libreoffice_path:
- raise FileNotFoundError("找不到 LibreOffice 可执行文件")
- # Convert to PDF
- pdf_path = KbmService.run_libreoffice_conversion(libreoffice_path, temp_input_path, temp_output_dir)
- # Read and return the PDF content
- with open(pdf_path, 'rb') as pdf_file:
- return BytesIO(pdf_file.read())
- except Exception as e:
- logger.error(f"将文档转换为 PDF 时出错: {str(e)}", exc_info=True)
- return BytesIO()
- finally:
- KbmService.cleanup_temp_files(temp_input_path, temp_output_dir)
- @staticmethod
- def get_libreoffice_path():
- system = platform.system()
- if system == "Windows":
- libreoffice_paths = [r"E:\tool\LibreOffice\program\soffice.exe"]
- return next((path for path in libreoffice_paths if os.path.exists(path)), None)
- else: # Linux or macOS
- for path in ['/usr/bin/libreoffice', '/usr/bin/soffice', '/opt/libreoffice/program/soffice']:
- if os.path.exists(path):
- return path
- return shutil.which('libreoffice') or shutil.which('soffice')
- @staticmethod
- def run_libreoffice_conversion(libreoffice_path, input_path, output_dir):
- cmd = [
- libreoffice_path,
- '--headless',
- '--convert-to', 'pdf:writer_pdf_Export:{"PageSize":{"Width":21000,"Height":29700}}',
- '--outdir', output_dir,
- input_path
- ]
- try:
- env = os.environ.copy()
- env['HOME'] = '/mnt/ql_api/tmp' # use a scratch HOME directory
- env['LC_ALL'] = 'C' # use a standard locale
- sleep(0.3)
- result = subprocess.run(cmd, check=True, capture_output=True, text=True, timeout=60, env=env)
- logger.info(f"LibreOffice 转换输出: {result.stdout}")
- pdf_filename = os.path.splitext(os.path.basename(input_path))[0] + '.pdf'
- pdf_path = os.path.join(output_dir, pdf_filename)
- if not os.path.exists(pdf_path):
- raise FileNotFoundError(f"PDF 文件未生成。输出目录内容: {os.listdir(output_dir)}")
- # Check the page count with PyPDF2
- with open(pdf_path, 'rb') as pdf_file:
- pdf_reader = PyPDF2.PdfReader(pdf_file)
- page_count = len(pdf_reader.pages)
- logger.info(f"生成的 PDF 文件页数: {page_count}")
- pdf_size = os.path.getsize(pdf_path)
- if pdf_size < 1000:
- logger.warning(f"生成的 PDF 文件大小异常小: {pdf_size} bytes")
- return pdf_path
- except subprocess.TimeoutExpired:
- raise TimeoutError("LibreOffice 转换超时")
- except subprocess.CalledProcessError as e:
- raise RuntimeError(f"LibreOffice 转换失败: {e.output}")
- @staticmethod
- def cleanup_temp_files(temp_input_path, temp_output_dir):
- if temp_input_path and os.path.exists(temp_input_path):
- os.remove(temp_input_path)
- if temp_output_dir and os.path.exists(temp_output_dir):
- shutil.rmtree(temp_output_dir)
- @staticmethod
- def process_excel(file_content, document_id, max_tokens, bucket_name):
- result = []
- collection_name = f"{bucket_name}"
- status = 1
- # object1/object2 are reserved for future fields (column names cannot be changed in place)
- source = "知识库"
- object1 = "some_object1"
- object2 = "some_object2"
- def warning_catcher(message, category, filename, lineno, file=None, line=None):
- nonlocal status
- if category == UserWarning:
- if "File contains an invalid specification for 0" in str(message) or \
- "Defined names for sheet index 0 cannot be located" in str(message):
- status = 6
- logger.error(f"Warning: {message}")
- warnings.showwarning = warning_catcher
- try:
- excel_file = pd.ExcelFile(file_content)
- except Exception as e:
- logger.error(f"Error reading Excel file: {str(e)}")
- return result
- try:
- connections.connect("default", host=MILVUS_HOST, port=MILVUS_PORT,user=MILVUS_USER,password=MILVUS_PASSWORD)
- collection = KbmService._get_or_create_collection(collection_name)
- for sheet_name in excel_file.sheet_names:
- df = pd.read_excel(excel_file, sheet_name=sheet_name)
- if df.empty:
- logger.warning(f"Sheet '{sheet_name}' is empty. Skipping.")
- continue
- logger.info(f"Processing sheet '{sheet_name}' with shape {df.shape}")
- markdown_content = KbmService._excel_to_markdown(df, sheet_name)
- chunks = KbmService._split_markdown(markdown_content, max_tokens)
- for chunk_number, chunk_content in enumerate(chunks, start=1):
- try:
- if not chunk_content.strip():
- logger.warning(f"Empty chunk {chunk_number} in sheet '{sheet_name}'. Skipping.")
- continue
- embedding = KbmService.get_embedding_excel(chunk_content, target_dim=VECTOR_DIMENSION)
- if isinstance(embedding, (list, np.ndarray)) and len(embedding) == VECTOR_DIMENSION:
- sleep(1)
- logger.info("减少milvus压力睡眠1秒")
- milvus_id = KbmService._insert_data(collection, chunk_content, embedding,source,object1,object2)
- else:
- logger.error(f"Invalid embedding format for chunk {chunk_number} of sheet {sheet_name}.")
- continue
- KbmService.saveTaskSublist(
- document_id=document_id,
- name=f"sheet_{sheet_name}",
- page_number=1,
- chunk_number=chunk_number,
- content=chunk_content,
- milvus_id=milvus_id
- )
- result.append({
- 'page_number': 1,
- 'chunk_number': chunk_number,
- })
- except Exception as e:
- logger.error(f"Error processing chunk {chunk_number} of sheet {sheet_name}: {str(e)}")
- logger.error(f"Chunk content: {chunk_content}")
- logger.exception("Detailed error information:")
- except Exception as e:
- logger.error(f"Error processing Excel file: {str(e)}")
- logger.error("Detailed error information:")
- logger.error(traceback.format_exc())
- status = 6 # set status to 6: processing failed
- raise
- finally:
- connections.disconnect("default")
- return result, status
- # Convert one Excel sheet to a Markdown table
- @staticmethod
- def _excel_to_markdown(df, sheet_name):
- if df.empty:
- return f"# {sheet_name}\n\n表格为空"
- headers = df.columns.tolist()
- data = df.values.tolist()
- # Convert every cell to a string
- data = [[str(cell) for cell in row] for row in data]
- markdown = f"# {sheet_name}\n\n"
- markdown += tabulate(data, headers=headers, tablefmt="pipe", showindex=False)
- return markdown
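- # Worked example: a sheet "Sales" with columns A and B and one row (1, 2) renders roughly as:
- # # Sales
- #
- # | A | B |
- # |-----|-----|
- # | 1 | 2 |
- # (exact column padding and alignment markers depend on tabulate's "pipe" format)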
- # Excel splitting strategy
- @staticmethod
- def _split_json(json_str, max_tokens):
- # Naive splitting strategy; tune as needed
- data = json.loads(json_str)
- chunks = []
- current_chunk = []
- current_size = 0
- for item in data:
- item_str = json.dumps(item)
- item_size = len(item_str)
- if current_size + item_size > max_tokens and current_chunk:
- chunks.append(json.dumps(current_chunk))
- current_chunk = []
- current_size = 0
- current_chunk.append(item)
- current_size += item_size
- if current_chunk:
- chunks.append(json.dumps(current_chunk))
- return chunks
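- # Worked example: with max_tokens=40, a list of many small objects is re-serialized into
- # chunks like '[{"a": 1}, {"a": 2}]' then '[{"a": 3}]'; note the size check counts characters
- # of the serialized items, a rough stand-in for tokens.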
- @staticmethod
- def _split_markdown(markdown_content, max_tokens):
- chunks = []
- current_chunk = ""
- lines = markdown_content.split('\n')
- for line in lines:
- if len(current_chunk) + len(line) + 1 > max_tokens:
- if current_chunk:
- chunks.append(current_chunk.strip())
- current_chunk = line
- else:
- current_chunk += '\n' + line if current_chunk else line
- if current_chunk:
- chunks.append(current_chunk.strip())
- return chunks
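- # Worked example: with max_tokens=8 and lines ["# T", "row1", "row2"], the first chunk becomes
- # "# T\nrow1" (3 + 1 + 4 = 8 characters) and "row2" starts a new chunk; character length is
- # used here as a cheap proxy for a token count.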
- # Milvus insert layout for Excel chunks
- @staticmethod
- def _insert_data(collection, content, embedding, source, object1, object2):
- try:
- data = [
- [source], # source field
- [object1], # object1 field
- [object2], # object2 field
- [content], # content field
- [embedding] # embedding field
- ]
- insert_result = collection.insert(data)
- logger.info(f"Inserted 1 record into Milvus")
- return insert_result.primary_keys[0] # return the ID of the inserted row
- except Exception as e:
- logger.error(f"Error inserting data into Milvus: {str(e)}")
- raise
- # Create a Milvus index if one does not exist
- @staticmethod
- def _create_index_if_not_exists(collection):
- if not collection.has_index():
- index_params = {
- "index_type": "IVF_FLAT",
- "metric_type": "L2",
- "params": {"nlist": 768}
- }
- collection.create_index("embedding", index_params)
- @staticmethod
- def process_pdf(file_content, document_id, max_tokens, bucket_name):
- """
- Parse a PDF file and process the content of every page.
- Args:
- file_content (BytesIO): content of the PDF file.
- document_id (str): ID of the document.
- max_tokens (int): maximum number of tokens per chunk.
- bucket_name (str): name of the storage bucket.
- Returns:
- list: the processed pages.
- """
- logger.info(f"开始处理 PDF,document_id: {document_id}, max_tokens: {max_tokens}")
- text_chunks = []
- try:
- sentence_chunks, page_images = pdf_utils.PDFProcessor(1).get_results(file_content, document_id, max_tokens, bucket_name, KbmService)
- # Split and store the pages one at a time
- for sentence_chunk in sentence_chunks:
- text_chunks.append(KbmService.split_text_by_semantic(sentence_chunk, max_tokens, bucket_name))
- sleep(0.5)
- logger.info("分割完成")
- result = []
- page_index = 0
- for i, chunks in enumerate(text_chunks, 1):
- chunk_index = 1
- image_name = page_images[i-1][0]
- for chunk in chunks:
- KbmService.saveTaskSublist(
- document_id=document_id,
- name=image_name,
- page_number=i,
- chunk_number=chunk_index,
- content=chunk['content'],
- milvus_id=chunk['milvus_id']
- )
- chunk_index += 1
- page_index += chunk_index
- result.append({
- 'page_number': i,
- 'chunk_number': page_index,
- })
- logger.info("解析结束")
- return result
- except Exception as e:
- logger.error(f'错误 {str(e)}')
- return []
- # Parse Markdown
- @staticmethod
- def process_markdown(file_content, document_id, max_tokens, bucket_name):
- logger.info(f"开始解析 Markdown,document_id: {document_id}, max_tokens: {max_tokens}")
- try:
- # Detect the file encoding
- raw_content = file_content.read()
- detected = chardet.detect(raw_content)
- encoding = detected['encoding']
- logger.info(f"检测到的文件编码: {encoding}")
- # Decode the file content
- text = raw_content.decode(encoding)
- logger.info(f"Markdown 文件总字符数: {len(text)}")
- logger.debug(f"Markdown 文件前100个字符: {text[:100]}")
- # Convert the Markdown to HTML
- html = markdown.markdown(text)
- # Extract plain text with BeautifulSoup
- soup = BeautifulSoup(html, 'html.parser')
- plain_text = soup.get_text()
- logger.info(f"提取的纯文本总字符数: {len(plain_text)}")
- logger.debug(f"提取的纯文本前100个字符: {plain_text[:100]}")
- if not plain_text.strip():
- logger.warning("Markdown 文件内容为空")
- return []
- sentence_chunks = pdf_utils.PDFProcessor(1).split_sentences(plain_text)
- # Split the text with split_text_by_semantic
- text_chunks = KbmService.split_text_by_semantic(sentence_chunks, max_tokens, bucket_name)
- logger.info(f"分割后的文本块数: {len(text_chunks)}")
- result = []
- for i, chunk in enumerate(text_chunks, 1):
- KbmService.saveTaskSublist(
- document_id=document_id,
- name="markdown_content",
- page_number=1,
- chunk_number=i,
- content=chunk['content'],
- milvus_id=chunk['milvus_id']
- )
- result.append({
- 'page_number': 1,
- 'chunk_number': i,
- })
- logger.info(f"Markdown 处理完成,总共生成 {len(result)} 个文本块")
- return result
- except Exception as e:
- logger.error(f"处理 Markdown 时发生错误: {str(e)}")
- logger.exception("详细错误信息:")
- return []
- @staticmethod
- def split_text(text, max_tokens):
- words = text.split()
- chunks = []
- current_chunk = []
- current_token_count = 0
- for word in words:
- word_tokens = KbmService.estimate_tokens(word)
- if current_token_count + word_tokens > max_tokens and current_chunk:
- chunks.append(' '.join(current_chunk))
- current_chunk = []
- current_token_count = 0
- current_chunk.append(word)
- current_token_count += word_tokens
- if current_chunk:
- chunks.append(' '.join(current_chunk))
- return chunks
- @staticmethod
- def estimate_tokens(text):
- return len(re.findall(r'\w+', text)) * 1.3
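- # Worked example: estimate_tokens("hello world") finds 2 word matches, so 2 * 1.3 = 2.6.
- # The 1.3 multiplier is a rough words-to-tokens heuristic, not an exact tokenizer.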
-
- @staticmethod
- def extract_text_from_image(image_data):
- try:
- if isinstance(image_data, BytesIO):
- image_data = image_data.getvalue()
- # Decode the image bytes
- nparr = np.frombuffer(image_data, np.uint8)
- image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
- # Preprocess the image
- preprocessed = KbmService.preprocess_image(image)
- # Run OCR
- # result = Paddle_OCR.ocr(preprocessed, cls=True) # previous PaddleOCR path
- result = ocr_utils.OCRProcesso().ocr(preprocessed)
- # Post-process the text
- text = KbmService.post_process_text(result)
- if text and len(text) > 2: # assume meaningful text has at least 3 characters
- logger.info(f"提取的文本长度: {len(text)} {text}")
- return text
- else:
- logger.info("提取的内容似乎是图像,而不是文本")
- return "图片"
- except Exception as e:
- logger.error(f"从图像提取文本时出错: {str(e)} {sys._getframe().f_lineno}")
- return "图片"
- @staticmethod
- def preprocess_image(image):
- # Convert to grayscale
- gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
- # Adaptive thresholding
- binary = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
- # Dilate the binary image to thicken the strokes
- kernel = np.ones((2, 2), np.uint8)
- dilated = cv2.dilate(binary, kernel, iterations=1)
- return dilated
- @staticmethod
- def post_process_text(text):
- if text:
- # Replace runs of colons or dots with a space
- text = re.sub(r'[:.]+', ' ', text)
- # Keep CJK characters, Latin letters, digits and common punctuation
- text = re.sub(r'[^\u4e00-\u9fff\u3000-\u303fa-zA-Z0-9.,!?;:()"\'\s]', '', text)
- # Drop runs of 3 or more digits
- text = re.sub(r'\d{3,}', '', text)
- # Collapse extra whitespace
- text = re.sub(r'\s+', ' ', text).strip()
- # Drop standalone digits, but keep chapter numbers and other meaningful digits
- text = re.sub(r'\b(?<![第章])\d+(?!\d)\b', '', text)
- # Clean up any remaining extra spaces
- text = re.sub(r'\s+', ' ', text).strip()
- return text
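- # Worked example: post_process_text("第1章:概述 12345") -> "第1章 概述"
- # (the colon run becomes a space, the 5-digit run is dropped, and the chapter digit
- # is protected: no word boundary before it, plus the 第/章 lookbehind).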
- @staticmethod
- def render_page_to_image(page, scale=2):
- try:
- pix = page.get_pixmap(matrix=fitz.Matrix(scale, scale))
- img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
- buffered = BytesIO()
- img.save(buffered, format="PNG")
- buffered.seek(0)
- return buffered
- except Exception as e:
- logger.error('文件数据不正确')
- raise ValueError("文件数据不正确")
-
- @staticmethod
- def save_image_to_minio(image_data, bucket_name):
- image_name = f"page_image_{uuid.uuid4()}.png"
- minio_client.put_object(bucket_name, image_name, image_data, length=image_data.getbuffer().nbytes)
- return image_name
-
- @staticmethod
- @transaction.atomic
- def saveTask(document_id, total_chunks):
- Task.objects.update_or_create(
- doc_id=document_id, # lookup condition
- defaults={'to_page': total_chunks} # fields to update or create
- )
- @staticmethod
- @transaction.atomic
- def saveTaskSublist(document_id, name, page_number=None, chunk_number=None, content=None, milvus_id=None):
- try:
- # Make sure content is a Unicode string
- if content is not None:
- if isinstance(content, bytes):
- content = content.decode('utf-8')
- else:
- content = str(content)
- TaskSublist.objects.create(
- doc_id=document_id,
- name=name,
- page_number=str(page_number) if page_number is not None else '0',
- chunk_number=str(chunk_number) if chunk_number is not None else '0',
- content=content,
- milvus_id=milvus_id
- )
- logger.info(f"Successfully saved TaskSublist for document {document_id}, chunk {chunk_number}")
- except Exception as e:
- logger.error(f"Error saving TaskSublist: {str(e)}")
- # Optionally re-raise here or add other error handling
- @staticmethod
- @transaction.atomic
- def clearPreviousData(document_id, bucket_name):
- try:
- # Fetch all TaskSublist / Milvus records tied to this document
- task_sublists = TaskSublist.objects.filter(doc_id=document_id)
- # Connect to Milvus and fetch the collection if it exists
- connections.connect("default", host=MILVUS_HOST, port=MILVUS_PORT, user=MILVUS_USER, password=MILVUS_PASSWORD)
- milvus_collection_exists = utility.has_collection(bucket_name)
- if milvus_collection_exists:
- collection = Collection(bucket_name)
- # Delete the related images from MinIO and the vectors from Milvus
- for task in task_sublists:
- try:
- # Delete from MinIO
- minio_client.remove_object(bucket_name, task.name)
- # Delete from Milvus
- if milvus_collection_exists and task.milvus_id:
- expr = f'id in [{task.milvus_id}]'
- collection.delete(expr)
- except Exception as e:
- logger.error(f"Error deleting MinIO/Milvus data for task {task.name}: {e}")
- # Delete the TaskSublist rows from the database
- task_sublists.delete()
- except Exception as e:
- logger.error(f"Error clearing previous data: {str(e)} {sys._getframe().f_lineno}")
- finally:
- connections.disconnect("default")
- @staticmethod
- @transaction.atomic
- def updateDocument(max_tokens, total_chunks, document_id, execution_time):
- try:
- # Check whether any TaskSublist rows exist for this document
- count = TaskSublist.objects.filter(doc_id=document_id).count()
- # The progress flag depends on whether any chunks were produced
- progress_status = 1 if count > 0 else -1
- # Update the DocumentKbm row
- updated = DocumentKbm.objects.filter(id=document_id).update(
- token_num=max_tokens,
- chunk_num=total_chunks,
- progress=progress_status,
- process_begin_at=timezone.now(),
- process_duation=execution_time
- )
- if updated:
- return True, f"Document {document_id} updated successfully."
- else:
- return False, f"Document {document_id} not found."
- except Exception as e:
- # On any error the transaction rolls back automatically
- return False, f"Error updating document: {str(e)}"
- # Asynchronous call
- @staticmethod
- def searchTaskInfo(request):
- document_id = request.POST.get("document_id")
- page = request.POST.get('page', 1)
- page_size = request.POST.get('page_size', 10) # items per page, default 10
- taskSublists = TaskSublist.objects.filter(doc_id=document_id).order_by('id')
- document = get_object_or_404(DocumentKbm, id=document_id)
- location = document.location
- knowledgebase = get_object_or_404(Knowledgebase, id=document.kb_id)
- bucket_name = knowledgebase.location
- documentUrl = minio_client.presigned_get_object(
- bucket_name=bucket_name,
- object_name=location,
- expires=timedelta(days=1) # URL valid for one day
- )
- # Build the paginator
- paginator = Paginator(taskSublists, page_size)
- try:
- tasks_page = paginator.page(page)
- except PageNotAnInteger:
- # Page is not an integer: fall back to the first page
- tasks_page = paginator.page(1)
- except EmptyPage:
- # Page is out of range: fall back to the last page
- tasks_page = paginator.page(paginator.num_pages)
- task_results = []
- for task in tasks_page:
- try:
- # Generate a presigned URL for the MinIO object
- url = minio_client.presigned_get_object(
- bucket_name=bucket_name,
- object_name=task.name,
- expires=timedelta(days=1) # URL valid for one day
- )
- task_results.append({
- 'id': task.id,
- 'doc_id': task.doc_id,
- 'name': task.name,
- 'page_number': task.page_number,
- 'chunk_number': task.chunk_number,
- 'content': task.content,
- 'url': url
- })
- except Exception as e:
- logger.error(f"Error generating URL for object {task.name}: {str(e)}")
- # If URL generation fails, keep the other fields but set url to None
- task_results.append({
- 'id': task.id,
- 'doc_id': task.doc_id,
- 'name': task.name,
- 'page_number': task.page_number,
- 'chunk_number': task.chunk_number,
- 'content': task.content,
- 'url': None
- })
- # Final payload with documentUrl and pagination info
- result = {
- 'documentUrl': documentUrl,
- 'tasks': task_results,
- 'pagination': {
- 'current_page': tasks_page.number,
- 'num_pages': paginator.num_pages,
- 'per_page': page_size,
- 'total_count': paginator.count,
- 'has_next': tasks_page.has_next(),
- 'has_previous': tasks_page.has_previous(),
- }
- }
- return success(result)
- @staticmethod
- @transaction.atomic
- def deleteBucket(request):
- bucket_id = request.POST.get("bucket_id")
- if not bucket_id:
- return fail("Bucket ID 为空")
- try:
- # Fetch the knowledge base
- try:
- knowledgebase = Knowledgebase.objects.get(id=bucket_id)
- except ObjectDoesNotExist:
- return fail("指定的知识库不存在")
- # Refuse to delete while documents still exist
- active_docs_count = DocumentKbm.objects.filter(kb_id=bucket_id).count()
- if active_docs_count > 0:
- return fail(f"无法删除知识库,还有 {active_docs_count} 个文档")
- # Delete the related File records
- File.objects.filter(
- name=knowledgebase.name,
- source_type='knowledgebase',
- type='folder'
- ).delete()
- # Delete all related DocumentKbm records
- DocumentKbm.objects.filter(kb_id=bucket_id).delete()
- # Delete the knowledge base itself
- knowledgebase.delete()
- # Optional: when files are stored in MinIO, remove the MinIO bucket as well
- try:
- minio_client.remove_bucket(bucket_id)
- except Exception as e:
- logger.warning(f"Failed to delete MinIO bucket {bucket_id}: {str(e)}")
- return success("知识库已成功删除")
- except Exception as e:
- logger.error(f"删除知识库时发生错误: {str(e)}", exc_info=True)
- return fail(f"删除知识库时发生错误: {str(e)}")
- @staticmethod
- def getRunStatus(request):
- document_id = request.POST.get("document_id")
- run = DocumentKbm.objects.filter(id=document_id).values("run").first()
- return success(run)
- @staticmethod
- def batchAnalysis(request):
- ids_str = request.POST.get("ids")
- start_page = int(request.POST.get('start_page', 1))
- end_page = int(request.POST.get('end_page', -1))
- max_tokens = int(request.POST.get('max_tokens', 2048))
- try:
- # Parse the string as a JSON list
- ids = json.loads(ids_str)
- if not isinstance(ids, list):
- return fail("无效输入:'ids'应该是一个列表")
- results = []
- for document_id in ids:
- sleep(0.1)
- logger.info("缓解压力沉睡0.1秒")
- # Build a fresh request object for each document_id
- analysis_request = type('AnalysisRequest', (), {})()
- analysis_request.POST = {
- 'document_id': document_id,
- 'start_page': start_page,
- 'end_page': end_page,
- 'max_tokens': max_tokens
- }
- # Call the analysis method and collect its message
- response = KbmService.analysis(analysis_request)
- results.append(response.get('message'))
- # Return only after every document has been queued
- return success(results, "已添加到队列")
- except Exception as e:
- return fail(f"An error occurred: {str(e)}")
- # Supported file extensions
- SUPPORTED_SUFFIXES = [
- 'txt', 'pdf', 'doc', 'docx', 'xls', 'xlsx', 'md'
- ]
- @staticmethod
- def getSuffixName(request):
- try:
- # All distinct type values currently stored in the database
- db_types = DocumentKbm.objects.values_list('type', flat=True).distinct()
- # Convert the database types to a set
- db_types_set = set(db_types)
- # Convert SUPPORTED_SUFFIXES to a set
- supported_set = set(KbmService.SUPPORTED_SUFFIXES)
- # Union the two sets, which also removes duplicates
- combined_set = supported_set.union(db_types_set)
- # Back to a list
- combined_suffixes = list(combined_set)
- # Sort the result (optional)
- combined_suffixes.sort()
- return success(combined_suffixes)
- except Exception as e:
- return fail(f"获取文件后缀名时发生错误: {str(e)}")
- @staticmethod
- @transaction.atomic
- def batchMove(request):
- ids = json.loads(request.POST.get("ids"))
- doc_type_id = request.POST.get("doc_type_id")
- if not doc_type_id:
- return fail("分类id为空")
- if not ids:
- return fail("未传出文件id")
- doc_type = KbmDocumentType.objects.filter(id=doc_type_id).exclude(status=4).first()
- if not doc_type:
- return fail("当前分类不存在")
- DocumentKbm.objects.filter(id__in=ids).update(doc_type_id=doc_type_id)
- return success("批量移动成功")
- @staticmethod
- @transaction.atomic
- def moveDocument(doc_id, doc_type_id):
- # This method can stay as-is: it is already a single update operation
- return DocumentKbm.objects.filter(id=doc_id).update(doc_type_id=doc_type_id)
-
- @staticmethod
- @transaction.atomic
- def updateKbm(request):
- """
- Update knowledge base info.
- Args:
- request (HttpRequest): request object
- Returns:
- dict: success or failure response
- """
- id = request.POST.get("id")
- if not id:
- return fail("id为空")
- kmb = Knowledgebase.objects.filter(id=id).first()
- name = request.POST.get("name")
- if not name:
- return fail("名称不能为空")
- if kmb:
- kmb.name = name
- kmb.description = request.POST.get("description", "")
- kmb.save()
- return success("修改成功")
- else:
- return fail("修改失败")