import json
import threading

import pika
from django.conf import settings

from base import logger
from backend.models import DocumentKbm
from backend.Service.KbmService import KbmService
from .. import *  # expected to provide the success()/fail() response helpers used below
class DocumentProcessing:
    """Queues DocumentKbm records for parsing by KbmService via RabbitMQ."""

    @staticmethod
    def process_document(document_id, start_page, end_page, max_tokens):
        try:
            document = DocumentKbm.objects.get(id=document_id)
            document.run = 1  # status: processing
            document.save()

            logger.info('KbmService.async_analysis started')
            KbmService.async_analysis(document_id, start_page, end_page, max_tokens)
            logger.info('KbmService.async_analysis finished')

            document.run = 3  # status: finished
            document.save()
        except Exception as e:
            logger.error(f"Error while processing document {document_id}: {str(e)}")
            # Queryset update so a failed lookup above does not raise a second error here
            DocumentKbm.objects.filter(id=document_id).update(run=4)  # status: failed
            raise
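    # DocumentKbm.run status codes used throughout this class (collected from
    # the assignments above and below): 1 = processing, 3 = finished,
    # 4 = failed, 5 = waiting in the queue.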
    # Entry point for the new RabbitMQ-based processing queue
    @staticmethod
    def analysis(request):
        document_id = request.POST.get("document_id")
        start_page = int(request.POST.get('start_page', 1))
        end_page = int(request.POST.get('end_page', -1))
        max_tokens = int(request.POST.get('max_tokens', 2048))
        if max_tokens == 0:
            max_tokens = 2048

        logger.info(f"Start processing document ID: {document_id}")
        try:
            document = DocumentKbm.objects.get(id=document_id)
            if int(document.run) in [1, 5]:  # 1: processing, 5: waiting
                logger.info(f"Document {document_id} is already queued")
                return success("The document is already being processed or waiting in the queue")

            # Build the queue message
            message = {
                'document_id': document_id,
                'start_page': start_page,
                'end_page': end_page,
                'max_tokens': max_tokens
            }

            # Publish the message to the queue
            if KbmService.send_to_rabbitmq(settings.RABBITMQ_QUEUE_NAME, message):
                document.run = 5  # status: waiting
                document.save()
                logger.info(f"Document {document_id} marked as waiting for processing")
                return success("The document has been added to the processing queue")
            else:
                document.run = 4  # status: failed
                document.save()
                return fail("Failed to add the document to the processing queue")
        except DocumentKbm.DoesNotExist:
            # There is no DocumentKbm row to update here, so only log and report the error
            logger.error(f"Document {document_id} does not exist")
            return fail("Document does not exist")
        except Exception as e:
            logger.error(f"Error while processing document {document_id}: {str(e)}")
            # Queryset update: `document` may be unbound if the lookup itself failed
            DocumentKbm.objects.filter(id=document_id).update(run=4)  # status: failed
            return fail("An error occurred while processing the document")
    # Limit concurrent parsing jobs to four at a time
    semaphore = threading.Semaphore(4)
    # @staticmethod
    # def process_queue():
    #     logger.info("Start monitoring the RabbitMQ queue")
    #     connection = pika.BlockingConnection(pika.ConnectionParameters(
    #         host=settings.RABBITMQ_HOST,
    #         port=settings.RABBITMQ_PORT,
    #         credentials=pika.PlainCredentials(
    #             settings.RABBITMQ_USER,
    #             settings.RABBITMQ_PASSWORD
    #         )
    #     ))
    #     channel = connection.channel()
    #     channel.queue_declare(queue=settings.RABBITMQ_QUEUE_NAME, durable=True)
    #
    #     def callback(ch, method, properties, body):
    #         with KbmService.semaphore:
    #             try:
    #                 job = json.loads(body)
    #                 document_id = job['document_id']
    #                 start_page = job['start_page']
    #                 end_page = job['end_page']
    #                 max_tokens = job['max_tokens']
    #
    #                 logger.info(f"Start parsing document {document_id}")
    #                 KbmService.async_analysis(document_id, start_page, end_page, max_tokens)
    #
    #                 # Success: acknowledge the message
    #                 ch.basic_ack(delivery_tag=method.delivery_tag)
    #             except Exception as e:
    #                 logger.error(f"Error while handling queue message: {str(e)}")
    #                 # Failure: reject the message and requeue it
    #                 ch.basic_nack(delivery_tag=method.delivery_tag, requeue=True)
    #
    #     # A prefetch count of 4 matches the maximum concurrency
    #     channel.basic_qos(prefetch_count=4)
    #     channel.basic_consume(queue=settings.RABBITMQ_QUEUE_NAME, on_message_callback=callback)
    #
    #     logger.info('Waiting for queue messages. Press CTRL+C to exit')
    #     channel.start_consuming()
    # Connection state for the (currently disabled) in-process consumer
    connection = None
    channel = None
    should_stop = False
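
    # --- Sketch (an assumption, not the original implementation) ---
    # A working variant of the commented-out consumer above: it reuses the class
    # attributes `connection`, `channel`, `semaphore` and `should_stop`, and routes
    # each message through process_document() so the run-status transitions apply.
    # The pika calls mirror the ones already used in the commented-out version.
    @classmethod
    def process_queue(cls):
        cls.connection = pika.BlockingConnection(pika.ConnectionParameters(
            host=settings.RABBITMQ_HOST,
            port=settings.RABBITMQ_PORT,
            credentials=pika.PlainCredentials(settings.RABBITMQ_USER,
                                              settings.RABBITMQ_PASSWORD)))
        cls.channel = cls.connection.channel()
        cls.channel.queue_declare(queue=settings.RABBITMQ_QUEUE_NAME, durable=True)
        cls.channel.basic_qos(prefetch_count=4)  # match the semaphore size

        # consume() with an inactivity timeout lets the loop poll should_stop
        for method, properties, body in cls.channel.consume(
                queue=settings.RABBITMQ_QUEUE_NAME, inactivity_timeout=1):
            if cls.should_stop:
                break
            if method is None:  # timeout tick, no message delivered
                continue
            with cls.semaphore:
                try:
                    job = json.loads(body)
                    cls.process_document(job['document_id'], job['start_page'],
                                         job['end_page'], job['max_tokens'])
                    cls.channel.basic_ack(delivery_tag=method.delivery_tag)
                except Exception as e:
                    logger.error(f"Error while handling queue message: {str(e)}")
                    # Reject and requeue, as in the commented-out consumer above
                    cls.channel.basic_nack(delivery_tag=method.delivery_tag, requeue=True)

        cls.channel.cancel()
        cls.connection.close()

    # Usage sketch (assumption): start the consumer in a background thread, e.g.
    # from an AppConfig.ready() hook, and flip should_stop to shut it down:
    #   threading.Thread(target=DocumentProcessing.process_queue, daemon=True).start()
    #   ...
    #   DocumentProcessing.should_stop = True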