Sync update: refactor routing and service modules; update frontend build

2025-12-14 21:47:08 +08:00
parent e01a7b5235
commit a346509a5f
87 changed files with 9186 additions and 7826 deletions

2
services/__init__.py Normal file
View File

@@ -0,0 +1,2 @@
"""Service layer package."""

26
services/accounts_service.py Normal file
View File

@@ -0,0 +1,26 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import annotations
import database
from services.models import Account
from services.state import safe_set_user_accounts
def load_user_accounts(user_id: int) -> None:
"""从数据库加载用户的账号到内存(保持原逻辑不变)。"""
accounts_by_id = {}
accounts_data = database.get_user_accounts(user_id)
for acc_data in accounts_data:
account = Account(
account_id=acc_data["id"],
user_id=user_id,
username=acc_data["username"],
password=acc_data["password"],
remember=bool(acc_data["remember"]),
remark=acc_data["remark"] or "",
)
accounts_by_id[account.id] = account
safe_set_user_accounts(user_id, accounts_by_id)

26
services/browse_types.py Normal file
View File

@@ -0,0 +1,26 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import annotations
BROWSE_TYPE_SHOULD_READ = "应读"
BROWSE_TYPE_PRE_REG_UNREAD = "注册前未读"
_BROWSE_TYPES_ALLOWED_INPUT = {BROWSE_TYPE_SHOULD_READ, BROWSE_TYPE_PRE_REG_UNREAD}
def normalize_browse_type(value, default: str = BROWSE_TYPE_SHOULD_READ) -> str:
text = str(value or "").strip()
if text == BROWSE_TYPE_PRE_REG_UNREAD:
return BROWSE_TYPE_PRE_REG_UNREAD
if text == BROWSE_TYPE_SHOULD_READ:
return BROWSE_TYPE_SHOULD_READ
return default
def validate_browse_type(value, default: str = BROWSE_TYPE_SHOULD_READ):
text = str(value if value is not None else default).strip()
if text not in _BROWSE_TYPES_ALLOWED_INPUT:
return None
return normalize_browse_type(text, default=default)
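
Illustrative usage of the two helpers above (a sketch, not part of the diff; the inputs are made up):

from services.browse_types import (
    BROWSE_TYPE_SHOULD_READ,
    normalize_browse_type,
    validate_browse_type,
)

# Unknown or empty input falls back to the default "应读".
assert normalize_browse_type(None) == BROWSE_TYPE_SHOULD_READ
assert normalize_browse_type(" 注册前未读 ") == "注册前未读"

# validate_browse_type is stricter: values outside the allowed set yield None.
assert validate_browse_type("随便写的") is None
assert validate_browse_type("应读") == BROWSE_TYPE_SHOULD_READ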

40
services/browser_manager.py Normal file
View File

@@ -0,0 +1,40 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import annotations
import threading
from typing import Optional
from app_logger import get_logger
from browser_installer import check_and_install_browser
from playwright_automation import PlaywrightBrowserManager
logger = get_logger("browser_manager")
_browser_manager: Optional[PlaywrightBrowserManager] = None
_lock = threading.Lock()
def get_browser_manager() -> Optional[PlaywrightBrowserManager]:
return _browser_manager
def init_browser_manager() -> bool:
global _browser_manager
with _lock:
if _browser_manager is not None:
return True
logger.info("正在初始化Playwright浏览器管理器...")
if not check_and_install_browser(log_callback=lambda msg, account_id=None: logger.info(str(msg))):
logger.error("浏览器环境检查失败!")
return False
_browser_manager = PlaywrightBrowserManager(
headless=True,
log_callback=lambda msg, account_id=None: logger.info(str(msg)),
)
logger.info("Playwright浏览器管理器创建成功")
return True
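
A minimal startup sketch for the manager above (assumed to run once in app.py; the error handling is illustrative):

from services.browser_manager import init_browser_manager, get_browser_manager

# init_browser_manager() is guarded by a module-level lock, so repeated calls are safe.
if not init_browser_manager():
    raise SystemExit("浏览器环境初始化失败")

manager = get_browser_manager()  # PlaywrightBrowserManager, or None if init never succeeded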

21
services/checkpoints.py Normal file
View File

@@ -0,0 +1,21 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import annotations
from typing import Optional
from task_checkpoint import TaskStage, get_checkpoint_manager
_checkpoint_mgr = None # type: Optional[object]
def init_checkpoint_manager():
global _checkpoint_mgr
if _checkpoint_mgr is None:
_checkpoint_mgr = get_checkpoint_manager()
return _checkpoint_mgr
def get_checkpoint_mgr():
return init_checkpoint_manager()

31
services/client_log.py Normal file
View File

@@ -0,0 +1,31 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import annotations
from app_logger import LoggerAdapter
from app_security import escape_html
from services.runtime import get_socketio
from services.state import safe_add_log
from services.time_utils import get_beijing_now
def log_to_client(message, user_id=None, account_id=None):
"""发送日志到Web客户端(用户隔离) + 统一输出到 logger。"""
timestamp = get_beijing_now().strftime('%H:%M:%S')
log_data = {
'timestamp': timestamp,
'message': escape_html(str(message)) if message else '',
'account_id': account_id
}
if user_id:
safe_add_log(user_id, log_data)
get_socketio().emit('log', log_data, room=f'user_{user_id}')
ctx = {}
if user_id is not None:
ctx["user_id"] = user_id
if account_id:
ctx["account_id"] = account_id
LoggerAdapter("app", ctx).info(str(message) if message is not None else "")
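
A hypothetical call site for log_to_client (assumes init_runtime() has already been called so get_socketio() works; the ids are made up):

from services.client_log import log_to_client

# Escapes the message, appends it to the per-user log cache, emits a Socket.IO
# "log" event to room "user_42", and mirrors it to the application logger.
log_to_client("任务已开始", user_id=42, account_id="acc_001")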

108
services/maintenance.py Normal file
View File

@@ -0,0 +1,108 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import annotations
import threading
import time
from app_config import get_config
from app_logger import get_logger
from services.state import (
cleanup_expired_ip_rate_limits,
safe_cleanup_expired_batches,
safe_cleanup_expired_captcha,
safe_cleanup_expired_pending_random,
safe_get_user_accounts_last_access_items,
safe_has_user,
safe_iter_task_status_items,
safe_remove_task_status,
safe_remove_user_accounts,
)
logger = get_logger("app")
config = get_config()
USER_ACCOUNTS_EXPIRE_SECONDS = int(getattr(config, "USER_ACCOUNTS_EXPIRE_SECONDS", 3600))
BATCH_TASK_EXPIRE_SECONDS = int(getattr(config, "BATCH_TASK_EXPIRE_SECONDS", 21600))
PENDING_RANDOM_EXPIRE_SECONDS = int(getattr(config, "PENDING_RANDOM_EXPIRE_SECONDS", 7200))
def cleanup_expired_data() -> None:
"""定期清理过期数据,防止内存泄漏(逻辑保持不变)。"""
current_time = time.time()
deleted_captchas = safe_cleanup_expired_captcha(current_time)
if deleted_captchas:
logger.debug(f"已清理 {deleted_captchas} 个过期验证码")
deleted_ips = cleanup_expired_ip_rate_limits(current_time)
if deleted_ips:
logger.debug(f"已清理 {deleted_ips} 个过期IP限流记录")
expired_users = []
last_access_items = safe_get_user_accounts_last_access_items()
if last_access_items:
task_items = safe_iter_task_status_items()
active_user_ids = {int(info.get("user_id")) for _, info in task_items if info.get("user_id")}
for user_id, last_access in last_access_items:
if (current_time - float(last_access)) <= USER_ACCOUNTS_EXPIRE_SECONDS:
continue
if int(user_id) in active_user_ids:
continue
if safe_has_user(user_id):
expired_users.append(int(user_id))
for user_id in expired_users:
safe_remove_user_accounts(user_id)
if expired_users:
logger.debug(f"已清理 {len(expired_users)} 个过期用户账号缓存")
completed_tasks = []
for account_id, status_data in safe_iter_task_status_items():
if status_data.get("status") in ["已完成", "失败", "已停止"]:
start_time = float(status_data.get("start_time", 0) or 0)
if (current_time - start_time) > 600: # 10分钟
completed_tasks.append(account_id)
for account_id in completed_tasks:
safe_remove_task_status(account_id)
if completed_tasks:
logger.debug(f"已清理 {len(completed_tasks)} 个已完成任务状态")
try:
import os
while True:
try:
pid, status = os.waitpid(-1, os.WNOHANG)
if pid == 0:
break
logger.debug(f"已回收僵尸进程: PID={pid}")
except ChildProcessError:
break
except Exception:
pass
deleted_batches = safe_cleanup_expired_batches(BATCH_TASK_EXPIRE_SECONDS, current_time)
if deleted_batches:
logger.debug(f"已清理 {deleted_batches} 个过期批次任务缓存")
deleted_random = safe_cleanup_expired_pending_random(PENDING_RANDOM_EXPIRE_SECONDS, current_time)
if deleted_random:
logger.debug(f"已清理 {deleted_random} 个过期随机延迟任务")
def start_cleanup_scheduler() -> None:
"""启动定期清理调度器"""
def cleanup_loop():
while True:
try:
time.sleep(300) # 每5分钟执行一次清理
cleanup_expired_data()
except Exception as e:
logger.error(f"清理任务执行失败: {e}")
cleanup_thread = threading.Thread(target=cleanup_loop, daemon=True, name="cleanup-scheduler")
cleanup_thread.start()
logger.info("内存清理调度器已启动")

106
services/models.py Normal file
View File

@@ -0,0 +1,106 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import annotations
from flask_login import UserMixin
from services.state import safe_get_task_status
class User(UserMixin):
"""Flask-Login 用户类"""
def __init__(self, user_id: int):
self.id = user_id
class Admin(UserMixin):
"""管理员类"""
def __init__(self, admin_id: int):
self.id = admin_id
self.is_admin = True
class Account:
"""账号类(用于内存缓存与任务执行)"""
def __init__(
self,
account_id: str,
user_id: int,
username: str,
password: str,
remember: bool = True,
remark: str = "",
):
self.id = account_id
self.user_id = user_id
self.username = username
self._password = password
self.remember = remember
self.remark = remark or ""
# 运行状态
self.is_running = False
self.should_stop = False
self.status = "未开始"
# 任务数据
self.total_items = 0
self.total_attachments = 0
self.last_browse_type = ""
# 浏览器自动化对象(主任务)
self.automation = None
# 代理配置(浏览与截图共用)
self.proxy_config = None
@property
def password(self) -> str:
return self._password
def __repr__(self) -> str:
return f"Account(id={self.id}, username={self.username}, status={self.status})"
def to_dict(self) -> dict:
result = {
"id": self.id,
"username": self.username,
"status": self.status,
"remark": self.remark,
"total_items": self.total_items,
"total_attachments": self.total_attachments,
"is_running": self.is_running,
}
ts = safe_get_task_status(self.id)
if ts:
progress = ts.get("progress", {}) or {}
result["detail_status"] = ts.get("detail_status", "")
result["progress_items"] = progress.get("items", 0)
result["progress_attachments"] = progress.get("attachments", 0)
result["start_time"] = ts.get("start_time", 0)
if ts.get("start_time"):
import time
elapsed = int(time.time() - ts["start_time"])
result["elapsed_seconds"] = elapsed
mins, secs = divmod(elapsed, 60)
result["elapsed_display"] = f"{mins}{secs}"
else:
status_map = {
"已完成": "任务完成",
"截图中": "正在截图",
"浏览完成": "浏览完成",
"登录失败": "登录失败",
"已暂停": "任务已暂停",
}
for key, val in status_map.items():
if key in self.status:
result["detail_status"] = val
break
return result
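
A small sketch of how Account is built and serialized (field values below are invented for illustration):

from services.models import Account

acc = Account(
    account_id="acc_001",   # hypothetical id
    user_id=42,
    username="demo_user",
    password="secret",
    remember=True,
    remark="测试账号",
)

# to_dict() merges the in-memory fields with any live task status
# returned by services.state.safe_get_task_status().
payload = acc.to_dict()
print(payload["status"], payload["is_running"])  # 未开始 False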

77
services/proxy.py Normal file
View File

@@ -0,0 +1,77 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import annotations
import re
import time
from typing import Optional
import requests
from app_logger import get_logger
logger = get_logger("proxy")
def validate_ip_port(ip_port_str: str) -> bool:
"""验证IP:PORT格式是否有效含范围校验"""
pattern = re.compile(r"^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3}):(\d{1,5})$")
match = pattern.match(ip_port_str or "")
if not match:
return False
for i in range(1, 5):
octet = int(match.group(i))
if octet < 0 or octet > 255:
return False
port = int(match.group(5))
return 1 <= port <= 65535
def get_proxy_from_api(api_url: str, max_retries: int = 3) -> Optional[str]:
"""从API获取代理IP支持重试"""
ip_port_pattern = re.compile(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5}$")
max_retries = max(1, int(max_retries or 1))
for attempt in range(max_retries):
try:
response = requests.get(api_url, timeout=10)
if response.status_code == 200:
text = response.text.strip()
# 尝试解析JSON响应
try:
import json
data = json.loads(text)
if isinstance(data, dict):
if data.get("status") not in (200, 0, None):
error_msg = data.get("msg", data.get("message", "未知错误"))
logger.warning(f"代理API返回错误: {error_msg} (尝试 {attempt + 1}/{max_retries})")
if attempt < max_retries - 1:
time.sleep(1)
continue
ip_port = data.get("ip") or data.get("proxy") or data.get("data")
if ip_port:
text = str(ip_port).strip()
except Exception:
pass
if ip_port_pattern.match(text) and validate_ip_port(text):
proxy_server = f"http://{text}"
logger.info(f"获取代理成功: {proxy_server} (尝试 {attempt + 1}/{max_retries})")
return proxy_server
logger.warning(f"代理格式或范围无效: {text[:50]} (尝试 {attempt + 1}/{max_retries})")
else:
logger.warning(f"获取代理失败: HTTP {response.status_code} (尝试 {attempt + 1}/{max_retries})")
except Exception as e:
logger.warning(f"获取代理异常: {str(e)} (尝试 {attempt + 1}/{max_retries})")
if attempt < max_retries - 1:
time.sleep(1)
logger.warning(f"获取代理失败,已重试 {max_retries} 次,将不使用代理继续")
return None
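
Illustrative use of the proxy helpers (the API URL is a placeholder, not a real endpoint):

from services.proxy import get_proxy_from_api, validate_ip_port

assert validate_ip_port("10.0.0.1:8080") is True
assert validate_ip_port("10.0.0.1:99999") is False   # port out of range

# Returns e.g. "http://1.2.3.4:8080", or None once max_retries attempts fail.
proxy_server = get_proxy_from_api("https://example.com/get-proxy", max_retries=2)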

36
services/runtime.py Normal file
View File

@@ -0,0 +1,36 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
运行时依赖注入:用于 services/routes 访问 socketio/logger 等全局实例。
说明:
- 仅在 app.py 启动装配时调用 init_runtime()
- 业务模块中避免直接 import app.py,统一通过本模块获取依赖
"""
from __future__ import annotations
from typing import Any, Optional
_socketio: Optional[Any] = None
_logger: Optional[Any] = None
def init_runtime(*, socketio: Any, logger: Any) -> None:
global _socketio, _logger
_socketio = socketio
_logger = logger
def get_socketio() -> Any:
if _socketio is None:
raise RuntimeError("socketio 未初始化(请先在 app.py 调用 init_runtime")
return _socketio
def get_logger() -> Any:
if _logger is None:
raise RuntimeError("logger 未初始化(请先在 app.py 调用 init_runtime")
return _logger
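
A sketch of the startup assembly the docstring describes (the exact shape of app.py is assumed, not shown in this commit):

# app.py (assumed)
from flask import Flask
from flask_socketio import SocketIO

from app_logger import get_logger
from services.runtime import init_runtime

app = Flask(__name__)
socketio = SocketIO(app)

# After this call, services/* modules can use get_socketio()/get_logger()
# without importing app.py directly.
init_runtime(socketio=socketio, logger=get_logger("app"))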

113
services/schedule_utils.py Normal file
View File

@@ -0,0 +1,113 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import annotations
import random
from datetime import datetime, timedelta
from typing import Iterable, Optional, Set, Tuple
from services.time_utils import BEIJING_TZ
def _parse_hhmm(value: str, default: Tuple[int, int] = (8, 0)) -> Tuple[int, int]:
text = str(value or "").strip()
if ":" not in text:
return default
try:
h, m = text.split(":", 1)
hour = int(h)
minute = int(m)
if 0 <= hour <= 23 and 0 <= minute <= 59:
return hour, minute
except Exception:
pass
return default
def _parse_weekdays(value: str, default: Iterable[int] = (1, 2, 3, 4, 5)) -> Set[int]:
text = str(value or "").strip()
days = []
for part in text.split(","):
part = part.strip()
if not part:
continue
try:
day = int(part)
except Exception:
continue
if 1 <= day <= 7:
days.append(day)
return set(days) if days else set(default)
def _parse_cst_datetime(value: Optional[str]) -> Optional[datetime]:
if not value:
return None
text = str(value).strip()
if not text:
return None
try:
naive = datetime.strptime(text, "%Y-%m-%d %H:%M:%S")
return BEIJING_TZ.localize(naive)
except Exception:
return None
def compute_next_run_at(
*,
now: datetime,
schedule_time: str,
weekdays: str,
random_delay: int = 0,
last_run_at: Optional[str] = None,
) -> datetime:
"""
计算下一次实际执行时间(北京时间,aware datetime)。
规则:
- weekday 过滤(1=周一..7=周日)
- random_delay=1 时:在 [schedule_time-15min, schedule_time+15min] 内随机
- 同一天只执行一次:若 last_run_at 是今天,则 next_run_at 至少是下一可用日
"""
if now.tzinfo is None:
now = BEIJING_TZ.localize(now)
hour, minute = _parse_hhmm(schedule_time, default=(8, 0))
allowed_weekdays = _parse_weekdays(weekdays, default=(1, 2, 3, 4, 5))
random_delay = 1 if int(random_delay or 0) == 1 else 0
last_run_dt = _parse_cst_datetime(last_run_at)
last_run_date = last_run_dt.date() if last_run_dt else None
base_today = now.replace(hour=hour, minute=minute, second=0, microsecond=0)
for day_offset in range(0, 14):
day = (base_today + timedelta(days=day_offset)).date()
if last_run_date is not None and day == last_run_date:
continue
candidate_base = base_today.replace(year=day.year, month=day.month, day=day.day)
if candidate_base.isoweekday() not in allowed_weekdays:
continue
if random_delay:
window_start = candidate_base - timedelta(minutes=15)
random_minutes = random.randint(0, 30)
candidate = window_start + timedelta(minutes=random_minutes)
else:
candidate = candidate_base
if candidate <= now:
continue
return candidate
# 兜底:找不到则推迟一天
return now + timedelta(days=1)
def format_cst(dt: datetime) -> str:
"""格式化为 DB 存储用的 CST 字符串。"""
if dt.tzinfo is None:
dt = BEIJING_TZ.localize(dt)
dt = dt.astimezone(BEIJING_TZ)
return dt.strftime("%Y-%m-%d %H:%M:%S")
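
A worked example for compute_next_run_at (dates are illustrative):

from datetime import datetime
from services.schedule_utils import compute_next_run_at, format_cst
from services.time_utils import BEIJING_TZ

# Friday 2025-01-03 09:00 Beijing time; the 08:00 weekday slot has already
# passed and the weekend is excluded, so the next run is Monday 08:00.
now = BEIJING_TZ.localize(datetime(2025, 1, 3, 9, 0, 0))
next_run = compute_next_run_at(
    now=now,
    schedule_time="08:00",
    weekdays="1,2,3,4,5",
    random_delay=0,
    last_run_at=None,
)
print(format_cst(next_run))  # 2025-01-06 08:00:00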

389
services/scheduler.py Normal file
View File

@@ -0,0 +1,389 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import annotations
import os
import threading
import time
from datetime import timedelta
import database
import email_service
from app_config import get_config
from app_logger import get_logger
from services.accounts_service import load_user_accounts
from services.browse_types import BROWSE_TYPE_SHOULD_READ, normalize_browse_type
from services.state import (
safe_cleanup_expired_captcha,
safe_create_batch,
safe_finalize_batch_after_dispatch,
safe_get_account,
safe_get_user_accounts_snapshot,
)
from services.task_batches import _send_batch_task_email_if_configured
from services.tasks import submit_account_task
from services.time_utils import get_beijing_now
logger = get_logger("app")
config = get_config()
SCREENSHOTS_DIR = config.SCREENSHOTS_DIR
os.makedirs(SCREENSHOTS_DIR, exist_ok=True)
def run_scheduled_task(skip_weekday_check: bool = False) -> None:
"""执行所有账号的浏览任务(可被手动调用,过滤重复账号)"""
try:
config_data = database.get_system_config()
browse_type = normalize_browse_type(config_data.get("schedule_browse_type", BROWSE_TYPE_SHOULD_READ))
if not skip_weekday_check:
now_beijing = get_beijing_now()
current_weekday = now_beijing.isoweekday()
schedule_weekdays = config_data.get("schedule_weekdays", "1,2,3,4,5,6,7")
allowed_weekdays = [int(d.strip()) for d in schedule_weekdays.split(",") if d.strip()]
if current_weekday not in allowed_weekdays:
weekday_names = ["", "周一", "周二", "周三", "周四", "周五", "周六", "周日"]
logger.info(f"[定时任务] 今天是{weekday_names[current_weekday]},不在执行日期内,跳过执行")
return
else:
logger.info("[立即执行] 跳过星期检查,强制执行任务")
logger.info(f"[定时任务] 开始执行 - 浏览类型: {browse_type}")
all_users = database.get_all_users()
approved_users = [u for u in all_users if u["status"] == "approved"]
executed_usernames = set()
total_accounts = 0
skipped_duplicates = 0
executed_accounts = 0
cfg = database.get_system_config()
enable_screenshot_scheduled = cfg.get("enable_screenshot", 0) == 1
for user in approved_users:
user_id = user["id"]
accounts = safe_get_user_accounts_snapshot(user_id)
if not accounts:
load_user_accounts(user_id)
accounts = safe_get_user_accounts_snapshot(user_id)
for account_id, account in accounts.items():
total_accounts += 1
if account.is_running:
continue
account_status_info = database.get_account_status(account_id)
if account_status_info:
status = account_status_info["status"] if "status" in account_status_info.keys() else "active"
if status == "suspended":
fail_count = (
account_status_info["login_fail_count"]
if "login_fail_count" in account_status_info.keys()
else 0
)
logger.info(
f"[定时任务] 跳过暂停账号: {account.username} (用户:{user['username']}) - 连续{fail_count}次密码错误,需修改密码"
)
continue
if account.username in executed_usernames:
skipped_duplicates += 1
logger.info(
f"[定时任务] 跳过重复账号: {account.username} (用户:{user['username']}) - 该账号已被其他用户执行"
)
continue
executed_usernames.add(account.username)
logger.info(f"[定时任务] 启动账号: {account.username} (用户:{user['username']})")
ok, msg = submit_account_task(
user_id=user_id,
account_id=account_id,
browse_type=browse_type,
enable_screenshot=enable_screenshot_scheduled,
source="scheduled",
)
if ok:
executed_accounts += 1
else:
logger.warning(f"[定时任务] 启动失败({account.username}): {msg}")
time.sleep(2)
logger.info(
f"[定时任务] 执行完成 - 总账号数:{total_accounts}, 已执行:{executed_accounts}, 跳过重复:{skipped_duplicates}"
)
except Exception as e:
logger.exception(f"[定时任务] 执行出错: {str(e)}")
def scheduled_task_worker() -> None:
"""定时任务工作线程"""
import schedule
def cleanup_expired_captcha():
try:
deleted_count = safe_cleanup_expired_captcha()
if deleted_count > 0:
logger.info(f"[定时清理] 已清理 {deleted_count} 个过期验证码")
except Exception as e:
logger.warning(f"[定时清理] 清理验证码出错: {str(e)}")
def cleanup_old_data():
"""清理旧数据7天前截图和任务日志30天前操作日志和定时任务执行日志"""
try:
logger.info("[定时清理] 开始清理旧数据...")
deleted_logs = database.delete_old_task_logs(7)
logger.info(f"[定时清理] 已删除 {deleted_logs} 条任务日志")
deleted_operation_logs = database.clean_old_operation_logs(30)
logger.info(f"[定时清理] 已删除 {deleted_operation_logs} 条操作日志")
deleted_schedule_logs = database.clean_old_schedule_logs(30)
logger.info(f"[定时清理] 已删除 {deleted_schedule_logs} 条定时任务执行日志")
deleted_screenshots = 0
if os.path.exists(SCREENSHOTS_DIR):
cutoff_time = time.time() - (7 * 24 * 60 * 60)
for filename in os.listdir(SCREENSHOTS_DIR):
if filename.lower().endswith((".png", ".jpg", ".jpeg")):
filepath = os.path.join(SCREENSHOTS_DIR, filename)
try:
if os.path.getmtime(filepath) < cutoff_time:
os.remove(filepath)
deleted_screenshots += 1
except Exception as e:
logger.warning(f"[定时清理] 删除截图失败 {filename}: {str(e)}")
logger.info(f"[定时清理] 已删除 {deleted_screenshots} 个截图文件")
logger.info("[定时清理] 清理完成!")
except Exception as e:
logger.exception(f"[定时清理] 清理任务出错: {str(e)}")
def check_user_schedules():
"""检查并执行用户定时任务O-08next_run_at 索引驱动)。"""
import json
try:
now = get_beijing_now()
now_str = now.strftime("%Y-%m-%d %H:%M:%S")
current_weekday = now.isoweekday()
due_schedules = database.get_due_user_schedules(now_str, limit=50) or []
for schedule_config in due_schedules:
schedule_name = schedule_config.get("name", "未命名任务")
schedule_id = schedule_config["id"]
weekdays_str = schedule_config.get("weekdays", "1,2,3,4,5")
try:
allowed_weekdays = [int(d) for d in weekdays_str.split(",") if d.strip()]
except Exception as e:
logger.warning(f"[定时任务] 任务#{schedule_id} 解析weekdays失败: {e}")
try:
database.recompute_schedule_next_run(schedule_id)
except Exception:
pass
continue
if current_weekday not in allowed_weekdays:
try:
database.recompute_schedule_next_run(schedule_id)
except Exception:
pass
continue
logger.info(f"[用户定时任务] 任务#{schedule_id} '{schedule_name}' 到期,开始执行 (next_run_at={schedule_config.get('next_run_at')})")
user_id = schedule_config["user_id"]
schedule_id = schedule_config["id"]
browse_type = normalize_browse_type(schedule_config.get("browse_type", BROWSE_TYPE_SHOULD_READ))
enable_screenshot = schedule_config.get("enable_screenshot", 1)
try:
account_ids_raw = schedule_config.get("account_ids", "[]") or "[]"
account_ids = json.loads(account_ids_raw)
except Exception as e:
logger.warning(f"[定时任务] 任务#{schedule_id} 解析account_ids失败: {e}")
account_ids = []
if not account_ids:
try:
database.recompute_schedule_next_run(schedule_id)
except Exception:
pass
continue
if not safe_get_user_accounts_snapshot(user_id):
load_user_accounts(user_id)
import time as time_mod
import uuid
execution_start_time = time_mod.time()
log_id = database.create_schedule_execution_log(
schedule_id=schedule_id, user_id=user_id, schedule_name=schedule_config.get("name", "未命名任务")
)
batch_id = f"batch_{uuid.uuid4().hex[:12]}"
now_ts = time_mod.time()
safe_create_batch(
batch_id,
{
"user_id": user_id,
"browse_type": browse_type,
"schedule_name": schedule_config.get("name", "未命名任务"),
"screenshots": [],
"total_accounts": 0,
"completed": 0,
"created_at": now_ts,
"updated_at": now_ts,
},
)
started_count = 0
skipped_count = 0
completion_lock = threading.Lock()
remaining = {"count": 0, "done": False}
def on_browse_done():
with completion_lock:
remaining["count"] -= 1
if remaining["done"] or remaining["count"] > 0:
return
remaining["done"] = True
execution_duration = int(time_mod.time() - execution_start_time)
database.update_schedule_execution_log(
log_id,
total_accounts=len(account_ids),
success_accounts=started_count,
failed_accounts=len(account_ids) - started_count,
duration_seconds=execution_duration,
status="completed",
)
logger.info(
f"[用户定时任务] 任务#{schedule_id}浏览阶段完成,耗时{execution_duration}秒,等待截图完成后发送邮件"
)
for account_id in account_ids:
account = safe_get_account(user_id, account_id)
if not account:
skipped_count += 1
continue
if account.is_running:
skipped_count += 1
continue
task_source = f"user_scheduled:{batch_id}"
with completion_lock:
remaining["count"] += 1
ok, msg = submit_account_task(
user_id=user_id,
account_id=account_id,
browse_type=browse_type,
enable_screenshot=enable_screenshot,
source=task_source,
done_callback=on_browse_done,
)
if ok:
started_count += 1
else:
with completion_lock:
remaining["count"] -= 1
skipped_count += 1
logger.warning(f"[用户定时任务] 账号 {account.username} 启动失败: {msg}")
batch_info = safe_finalize_batch_after_dispatch(batch_id, started_count, now_ts=time_mod.time())
if batch_info:
_send_batch_task_email_if_configured(batch_info)
database.update_schedule_last_run(schedule_id)
logger.info(f"[用户定时任务] 已启动 {started_count} 个账号,跳过 {skipped_count} 个账号批次ID: {batch_id}")
if started_count <= 0:
database.update_schedule_execution_log(
log_id,
total_accounts=len(account_ids),
success_accounts=0,
failed_accounts=len(account_ids),
duration_seconds=0,
status="completed",
)
if started_count == 0 and len(account_ids) > 0:
logger.warning("[用户定时任务] ⚠️ 警告所有账号都被跳过了请检查user_accounts状态")
except Exception as e:
logger.exception(f"[用户定时任务] 检查出错: {str(e)}")
try:
config_check_interval = float(os.environ.get("SCHEDULER_CONFIG_CHECK_SECONDS", "30"))
except Exception:
config_check_interval = 30.0
config_check_interval = max(5.0, config_check_interval)
schedule_state = {"signature": None}
def check_and_schedule(force: bool = False):
config_data = database.get_system_config()
schedule_enabled = bool(config_data.get("schedule_enabled"))
schedule_time_cst = config_data.get("schedule_time", "02:00")
signature = (schedule_enabled, schedule_time_cst)
config_changed = schedule_state.get("signature") != signature
is_first_run = schedule_state.get("signature") is None
if (not force) and (not config_changed):
return
schedule_state["signature"] = signature
schedule.clear()
cleanup_time_cst = "03:00"
schedule.every().day.at(cleanup_time_cst).do(cleanup_old_data)
schedule.every().hour.do(cleanup_expired_captcha)
quota_reset_time_cst = "00:00"
schedule.every().day.at(quota_reset_time_cst).do(email_service.reset_smtp_daily_quota)
if is_first_run:
logger.info(f"[定时任务] 已设置数据清理任务: 每天 CST {cleanup_time_cst}")
logger.info(f"[定时任务] 已设置验证码清理任务: 每小时执行一次")
logger.info(f"[定时任务] 已设置SMTP配额重置: 每天 CST {quota_reset_time_cst}")
if schedule_enabled:
schedule.every().day.at(schedule_time_cst).do(run_scheduled_task)
if is_first_run or config_changed:
logger.info(f"[定时任务] 已设置浏览任务: 每天 CST {schedule_time_cst}")
elif config_changed and not is_first_run:
logger.info("[定时任务] 浏览任务已禁用")
check_and_schedule(force=True)
last_config_check = time.time()
last_user_schedule_minute = None
while True:
try:
schedule.run_pending()
now_ts = time.time()
if now_ts - last_config_check >= config_check_interval:
check_and_schedule()
last_config_check = now_ts
now_beijing = get_beijing_now()
minute_key = now_beijing.strftime("%Y-%m-%d %H:%M")
if minute_key != last_user_schedule_minute:
check_user_schedules()
last_user_schedule_minute = minute_key
time.sleep(1)
except Exception as e:
logger.exception(f"[定时任务] 调度器出错: {str(e)}")
time.sleep(5)
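
The worker loop above is presumably started from app.py as a daemon thread; a minimal sketch of that assumption:

import threading
from services.scheduler import scheduled_task_worker

# Runs schedule.run_pending() once per second, re-reads the system config
# roughly every 30 seconds, and checks user schedules once per minute.
threading.Thread(target=scheduled_task_worker, daemon=True, name="scheduler").start()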

272
services/screenshots.py Normal file
View File

@@ -0,0 +1,272 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import annotations
import os
import time
import database
import email_service
from app_config import get_config
from app_logger import get_logger
from browser_pool_worker import get_browser_worker_pool
from playwright_automation import PlaywrightAutomation
from services.browser_manager import get_browser_manager
from services.client_log import log_to_client
from services.runtime import get_socketio
from services.state import safe_get_account, safe_remove_task_status, safe_update_task_status
from services.task_batches import _batch_task_record_result, _get_batch_id_from_source
from services.time_utils import get_beijing_now
logger = get_logger("app")
config = get_config()
SCREENSHOTS_DIR = config.SCREENSHOTS_DIR
os.makedirs(SCREENSHOTS_DIR, exist_ok=True)
def _emit(event: str, data: object, *, room: str | None = None) -> None:
try:
socketio = get_socketio()
socketio.emit(event, data, room=room)
except Exception:
# runtime 未初始化时(如测试/离线脚本),忽略推送
pass
def take_screenshot_for_account(
user_id,
account_id,
browse_type="应读",
source="manual",
task_start_time=None,
browse_result=None,
):
"""为账号任务完成后截图(使用工作线程池,真正的浏览器复用)"""
account = safe_get_account(user_id, account_id)
if not account:
return
# 以本次调用的 browse_type 为准(避免 last_browse_type 被刷新/重载导致截图页面不一致)
if browse_type:
account.last_browse_type = browse_type
# 标记账号正在截图(防止重复提交截图任务)
account.is_running = True
def screenshot_task(browser_instance, user_id, account_id, account, browse_type, source, task_start_time, browse_result):
"""在worker线程中执行的截图任务"""
# ✅ 获得worker后立即更新状态为"截图中"
acc = safe_get_account(user_id, account_id)
if acc:
acc.status = "截图中"
safe_update_task_status(account_id, {"status": "运行中", "detail_status": "正在截图"})
_emit("account_update", acc.to_dict(), room=f"user_{user_id}")
max_retries = 3
for attempt in range(1, max_retries + 1):
automation = None
try:
safe_update_task_status(
account_id,
{"detail_status": f"正在截图{f' (第{attempt}次)' if attempt > 1 else ''}"},
)
if attempt > 1:
log_to_client(f"🔄 第 {attempt} 次截图尝试...", user_id, account_id)
log_to_client(
f"使用Worker-{browser_instance['worker_id']}的浏览器(已使用{browser_instance['use_count']}次)",
user_id,
account_id,
)
proxy_config = account.proxy_config if hasattr(account, "proxy_config") else None
automation = PlaywrightAutomation(get_browser_manager(), account_id, proxy_config=proxy_config)
automation.playwright = browser_instance["playwright"]
automation.browser = browser_instance["browser"]
def custom_log(message: str):
log_to_client(message, user_id, account_id)
automation.log = custom_log
log_to_client("登录中...", user_id, account_id)
login_result = automation.quick_login(account.username, account.password, account.remember)
if not login_result["success"]:
error_message = login_result.get("message", "截图登录失败")
log_to_client(f"截图登录失败: {error_message}", user_id, account_id)
if attempt < max_retries:
log_to_client("将重试...", user_id, account_id)
time.sleep(2)
continue
log_to_client("❌ 截图失败: 登录失败", user_id, account_id)
return {"success": False, "error": "登录失败"}
log_to_client(f"导航到 '{browse_type}' 页面...", user_id, account_id)
# 截图场景:优先用 bz 参数直达页面(更稳定,避免页面按钮点击失败导致截图跑偏)
navigated = False
try:
from urllib.parse import urlsplit
parsed = urlsplit(config.ZSGL_LOGIN_URL)
base = f"{parsed.scheme}://{parsed.netloc}"
if "注册前" in str(browse_type):
bz = 0
else:
bz = 2 # 应读
target_url = f"{base}/admin/center.aspx?bz={bz}"
automation.main_page.goto(target_url, timeout=60000)
current_url = getattr(automation.main_page, "url", "") or ""
if "center.aspx" not in current_url:
raise RuntimeError(f"unexpected_url:{current_url}")
try:
automation.main_page.wait_for_load_state("networkidle", timeout=30000)
except Exception:
pass
try:
automation.main_page.wait_for_selector("table.ltable", timeout=20000)
except Exception:
pass
navigated = True
except Exception as nav_error:
log_to_client(f"直达页面失败,将尝试按钮切换: {str(nav_error)[:120]}", user_id, account_id)
# 兼容兜底:若直达失败,则回退到原有按钮切换方式
if not navigated:
result = automation.browse_content(
navigate_only=True,
browse_type=browse_type,
auto_next_page=False,
auto_view_attachments=False,
interval=0,
should_stop_callback=None,
)
if not result.success and result.error_message:
log_to_client(f"导航警告: {result.error_message}", user_id, account_id)
time.sleep(2)
timestamp = get_beijing_now().strftime("%Y%m%d_%H%M%S")
user_info = database.get_user_by_id(user_id)
username_prefix = user_info["username"] if user_info else f"user{user_id}"
login_account = account.remark if account.remark else account.username
screenshot_filename = f"{username_prefix}_{login_account}_{browse_type}_{timestamp}.jpg"
screenshot_path = os.path.join(SCREENSHOTS_DIR, screenshot_filename)
if automation.take_screenshot(screenshot_path):
if os.path.exists(screenshot_path) and os.path.getsize(screenshot_path) > 1000:
log_to_client(f"✓ 截图成功: {screenshot_filename}", user_id, account_id)
return {"success": True, "filename": screenshot_filename}
log_to_client("截图文件异常,将重试", user_id, account_id)
if os.path.exists(screenshot_path):
os.remove(screenshot_path)
else:
log_to_client("截图保存失败", user_id, account_id)
if attempt < max_retries:
log_to_client("将重试...", user_id, account_id)
time.sleep(2)
except Exception as e:
log_to_client(f"截图出错: {str(e)}", user_id, account_id)
if attempt < max_retries:
log_to_client("将重试...", user_id, account_id)
time.sleep(2)
finally:
if automation:
try:
if automation.context:
automation.context.close()
automation.context = None
automation.page = None
except Exception as e:
logger.debug(f"关闭context时出错: {e}")
return {"success": False, "error": "截图失败已重试3次"}
def screenshot_callback(result, error):
"""截图完成回调"""
try:
account.is_running = False
account.status = "未开始"
safe_remove_task_status(account_id)
_emit("account_update", account.to_dict(), room=f"user_{user_id}")
if error:
log_to_client(f"❌ 截图失败: {error}", user_id, account_id)
elif not result or not result.get("success"):
error_msg = result.get("error", "未知错误") if result else "未知错误"
log_to_client(f"❌ 截图失败: {error_msg}", user_id, account_id)
if task_start_time and browse_result:
import time as time_module
total_elapsed = int(time_module.time() - task_start_time)
database.create_task_log(
user_id=user_id,
account_id=account_id,
username=account.username,
browse_type=browse_type,
status="success",
total_items=browse_result.get("total_items", 0),
total_attachments=browse_result.get("total_attachments", 0),
duration=total_elapsed,
source=source,
)
try:
batch_id = _get_batch_id_from_source(source)
screenshot_path = None
if result and result.get("success") and result.get("filename"):
screenshot_path = os.path.join(SCREENSHOTS_DIR, result["filename"])
account_name = account.remark if account.remark else account.username
if batch_id:
_batch_task_record_result(
batch_id=batch_id,
account_name=account_name,
screenshot_path=screenshot_path,
total_items=browse_result.get("total_items", 0),
total_attachments=browse_result.get("total_attachments", 0),
)
elif source and source.startswith("user_scheduled"):
user_info = database.get_user_by_id(user_id)
if user_info and user_info.get("email") and database.get_user_email_notify(user_id):
email_service.send_task_complete_email_async(
user_id=user_id,
email=user_info["email"],
username=user_info["username"],
account_name=account_name,
browse_type=browse_type,
total_items=browse_result.get("total_items", 0),
total_attachments=browse_result.get("total_attachments", 0),
screenshot_path=screenshot_path,
log_callback=lambda msg: log_to_client(msg, user_id, account_id),
)
except Exception as email_error:
logger.warning(f"发送任务完成邮件失败: {email_error}")
except Exception as e:
logger.error(f"截图回调出错: {e}")
pool = get_browser_worker_pool()
submitted = pool.submit_task(
screenshot_task,
screenshot_callback,
user_id,
account_id,
account,
browse_type,
source,
task_start_time,
browse_result,
)
if not submitted:
screenshot_callback(None, "截图队列已满,请稍后重试")
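
A hypothetical caller of take_screenshot_for_account (normally run_task in services/tasks.py passes the real browse results through; the ids here are made up):

from services.screenshots import take_screenshot_for_account

# Queues a screenshot job on the browser worker pool; the internal callback
# records task logs, batch results and optional email notification on completion.
take_screenshot_for_account(
    user_id=42,
    account_id="acc_001",
    browse_type="应读",
    source="manual",
    task_start_time=None,
    browse_result=None,
)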

481
services/state.py Normal file
View File

@@ -0,0 +1,481 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
线程安全的全局状态管理(P0 / O-01)。
约束:
- 业务代码禁止直接读写底层 dict,必须通过本模块 safe_* API 访问
- 读:要么持锁并返回副本,要么以“快照”的方式返回可迭代列表
"""
from __future__ import annotations
import threading
import time
from typing import Any, Dict, List, Optional, Tuple
from app_config import get_config
config = get_config()
# ==================== Active tasks(运行中的任务句柄) ====================
_active_tasks: Dict[str, Any] = {}
_active_tasks_lock = threading.RLock()
def safe_set_task(account_id: str, handle: Any) -> None:
with _active_tasks_lock:
_active_tasks[account_id] = handle
def safe_get_task(account_id: str) -> Any:
with _active_tasks_lock:
return _active_tasks.get(account_id)
def safe_remove_task(account_id: str) -> Any:
with _active_tasks_lock:
return _active_tasks.pop(account_id, None)
def safe_get_active_task_ids() -> List[str]:
with _active_tasks_lock:
return list(_active_tasks.keys())
# ==================== Task status(前端展示状态) ====================
_task_status: Dict[str, Dict[str, Any]] = {}
_task_status_lock = threading.RLock()
def safe_set_task_status(account_id: str, status_dict: Dict[str, Any]) -> None:
with _task_status_lock:
_task_status[account_id] = dict(status_dict or {})
def safe_update_task_status(account_id: str, updates: Dict[str, Any]) -> bool:
with _task_status_lock:
if account_id not in _task_status:
return False
_task_status[account_id].update(updates or {})
return True
def safe_get_task_status(account_id: str) -> Dict[str, Any]:
with _task_status_lock:
value = _task_status.get(account_id)
return dict(value) if value else {}
def safe_remove_task_status(account_id: str) -> Optional[Dict[str, Any]]:
with _task_status_lock:
return _task_status.pop(account_id, None)
def safe_get_all_task_status() -> Dict[str, Dict[str, Any]]:
with _task_status_lock:
return {k: dict(v) for k, v in _task_status.items()}
def safe_iter_task_status_items() -> List[Tuple[str, Dict[str, Any]]]:
with _task_status_lock:
return [(k, dict(v)) for k, v in _task_status.items()]
# ==================== User accounts cache(账号对象缓存) ====================
_user_accounts: Dict[int, Dict[str, Any]] = {}
_user_accounts_last_access: Dict[int, float] = {}
_user_accounts_lock = threading.RLock()
def safe_touch_user_accounts(user_id: int) -> None:
now_ts = time.time()
with _user_accounts_lock:
_user_accounts_last_access[int(user_id)] = now_ts
def safe_get_user_accounts_last_access_items() -> List[Tuple[int, float]]:
with _user_accounts_lock:
return list(_user_accounts_last_access.items())
def safe_get_user_accounts_snapshot(user_id: int) -> Dict[str, Any]:
with _user_accounts_lock:
return dict(_user_accounts.get(int(user_id), {}))
def safe_set_user_accounts(user_id: int, accounts_by_id: Dict[str, Any]) -> None:
with _user_accounts_lock:
_user_accounts[int(user_id)] = dict(accounts_by_id or {})
_user_accounts_last_access[int(user_id)] = time.time()
def safe_get_account(user_id: int, account_id: str) -> Any:
with _user_accounts_lock:
return _user_accounts.get(int(user_id), {}).get(account_id)
def safe_set_account(user_id: int, account_id: str, account_obj: Any) -> None:
with _user_accounts_lock:
uid = int(user_id)
if uid not in _user_accounts:
_user_accounts[uid] = {}
_user_accounts[uid][account_id] = account_obj
_user_accounts_last_access[uid] = time.time()
def safe_remove_account(user_id: int, account_id: str) -> Any:
with _user_accounts_lock:
uid = int(user_id)
if uid not in _user_accounts:
return None
return _user_accounts[uid].pop(account_id, None)
def safe_remove_user_accounts(user_id: int) -> None:
with _user_accounts_lock:
uid = int(user_id)
_user_accounts.pop(uid, None)
_user_accounts_last_access.pop(uid, None)
def safe_iter_user_accounts_items() -> List[Tuple[int, Dict[str, Any]]]:
with _user_accounts_lock:
return [(uid, dict(accounts)) for uid, accounts in _user_accounts.items()]
def safe_has_user(user_id: int) -> bool:
with _user_accounts_lock:
return int(user_id) in _user_accounts
# ==================== Log cache(用户维度日志缓存) ====================
_log_cache: Dict[int, List[Dict[str, Any]]] = {}
_log_cache_lock = threading.RLock()
_log_cache_total_count = 0
def safe_add_log(
user_id: int,
log_entry: Dict[str, Any],
*,
max_logs_per_user: Optional[int] = None,
max_total_logs: Optional[int] = None,
) -> None:
global _log_cache_total_count
uid = int(user_id)
max_logs_per_user = int(max_logs_per_user or config.MAX_LOGS_PER_USER)
max_total_logs = int(max_total_logs or config.MAX_TOTAL_LOGS)
with _log_cache_lock:
if uid not in _log_cache:
_log_cache[uid] = []
if len(_log_cache[uid]) >= max_logs_per_user:
_log_cache[uid].pop(0)
_log_cache_total_count = max(0, _log_cache_total_count - 1)
_log_cache[uid].append(dict(log_entry or {}))
_log_cache_total_count += 1
while _log_cache_total_count > max_total_logs:
if not _log_cache:
break
max_user = max(_log_cache.keys(), key=lambda u: len(_log_cache[u]))
if _log_cache.get(max_user):
_log_cache[max_user].pop(0)
_log_cache_total_count -= 1
else:
break
def safe_get_user_logs(user_id: int) -> List[Dict[str, Any]]:
uid = int(user_id)
with _log_cache_lock:
return list(_log_cache.get(uid, []))
def safe_clear_user_logs(user_id: int) -> None:
global _log_cache_total_count
uid = int(user_id)
with _log_cache_lock:
removed = len(_log_cache.get(uid, []))
_log_cache.pop(uid, None)
_log_cache_total_count = max(0, _log_cache_total_count - removed)
def safe_get_log_cache_total_count() -> int:
with _log_cache_lock:
return int(_log_cache_total_count)
# ==================== Captcha storage(验证码存储) ====================
_captcha_storage: Dict[str, Dict[str, Any]] = {}
_captcha_storage_lock = threading.RLock()
def safe_set_captcha(session_id: str, captcha_data: Dict[str, Any]) -> None:
with _captcha_storage_lock:
_captcha_storage[str(session_id)] = dict(captcha_data or {})
def safe_cleanup_expired_captcha(now_ts: Optional[float] = None) -> int:
now_ts = float(now_ts if now_ts is not None else time.time())
with _captcha_storage_lock:
expired = [k for k, v in _captcha_storage.items() if float(v.get("expire_time", 0) or 0) < now_ts]
for k in expired:
_captcha_storage.pop(k, None)
return len(expired)
def safe_delete_captcha(session_id: str) -> None:
with _captcha_storage_lock:
_captcha_storage.pop(str(session_id), None)
def safe_verify_and_consume_captcha(session_id: str, code: str, *, max_attempts: Optional[int] = None) -> Tuple[bool, str]:
max_attempts = int(max_attempts or config.MAX_CAPTCHA_ATTEMPTS)
with _captcha_storage_lock:
captcha_data = _captcha_storage.pop(str(session_id), None)
if captcha_data is None:
return False, "验证码已过期或不存在,请重新获取"
try:
if float(captcha_data.get("expire_time", 0) or 0) < time.time():
return False, "验证码已过期,请重新获取"
failed_attempts = int(captcha_data.get("failed_attempts", 0) or 0)
if failed_attempts >= max_attempts:
return False, f"验证码错误次数过多({max_attempts}次),请重新获取"
expected = str(captcha_data.get("code", "") or "").lower()
actual = str(code or "").lower()
if expected != actual:
failed_attempts += 1
captcha_data["failed_attempts"] = failed_attempts
if failed_attempts < max_attempts:
_captcha_storage[str(session_id)] = captcha_data
return False, "验证码错误"
return True, "验证成功"
except Exception:
return False, "验证码验证失败,请重新获取"
# ==================== IP rate limit(验证码失败限流) ====================
_ip_rate_limit: Dict[str, Dict[str, Any]] = {}
_ip_rate_limit_lock = threading.RLock()
def check_ip_rate_limit(
ip_address: str,
*,
max_attempts_per_hour: Optional[int] = None,
lock_duration_seconds: Optional[int] = None,
) -> Tuple[bool, Optional[str]]:
current_time = time.time()
max_attempts_per_hour = int(max_attempts_per_hour or config.MAX_IP_ATTEMPTS_PER_HOUR)
lock_duration_seconds = int(lock_duration_seconds or config.IP_LOCK_DURATION)
with _ip_rate_limit_lock:
expired_ips = []
for ip, data in _ip_rate_limit.items():
lock_expired = float(data.get("lock_until", 0) or 0) < current_time
first_attempt = data.get("first_attempt")
attempt_expired = first_attempt is None or (current_time - float(first_attempt)) > 3600
if lock_expired and attempt_expired:
expired_ips.append(ip)
for ip in expired_ips:
_ip_rate_limit.pop(ip, None)
ip_key = str(ip_address)
if ip_key in _ip_rate_limit:
ip_data = _ip_rate_limit[ip_key]
if float(ip_data.get("lock_until", 0) or 0) > current_time:
remaining_time = int(float(ip_data["lock_until"]) - current_time)
return False, f"IP已被锁定,请{remaining_time // 60 + 1}分钟后再试"
first_attempt = ip_data.get("first_attempt")
if first_attempt is None or current_time - float(first_attempt) > 3600:
_ip_rate_limit[ip_key] = {"attempts": 0, "first_attempt": current_time}
return True, None
def record_failed_captcha(
ip_address: str,
*,
max_attempts_per_hour: Optional[int] = None,
lock_duration_seconds: Optional[int] = None,
) -> bool:
current_time = time.time()
max_attempts_per_hour = int(max_attempts_per_hour or config.MAX_IP_ATTEMPTS_PER_HOUR)
lock_duration_seconds = int(lock_duration_seconds or config.IP_LOCK_DURATION)
with _ip_rate_limit_lock:
ip_key = str(ip_address)
if ip_key not in _ip_rate_limit:
_ip_rate_limit[ip_key] = {"attempts": 1, "first_attempt": current_time}
else:
_ip_rate_limit[ip_key]["attempts"] = int(_ip_rate_limit[ip_key].get("attempts", 0) or 0) + 1
if int(_ip_rate_limit[ip_key].get("attempts", 0) or 0) >= max_attempts_per_hour:
_ip_rate_limit[ip_key]["lock_until"] = current_time + lock_duration_seconds
return True
return False
def cleanup_expired_ip_rate_limits(now_ts: Optional[float] = None) -> int:
now_ts = float(now_ts if now_ts is not None else time.time())
with _ip_rate_limit_lock:
expired_ips = []
for ip, data in _ip_rate_limit.items():
lock_until = float(data.get("lock_until", 0) or 0)
first_attempt = float(data.get("first_attempt", 0) or 0)
if lock_until < now_ts and (now_ts - first_attempt) > 3600:
expired_ips.append(ip)
for ip in expired_ips:
_ip_rate_limit.pop(ip, None)
return len(expired_ips)
def safe_get_ip_lock_until(ip_address: str) -> float:
"""获取指定 IP 的锁定截至时间戳(未锁定返回 0"""
ip_key = str(ip_address)
with _ip_rate_limit_lock:
data = _ip_rate_limit.get(ip_key) or {}
try:
return float(data.get("lock_until", 0) or 0)
except Exception:
return 0.0
# ==================== Batch screenshots(批次任务截图收集) ====================
_batch_task_screenshots: Dict[str, Dict[str, Any]] = {}
_batch_task_lock = threading.RLock()
def safe_create_batch(batch_id: str, batch_info: Dict[str, Any]) -> None:
with _batch_task_lock:
_batch_task_screenshots[str(batch_id)] = dict(batch_info or {})
def safe_get_batch(batch_id: str) -> Optional[Dict[str, Any]]:
with _batch_task_lock:
info = _batch_task_screenshots.get(str(batch_id))
return dict(info) if info else None
def safe_update_batch(batch_id: str, updates: Dict[str, Any]) -> bool:
with _batch_task_lock:
if str(batch_id) not in _batch_task_screenshots:
return False
_batch_task_screenshots[str(batch_id)].update(dict(updates or {}))
return True
def safe_pop_batch(batch_id: str) -> Optional[Dict[str, Any]]:
with _batch_task_lock:
return _batch_task_screenshots.pop(str(batch_id), None)
def safe_batch_append_result(batch_id: str, result: Dict[str, Any]) -> Optional[Dict[str, Any]]:
now_ts = time.time()
with _batch_task_lock:
info = _batch_task_screenshots.get(str(batch_id))
if not info:
return None
info.setdefault("screenshots", []).append(dict(result or {}))
info["completed"] = int(info.get("completed", 0) or 0) + 1
info["updated_at"] = now_ts
total = int(info.get("total_accounts", 0) or 0)
if total > 0 and int(info.get("completed", 0) or 0) >= total:
return _batch_task_screenshots.pop(str(batch_id), None)
return None
def safe_cleanup_expired_batches(expire_seconds: int, now_ts: Optional[float] = None) -> int:
now_ts = float(now_ts if now_ts is not None else time.time())
expire_seconds = max(1, int(expire_seconds))
with _batch_task_lock:
expired = []
for batch_id, info in list(_batch_task_screenshots.items()):
last_ts = info.get("updated_at") or info.get("created_at") or info.get("created_time") or now_ts
if (now_ts - float(last_ts)) > expire_seconds:
expired.append(batch_id)
for batch_id in expired:
_batch_task_screenshots.pop(batch_id, None)
return len(expired)
def safe_finalize_batch_after_dispatch(batch_id: str, total_accounts: int, *, now_ts: Optional[float] = None) -> Optional[Dict[str, Any]]:
"""定时批次任务:更新总账号数,并在“已完成>=总数”时弹出批次数据用于发邮件。"""
now_ts = float(now_ts if now_ts is not None else time.time())
with _batch_task_lock:
info = _batch_task_screenshots.get(str(batch_id))
if not info:
return None
info["total_accounts"] = int(total_accounts or 0)
info["updated_at"] = now_ts
if int(total_accounts or 0) <= 0:
_batch_task_screenshots.pop(str(batch_id), None)
return None
if int(info.get("completed", 0) or 0) >= int(total_accounts):
return _batch_task_screenshots.pop(str(batch_id), None)
return None
# ==================== Pending random schedules(兼容旧随机延迟逻辑) ====================
_pending_random_schedules: Dict[int, Dict[str, Any]] = {}
_pending_random_lock = threading.RLock()
def safe_set_pending_random_schedule(schedule_id: int, info: Dict[str, Any]) -> None:
with _pending_random_lock:
_pending_random_schedules[int(schedule_id)] = dict(info or {})
def safe_get_pending_random_schedule(schedule_id: int) -> Optional[Dict[str, Any]]:
with _pending_random_lock:
value = _pending_random_schedules.get(int(schedule_id))
return dict(value) if value else None
def safe_pop_pending_random_schedule(schedule_id: int) -> Optional[Dict[str, Any]]:
with _pending_random_lock:
return _pending_random_schedules.pop(int(schedule_id), None)
def safe_iter_pending_random_schedules_items() -> List[Tuple[int, Dict[str, Any]]]:
with _pending_random_lock:
return [(sid, dict(info)) for sid, info in _pending_random_schedules.items()]
def safe_cleanup_expired_pending_random(expire_seconds: int, now_ts: Optional[float] = None) -> int:
now_ts = float(now_ts if now_ts is not None else time.time())
expire_seconds = max(1, int(expire_seconds))
with _pending_random_lock:
expired = []
for schedule_id, info in list(_pending_random_schedules.items()):
created_at = info.get("created_at") or info.get("created_time") or now_ts
if (now_ts - float(created_at)) > expire_seconds:
expired.append(schedule_id)
for schedule_id in expired:
_pending_random_schedules.pop(int(schedule_id), None)
return len(expired)
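
Typical safe_* access pattern from the rest of the service layer (a sketch; the account id is made up):

from services import state

state.safe_set_task_status("acc_001", {"status": "排队中", "progress": {"items": 0, "attachments": 0}})
state.safe_update_task_status("acc_001", {"detail_status": "等待资源"})

snapshot = state.safe_get_task_status("acc_001")   # returns a copy, safe to read without the lock
print(snapshot["detail_status"])                   # 等待资源

state.safe_remove_task_status("acc_001")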

70
services/task_batches.py Normal file
View File

@@ -0,0 +1,70 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import annotations
import database
import email_service
from services.runtime import get_logger
from services.state import safe_batch_append_result
def _get_batch_id_from_source(source: str):
"""从source中提取批次IDsource格式: user_scheduled:batch_xxx"""
if not source:
return None
if source.startswith("user_scheduled:batch_"):
return source.split(":", 1)[1]
return None
def _send_batch_task_email_if_configured(batch_info: dict):
"""批次任务:当所有账号完成后发送打包邮件(在锁外调用)。"""
logger = get_logger()
try:
batch_user_id = batch_info.get("user_id")
if not batch_user_id:
return
user_info = database.get_user_by_id(batch_user_id)
if not user_info or not user_info.get("email"):
return
if not database.get_user_email_notify(batch_user_id):
return
if not batch_info.get("screenshots"):
return
email_service.send_batch_task_complete_email_async(
user_id=batch_user_id,
email=user_info["email"],
username=user_info["username"],
schedule_name=batch_info.get("schedule_name", "未命名任务"),
browse_type=batch_info.get("browse_type", "应读"),
screenshots=batch_info["screenshots"],
)
logger.info(f"[批次邮件] 已发送打包邮件,包含 {len(batch_info['screenshots'])} 条记录")
except Exception as e:
logger.warning(f"[批次邮件] 发送失败: {e}")
def _batch_task_record_result(
batch_id: str,
account_name: str,
screenshot_path: str,
total_items: int,
total_attachments: int,
):
"""批次任务:记录单账号结果,达到完成条件时触发邮件并回收内存。"""
logger = get_logger()
batch_info = safe_batch_append_result(
batch_id,
{
"account_name": account_name,
"path": screenshot_path,
"items": total_items,
"attachments": total_attachments,
},
)
if batch_info:
logger.info(
f"[批次邮件] 批次 {batch_id} 已完成: {batch_info.get('completed')}/{batch_info.get('total_accounts')},准备发送邮件"
)
_send_batch_task_email_if_configured(batch_info)
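
How scheduler.py and screenshots.py drive these helpers, condensed into a sketch (the batch id is invented):

from services.task_batches import _batch_task_record_result, _get_batch_id_from_source

batch_id = _get_batch_id_from_source("user_scheduled:batch_ab12cd34ef56")
if batch_id:  # -> "batch_ab12cd34ef56"
    # Appends one account's result to the batch; once completed >= total_accounts
    # the batch is popped and the packaged email is sent.
    _batch_task_record_result(
        batch_id=batch_id,
        account_name="demo_user",
        screenshot_path=None,
        total_items=12,
        total_attachments=3,
    )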

838
services/tasks.py Normal file
View File

@@ -0,0 +1,838 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import annotations
import heapq
import os
import threading
import time
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass
import database
import email_service
from api_browser import APIBrowser
from app_config import get_config
from app_logger import LoggerAdapter, get_logger
from services.checkpoints import get_checkpoint_mgr
from services.client_log import log_to_client
from services.proxy import get_proxy_from_api
from services.runtime import get_socketio
from services.screenshots import take_screenshot_for_account
from services.state import (
safe_get_account,
safe_get_task,
safe_remove_task,
safe_remove_task_status,
safe_set_task,
safe_set_task_status,
safe_update_task_status,
)
from services.task_batches import _batch_task_record_result, _get_batch_id_from_source
from task_checkpoint import TaskStage
logger = get_logger("app")
config = get_config()
# VIP优先级队列(仅用于可视化/调试)
vip_task_queue = [] # VIP用户任务队列
normal_task_queue = [] # 普通用户任务队列
task_queue_lock = threading.Lock()
# 并发默认值(启动后会由系统配置覆盖并调用 update_limits)
max_concurrent_per_account = config.MAX_CONCURRENT_PER_ACCOUNT
max_concurrent_global = config.MAX_CONCURRENT_GLOBAL
def _emit(event: str, data: object, *, room: str | None = None) -> None:
try:
socketio = get_socketio()
socketio.emit(event, data, room=room)
except Exception:
pass
@dataclass
class _TaskRequest:
user_id: int
account_id: str
browse_type: str
enable_screenshot: bool
source: str
retry_count: int
submitted_at: float
is_vip: bool
seq: int
canceled: bool = False
done_callback: object = None
class TaskScheduler:
"""全局任务调度器:队列排队,不为每个任务单独创建线程。"""
def __init__(self, max_global: int, max_per_user: int, max_queue_size: int = 1000):
self.max_global = max(1, int(max_global))
self.max_per_user = max(1, int(max_per_user))
self.max_queue_size = max(1, int(max_queue_size))
self._cond = threading.Condition()
self._pending = [] # heap: (priority, submitted_at, seq, task)
self._pending_by_account = {} # {account_id: task}
self._seq = 0
self._running_global = 0
self._running_by_user = {} # {user_id: running_count}
self._executor_max_workers = self.max_global
self._executor = ThreadPoolExecutor(max_workers=self._executor_max_workers, thread_name_prefix="TaskWorker")
self._old_executors = []
self._running = True
self._dispatcher_thread = threading.Thread(target=self._dispatch_loop, daemon=True, name="TaskDispatcher")
self._dispatcher_thread.start()
def shutdown(self, timeout: float = 5.0):
"""停止调度器(用于进程退出清理)"""
with self._cond:
self._running = False
self._cond.notify_all()
try:
self._dispatcher_thread.join(timeout=timeout)
except Exception:
pass
try:
self._executor.shutdown(wait=False)
except Exception:
pass
for ex in self._old_executors:
try:
ex.shutdown(wait=False)
except Exception:
pass
def update_limits(self, max_global: int = None, max_per_user: int = None, max_queue_size: int = None):
"""动态更新并发/队列上限(不影响已在运行的任务)"""
with self._cond:
if max_per_user is not None:
self.max_per_user = max(1, int(max_per_user))
if max_queue_size is not None:
self.max_queue_size = max(1, int(max_queue_size))
if max_global is not None:
new_max_global = max(1, int(max_global))
self.max_global = new_max_global
if new_max_global > self._executor_max_workers:
self._old_executors.append(self._executor)
self._executor_max_workers = new_max_global
self._executor = ThreadPoolExecutor(
max_workers=self._executor_max_workers, thread_name_prefix="TaskWorker"
)
try:
self._old_executors[-1].shutdown(wait=False)
except Exception:
pass
self._cond.notify_all()
def submit_task(
self,
user_id: int,
account_id: str,
browse_type: str,
enable_screenshot: bool = True,
source: str = "manual",
retry_count: int = 0,
is_vip: bool = None,
done_callback=None,
):
"""提交任务进入队列(返回: (ok, message)"""
if not user_id or not account_id:
return False, "参数错误"
submitted_at = time.time()
if is_vip is None:
try:
is_vip = bool(database.is_user_vip(user_id))
except Exception:
is_vip = False
else:
is_vip = bool(is_vip)
with self._cond:
if not self._running:
return False, "调度器未运行"
if len(self._pending_by_account) >= self.max_queue_size:
return False, "任务队列已满,请稍后再试"
if account_id in self._pending_by_account:
return False, "任务已在队列中"
if safe_get_task(account_id) is not None:
return False, "任务已在运行中"
self._seq += 1
task = _TaskRequest(
user_id=user_id,
account_id=account_id,
browse_type=browse_type,
enable_screenshot=bool(enable_screenshot),
source=source,
retry_count=int(retry_count or 0),
submitted_at=submitted_at,
is_vip=is_vip,
seq=self._seq,
done_callback=done_callback,
)
self._pending_by_account[account_id] = task
priority = 0 if is_vip else 1
heapq.heappush(self._pending, (priority, task.submitted_at, task.seq, task))
self._cond.notify_all()
# 用于可视化/调试:记录队列
with task_queue_lock:
if is_vip:
vip_task_queue.append(account_id)
else:
normal_task_queue.append(account_id)
return True, "已加入队列"
def cancel_pending_task(self, user_id: int, account_id: str) -> bool:
"""取消尚未开始的排队任务(已运行的任务由 should_stop 控制)"""
canceled_task = None
with self._cond:
task = self._pending_by_account.pop(account_id, None)
if not task:
return False
task.canceled = True
canceled_task = task
self._cond.notify_all()
# 从可视化队列移除
with task_queue_lock:
if account_id in vip_task_queue:
vip_task_queue.remove(account_id)
if account_id in normal_task_queue:
normal_task_queue.remove(account_id)
# 批次任务:取消也要推进完成计数,避免批次缓存常驻
try:
batch_id = _get_batch_id_from_source(canceled_task.source)
if batch_id:
acc = safe_get_account(user_id, account_id)
if acc:
account_name = acc.remark if acc.remark else acc.username
else:
account_name = account_id
_batch_task_record_result(
batch_id=batch_id,
account_name=account_name,
screenshot_path=None,
total_items=0,
total_attachments=0,
)
except Exception:
pass
return True
def _dispatch_loop(self):
while True:
task = None
with self._cond:
if not self._running:
return
if not self._pending or self._running_global >= self.max_global:
self._cond.wait(timeout=0.5)
continue
task = self._pop_next_runnable_locked()
if task is None:
self._cond.wait(timeout=0.5)
continue
self._running_global += 1
self._running_by_user[task.user_id] = self._running_by_user.get(task.user_id, 0) + 1
# 从队列移除(可视化)
with task_queue_lock:
if task.account_id in vip_task_queue:
vip_task_queue.remove(task.account_id)
if task.account_id in normal_task_queue:
normal_task_queue.remove(task.account_id)
try:
future = self._executor.submit(self._run_task_wrapper, task)
safe_set_task(task.account_id, future)
except Exception:
with self._cond:
self._running_global = max(0, self._running_global - 1)
self._running_by_user[task.user_id] = max(0, self._running_by_user.get(task.user_id, 1) - 1)
if self._running_by_user.get(task.user_id) == 0:
self._running_by_user.pop(task.user_id, None)
self._cond.notify_all()
def _pop_next_runnable_locked(self):
"""在锁内从优先队列取出“可运行”的任务避免VIP任务占位阻塞普通任务。"""
if not self._pending:
return None
skipped = []
selected = None
while self._pending:
_, _, _, task = heapq.heappop(self._pending)
if task.canceled:
continue
if self._pending_by_account.get(task.account_id) is not task:
continue
running_for_user = self._running_by_user.get(task.user_id, 0)
if running_for_user >= self.max_per_user:
skipped.append(task)
continue
selected = task
break
for t in skipped:
priority = 0 if t.is_vip else 1
heapq.heappush(self._pending, (priority, t.submitted_at, t.seq, t))
if selected is None:
return None
self._pending_by_account.pop(selected.account_id, None)
return selected
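# Illustrative sketch (not part of this commit): entries whose user already sits
# at the per-user cap are popped, parked, and pushed back once a runnable entry
# is found, so one saturated user cannot block the head of the queue. The
# function and variable names below are hypothetical.
import heapq

def _example_pop_runnable(heap, running_by_user, max_per_user):
    skipped, selected = [], None
    while heap:
        item = heapq.heappop(heap)
        _priority, _submitted_at, _seq, user_id = item
        if running_by_user.get(user_id, 0) >= max_per_user:
            skipped.append(item)          # park it, keep looking
            continue
        selected = item
        break
    for item in skipped:
        heapq.heappush(heap, item)        # re-insert with the original keys
    return selected

# _example_pop_runnable([(1, 100.0, 1, "u1"), (1, 101.0, 2, "u2")], {"u1": 1}, 1)
# -> (1, 101.0, 2, "u2") even though u1's entry was submitted earlier.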
def _run_task_wrapper(self, task: _TaskRequest):
try:
run_task(
user_id=task.user_id,
account_id=task.account_id,
browse_type=task.browse_type,
enable_screenshot=task.enable_screenshot,
source=task.source,
retry_count=task.retry_count,
)
finally:
try:
if callable(task.done_callback):
task.done_callback()
except Exception:
pass
safe_remove_task(task.account_id)
with self._cond:
self._running_global = max(0, self._running_global - 1)
self._running_by_user[task.user_id] = max(0, self._running_by_user.get(task.user_id, 1) - 1)
if self._running_by_user.get(task.user_id) == 0:
self._running_by_user.pop(task.user_id, None)
self._cond.notify_all()
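# Illustrative sketch (not part of this commit): the wrapper above returns the
# global and per-user slots in a finally block and notifies the Condition, so
# capacity is released even when run_task() raises. Simplified stand-alone shape
# with hypothetical names:
import threading

_demo_cond = threading.Condition()
_demo_running_by_user: dict = {}

def _example_release_slot(user_id: int) -> None:
    with _demo_cond:
        _demo_running_by_user[user_id] = max(0, _demo_running_by_user.get(user_id, 1) - 1)
        if _demo_running_by_user.get(user_id) == 0:
            _demo_running_by_user.pop(user_id, None)
        _demo_cond.notify_all()            # wake the dispatcher waiting for capacity

def _example_run_wrapped(user_id: int, work) -> None:
    try:
        work()
    finally:
        _example_release_slot(user_id)     # always runs, success or failure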
_task_scheduler = None
_task_scheduler_lock = threading.Lock()
def get_task_scheduler() -> TaskScheduler:
"""获取全局任务调度器(单例)"""
global _task_scheduler
with _task_scheduler_lock:
if _task_scheduler is None:
try:
max_queue_size = int(os.environ.get("TASK_QUEUE_MAXSIZE", "1000"))
except Exception:
max_queue_size = 1000
_task_scheduler = TaskScheduler(
max_global=max_concurrent_global,
max_per_user=max_concurrent_per_account,
max_queue_size=max_queue_size,
)
return _task_scheduler
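# Illustrative usage sketch (not part of this commit): repeated calls hand back
# the same scheduler instance, and the pending-queue size can be tuned through
# the TASK_QUEUE_MAXSIZE environment variable (read once, on first construction).
def _example_scheduler_singleton() -> TaskScheduler:
    scheduler = get_task_scheduler()
    assert scheduler is get_task_scheduler()   # same object on every call
    return scheduler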
def submit_account_task(
user_id: int,
account_id: str,
browse_type: str,
enable_screenshot: bool = True,
source: str = "manual",
retry_count: int = 0,
done_callback=None,
):
"""统一入口:提交账号任务进入队列"""
account = safe_get_account(user_id, account_id)
if not account:
return False, "账号不存在"
if getattr(account, "is_running", False):
return False, "任务已在运行中"
try:
is_vip_user = bool(database.is_user_vip(user_id))
except Exception:
is_vip_user = False
account.is_running = True
account.should_stop = False
account.status = "排队中" + (" (VIP)" if is_vip_user else "")
safe_set_task_status(
account_id,
{
"user_id": user_id,
"username": account.username,
"status": "排队中",
"detail_status": "等待资源" + (" [VIP优先]" if is_vip_user else ""),
"browse_type": browse_type,
"start_time": time.time(),
"source": source,
"progress": {"items": 0, "attachments": 0},
"is_vip": is_vip_user,
},
)
_emit("account_update", account.to_dict(), room=f"user_{user_id}")
scheduler = get_task_scheduler()
ok, message = scheduler.submit_task(
user_id=user_id,
account_id=account_id,
browse_type=browse_type,
enable_screenshot=enable_screenshot,
source=source,
retry_count=retry_count,
is_vip=is_vip_user,
done_callback=done_callback,
)
if not ok:
account.is_running = False
account.status = "未开始"
safe_remove_task_status(account_id)
_emit("account_update", account.to_dict(), room=f"user_{user_id}")
return False, message
log_to_client(message + (" [VIP优先]" if is_vip_user else ""), user_id, account_id)
return True, message
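# Illustrative usage sketch (not part of this commit): how a caller (for example
# a hypothetical web route or a scheduled job) would enqueue an account task and
# surface the scheduler's answer. The wrapper name and return shape are assumptions.
def _example_enqueue(user_id: int, account_id: str, browse_type: str) -> dict:
    ok, message = submit_account_task(
        user_id=user_id,
        account_id=account_id,
        browse_type=browse_type,
        enable_screenshot=True,
        source="manual",
    )
    # ok is False when the queue is full, the task is already queued/running,
    # or the account does not exist; message carries the reason either way.
    return {"success": ok, "message": message}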
def run_task(user_id, account_id, browse_type, enable_screenshot=True, source="manual", retry_count=0):
"""运行自动化任务
Args:
retry_count: 当前重试次数用于自动重试机制最多重试2次
"""
MAX_AUTO_RETRY = 2 # 最大自动重试次数
LoggerAdapter("app", {"user_id": user_id, "account_id": account_id, "source": source}).debug(
f"run_task enable_screenshot={enable_screenshot} ({type(enable_screenshot).__name__}), retry={retry_count}"
)
account = safe_get_account(user_id, account_id)
if not account:
return
batch_id = _get_batch_id_from_source(source)
batch_recorded = False
checkpoint_mgr = get_checkpoint_mgr()
import time as time_module
try:
if account.should_stop:
log_to_client("任务已取消", user_id, account_id)
account.status = "已停止"
account.is_running = False
safe_remove_task_status(account_id)
_emit("account_update", account.to_dict(), room=f"user_{user_id}")
if batch_id:
account_name = account.remark if account.remark else account.username
_batch_task_record_result(
batch_id=batch_id,
account_name=account_name,
screenshot_path=None,
total_items=0,
total_attachments=0,
)
return
try:
if account.should_stop:
log_to_client("任务已取消", user_id, account_id)
account.status = "已停止"
account.is_running = False
_emit("account_update", account.to_dict(), room=f"user_{user_id}")
return
task_id = checkpoint_mgr.create_checkpoint(
user_id=user_id, account_id=account_id, username=account.username, browse_type=browse_type
)
logger.info(f"[断点] 任务 {task_id} 已创建")
task_start_time = time_module.time()
account.status = "运行中"
_emit("account_update", account.to_dict(), room=f"user_{user_id}")
account.last_browse_type = browse_type
safe_update_task_status(account_id, {"status": "运行中", "detail_status": "初始化", "start_time": task_start_time})
max_attempts = 3
for attempt in range(1, max_attempts + 1):
try:
if attempt > 1:
log_to_client(f"🔄 第 {attempt} 次尝试(共{max_attempts}次)...", user_id, account_id)
proxy_config = None
config = database.get_system_config()
if config.get("proxy_enabled") == 1:
proxy_api_url = config.get("proxy_api_url", "").strip()
if proxy_api_url:
log_to_client("正在获取代理IP...", user_id, account_id)
proxy_server = get_proxy_from_api(proxy_api_url, max_retries=3)
if proxy_server:
proxy_config = {"server": proxy_server}
log_to_client(f"✓ 将使用代理: {proxy_server}", user_id, account_id)
account.proxy_config = proxy_config  # keep the proxy config for the screenshot step
else:
log_to_client("✗ 代理获取失败,将不使用代理继续", user_id, account_id)
else:
log_to_client("⚠ 代理已启用但未配置API地址", user_id, account_id)
checkpoint_mgr.update_stage(task_id, TaskStage.STARTING, progress_percent=10)
def custom_log(message: str):
log_to_client(message, user_id, account_id)
log_to_client("开始登录...", user_id, account_id)
safe_update_task_status(account_id, {"detail_status": "正在登录"})
checkpoint_mgr.update_stage(task_id, TaskStage.LOGGING_IN, progress_percent=25)
with APIBrowser(log_callback=custom_log, proxy_config=proxy_config) as api_browser:
if api_browser.login(account.username, account.password):
log_to_client("✓ 登录成功!", user_id, account_id)
api_browser.save_cookies_for_playwright(account.username)
database.reset_account_login_status(account_id)
if not account.remark:
try:
real_name = api_browser.get_real_name()
if real_name:
account.remark = real_name
database.update_account_remark(account_id, real_name)
_emit("account_update", account.to_dict(), room=f"user_{user_id}")
logger.info(f"[自动备注] 账号 {account.username} 自动设置备注为: {real_name}")
except Exception as e:
logger.warning(f"[自动备注] 获取姓名失败: {e}")
safe_update_task_status(account_id, {"detail_status": "正在浏览"})
log_to_client(f"开始浏览 '{browse_type}' 内容...", user_id, account_id)
def should_stop():
return account.should_stop
checkpoint_mgr.update_stage(task_id, TaskStage.BROWSING, progress_percent=50)
result = api_browser.browse_content(browse_type=browse_type, should_stop_callback=should_stop)
else:
error_message = "登录失败"
log_to_client(f"{error_message}", user_id, account_id)
is_suspended = database.increment_account_login_fail(account_id, error_message)
if is_suspended:
log_to_client("⚠ 该账号连续3次密码错误已自动暂停", user_id, account_id)
log_to_client("请在前台修改密码后才能继续使用", user_id, account_id)
retry_action = checkpoint_mgr.record_error(task_id, error_message)
if retry_action == "paused":
logger.warning(f"[断点] 任务 {task_id} 已暂停(登录失败)")
account.status = "登录失败"
account.is_running = False
database.create_task_log(
user_id=user_id,
account_id=account_id,
username=account.username,
browse_type=browse_type,
status="failed",
total_items=0,
total_attachments=0,
error_message=error_message,
duration=int(time_module.time() - task_start_time),
source=source,
)
_emit("account_update", account.to_dict(), room=f"user_{user_id}")
return
account.total_items = result.total_items
account.total_attachments = result.total_attachments
if result.success:
log_to_client(
f"浏览完成! 共 {result.total_items} 条内容,{result.total_attachments} 个附件", user_id, account_id
)
safe_update_task_status(
account_id,
{
"detail_status": "浏览完成",
"progress": {"items": result.total_items, "attachments": result.total_attachments},
},
)
account.status = "已完成"
checkpoint_mgr.update_stage(task_id, TaskStage.COMPLETING, progress_percent=95)
checkpoint_mgr.complete_task(task_id, success=True)
logger.info(f"[断点] 任务 {task_id} 已完成")
if not enable_screenshot:
database.create_task_log(
user_id=user_id,
account_id=account_id,
username=account.username,
browse_type=browse_type,
status="success",
total_items=result.total_items,
total_attachments=result.total_attachments,
error_message="",
duration=int(time_module.time() - task_start_time),
source=source,
)
if batch_id:
account_name = account.remark if account.remark else account.username
_batch_task_record_result(
batch_id=batch_id,
account_name=account_name,
screenshot_path=None,
total_items=result.total_items,
total_attachments=result.total_attachments,
)
batch_recorded = True
elif source and source.startswith("user_scheduled"):
try:
user_info = database.get_user_by_id(user_id)
if user_info and user_info.get("email") and database.get_user_email_notify(user_id):
account_name = account.remark if account.remark else account.username
email_service.send_task_complete_email_async(
user_id=user_id,
email=user_info["email"],
username=user_info["username"],
account_name=account_name,
browse_type=browse_type,
total_items=result.total_items,
total_attachments=result.total_attachments,
screenshot_path=None,
log_callback=lambda msg: log_to_client(msg, user_id, account_id),
)
except Exception as email_error:
logger.warning(f"发送任务完成邮件失败: {email_error}")
break
error_msg = result.error_message
if "Timeout" in error_msg or "timeout" in error_msg:
log_to_client(f"⚠ 检测到超时错误: {error_msg}", user_id, account_id)
if account.automation:
try:
account.automation.close()
log_to_client("已关闭超时的浏览器实例", user_id, account_id)
except Exception as e:
logger.debug(f"关闭超时浏览器实例失败: {e}")
account.automation = None
if attempt < max_attempts:
log_to_client(f"⚠ 代理可能速度过慢将换新IP重试 ({attempt}/{max_attempts})", user_id, account_id)
time_module.sleep(2)
continue
log_to_client(f"❌ 已达到最大重试次数({max_attempts}),任务失败", user_id, account_id)
account.status = "出错"
database.create_task_log(
user_id=user_id,
account_id=account_id,
username=account.username,
browse_type=browse_type,
status="failed",
total_items=result.total_items,
total_attachments=result.total_attachments,
error_message=f"重试{max_attempts}次后仍失败: {error_msg}",
duration=int(time_module.time() - task_start_time),
source=source,
)
break
log_to_client(f"浏览出错: {error_msg}", user_id, account_id)
account.status = "出错"
database.create_task_log(
user_id=user_id,
account_id=account_id,
username=account.username,
browse_type=browse_type,
status="failed",
total_items=result.total_items,
total_attachments=result.total_attachments,
error_message=error_msg,
duration=int(time_module.time() - task_start_time),
source=source,
)
break
except Exception as retry_error:
error_msg = str(retry_error)
if account.automation:
try:
account.automation.close()
except Exception as e:
logger.debug(f"关闭浏览器实例失败: {e}")
account.automation = None
if "Timeout" in error_msg or "timeout" in error_msg:
log_to_client(f"⚠ 执行超时: {error_msg}", user_id, account_id)
if attempt < max_attempts:
log_to_client(f"⚠ 将换新IP重试 ({attempt}/{max_attempts})", user_id, account_id)
time_module.sleep(2)
continue
log_to_client(f"❌ 已达到最大重试次数({max_attempts}),任务失败", user_id, account_id)
account.status = "出错"
database.create_task_log(
user_id=user_id,
account_id=account_id,
username=account.username,
browse_type=browse_type,
status="failed",
total_items=account.total_items,
total_attachments=account.total_attachments,
error_message=f"重试{max_attempts}次后仍失败: {error_msg}",
duration=int(time_module.time() - task_start_time),
source=source,
)
break
log_to_client(f"任务执行异常: {error_msg}", user_id, account_id)
account.status = "出错"
database.create_task_log(
user_id=user_id,
account_id=account_id,
username=account.username,
browse_type=browse_type,
status="failed",
total_items=account.total_items,
total_attachments=account.total_attachments,
error_message=error_msg,
duration=int(time_module.time() - task_start_time),
source=source,
)
break
except Exception as e:
error_msg = str(e)
log_to_client(f"任务执行出错: {error_msg}", user_id, account_id)
account.status = "出错"
database.create_task_log(
user_id=user_id,
account_id=account_id,
username=account.username,
browse_type=browse_type,
status="failed",
total_items=account.total_items,
total_attachments=account.total_attachments,
error_message=error_msg,
duration=int(time_module.time() - task_start_time),
source=source,
)
finally:
account.is_running = False
screenshot_submitted = False
if account.status not in ["已完成", "登录失败", "出错"]:
account.status = "未开始"
if account.automation:
try:
account.automation.close()
except Exception as e:
log_to_client(f"关闭主任务浏览器时出错: {str(e)}", user_id, account_id)
finally:
account.automation = None
safe_remove_task(account_id)
safe_remove_task_status(account_id)
_emit("account_update", account.to_dict(), room=f"user_{user_id}")
if account.status == "已完成" and not account.should_stop:
if enable_screenshot:
log_to_client("等待2秒后开始截图...", user_id, account_id)
account.status = "等待截图"
_emit("account_update", account.to_dict(), room=f"user_{user_id}")
import time as time_mod
safe_set_task_status(
account_id,
{
"user_id": user_id,
"username": account.username,
"status": "排队中",
"detail_status": "等待截图资源",
"browse_type": browse_type,
"start_time": time_mod.time(),
"source": source,
"progress": {
"items": result.total_items if result else 0,
"attachments": result.total_attachments if result else 0,
},
},
)
time.sleep(2)
browse_result_dict = {"total_items": result.total_items, "total_attachments": result.total_attachments}
screenshot_submitted = True
threading.Thread(
target=take_screenshot_for_account,
args=(user_id, account_id, browse_type, source, task_start_time, browse_result_dict),
daemon=True,
).start()
else:
account.status = "未开始"
_emit("account_update", account.to_dict(), room=f"user_{user_id}")
log_to_client("截图功能已禁用,跳过截图", user_id, account_id)
else:
if account.status not in ["登录失败", "出错"]:
account.status = "未开始"
_emit("account_update", account.to_dict(), room=f"user_{user_id}")
elif account.status == "出错" and retry_count < MAX_AUTO_RETRY:
log_to_client(
f"⚠ 任务执行失败5秒后自动重试 ({retry_count + 1}/{MAX_AUTO_RETRY})...", user_id, account_id
)
account.status = "等待重试"
_emit("account_update", account.to_dict(), room=f"user_{user_id}")
def delayed_retry_submit():
if account.should_stop:
return
log_to_client(f"🔄 开始第 {retry_count + 1} 次自动重试...", user_id, account_id)
ok, msg = submit_account_task(
user_id=user_id,
account_id=account_id,
browse_type=browse_type,
enable_screenshot=enable_screenshot,
source=source,
retry_count=retry_count + 1,
)
if not ok:
log_to_client(f"自动重试提交失败: {msg}", user_id, account_id)
try:
threading.Timer(5, delayed_retry_submit).start()
except Exception:
delayed_retry_submit()
if batch_id and (not screenshot_submitted) and (not batch_recorded) and account.status != "等待重试":
account_name = account.remark if account.remark else account.username
_batch_task_record_result(
batch_id=batch_id,
account_name=account_name,
screenshot_path=None,
total_items=getattr(account, "total_items", 0) or 0,
total_attachments=getattr(account, "total_attachments", 0) or 0,
)
batch_recorded = True
finally:
pass
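# Illustrative sketch (not part of this commit) of the retry shape run_task()
# uses above: up to max_attempts passes, a fresh proxy per attempt, retries only
# on timeouts, and a hard stop after the last attempt. fetch_proxy/do_browse are
# hypothetical placeholders, not functions from this codebase.
import time

def _example_run_with_retries(fetch_proxy, do_browse, max_attempts: int = 3):
    last_error = None
    for attempt in range(1, max_attempts + 1):
        proxy = fetch_proxy()                  # may return None; continue without a proxy
        try:
            return do_browse(proxy)            # success ends the loop immediately
        except TimeoutError as exc:
            last_error = exc
            if attempt < max_attempts:
                time.sleep(2)                  # brief pause before switching IP
                continue
        except Exception as exc:               # non-timeout errors are not retried
            last_error = exc
            break
    raise RuntimeError(f"failed after {max_attempts} attempts: {last_error}")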

14
services/time_utils.py Normal file
View File

@@ -0,0 +1,14 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import annotations
from datetime import datetime
import pytz
BEIJING_TZ = pytz.timezone("Asia/Shanghai")
def get_beijing_now() -> datetime:
return datetime.now(BEIJING_TZ)
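# Illustrative usage sketch (not part of this commit): get_beijing_now() returns
# a timezone-aware datetime pinned to Asia/Shanghai, so formatting does not
# depend on the host's local timezone. The helper name below is hypothetical.
def _example_beijing_timestamp() -> str:
    return get_beijing_now().strftime("%Y-%m-%d %H:%M:%S")   # e.g. "2025-01-01 08:00:00"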