Files
project-carrier/replication_service/scheduler.py
2026-03-29 23:24:15 +09:00

276 lines
12 KiB
Python
Raw Blame History

This file contains ambiguous Unicode characters

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from apscheduler.events import EVENT_JOB_ERROR, EVENT_JOB_EXECUTED
from datetime import datetime, timedelta
from typing import List, Optional
import logging
import threading
from database import PostgresSessionLocal, DatabaseManager
from models import MigrationTable, ReplicationStatus, ReplicationJob, DataSource
from replication import ReplicationService
# Module-level logger; inherits configuration from the host application.
logger = logging.getLogger(__name__)
class SchedulerManager:
    """Manage the cron-based replication schedule for migration tables using DataSource."""

    def __init__(self):
        # Background scheduler that fires replication jobs on cron triggers.
        self.scheduler = BackgroundScheduler()
        # Fix: subscribe the listener only to job-executed / job-error events.
        # The original registered it with no mask, so it fired on every
        # scheduler event (startup, shutdown, job added, ...), and the
        # EVENT_* constants imported at the top of the file went unused.
        self.scheduler.add_listener(
            self._scheduler_listener,
            EVENT_JOB_ERROR | EVENT_JOB_EXECUTED,
        )
        self.running_jobs = {}  # table_id -> start timestamp of the in-flight run
        self.job_queue = []  # queued parameter dicts for overlapping schedules
        self.lock = threading.Lock()  # guards running_jobs and job_queue
        self.replication_service = ReplicationService()
def start(self):
    """Start the background scheduler and register all active table schedules."""
    if self.scheduler.running:
        return
    self.scheduler.start()
    logger.info("Scheduler started")
    self._load_migration_tables()
def stop(self):
    """Shut the background scheduler down if it is currently running."""
    if not self.scheduler.running:
        return
    self.scheduler.shutdown()
    logger.info("Scheduler stopped")
def _load_migration_tables(self):
    """Load all active migration tables from the database and schedule each one.

    Failures are logged rather than propagated so that scheduler startup is
    not aborted by a single bad table definition.
    """
    try:
        session = PostgresSessionLocal()
        try:
            tables = session.query(MigrationTable).filter(
                MigrationTable.is_active == True
            ).all()
            for table in tables:
                self._schedule_table(table)
                # The DataSource lookup is only used to build a readable log line.
                source = session.query(DataSource).filter(
                    DataSource.id == table.source_id
                ).first()
                source_name = f"{source.name}" if source else f"Source{table.source_id}"
                logger.info(f"Scheduled table {table.table_name} from {source_name} with cron: {table.cron_schedule}")
        finally:
            # Fix: close the session even when the query or scheduling raises;
            # the original only closed it on the success path (session leak).
            session.close()
    except Exception as e:
        logger.error(f"Error loading migration tables: {e}")
def _schedule_table(self, table: MigrationTable):
    """Register (or refresh) the cron-triggered replication job for one table."""
    try:
        job_id = f"replicate_{table.id}_{table.table_name}"
        # Replace any previously registered job for this table.
        if self.scheduler.get_job(job_id):
            self.scheduler.remove_job(job_id)
        # Newer optional attributes may be absent on older model rows, so
        # fall back to safe defaults when they are missing.
        job_args = [
            table.id,
            table.table_name,
            table.source_id,
            table.source_schema,
            table.target_schema,
            getattr(table, 'target_id', None),
            getattr(table, 'target_table_name', None),
            getattr(table, 'column_mapping', None),
            getattr(table, 'use_life_table', False),
            getattr(table, 'life_excluded_fields', None),
        ]
        self.scheduler.add_job(
            self._execute_replication,
            CronTrigger.from_crontab(table.cron_schedule),
            id=job_id,
            args=job_args,
            name=f"Replicate {table.table_name} (SourceID: {table.source_id})",
            max_instances=1  # never run two replications of this table at once
        )
        logger.info(f"Scheduled table {table.table_name} with source_id {table.source_id}")
    except Exception as e:
        logger.error(f"Error scheduling table {table.table_name}: {e}")
def _execute_replication(
    self,
    table_id: int,
    table_name: str,
    source_id: int,
    source_schema: Optional[str],
    target_schema: str,
    target_id: Optional[int] = None,
    target_table_name: Optional[str] = None,
    column_mapping: Optional[dict] = None,
    use_life_table: bool = False,
    life_excluded_fields: Optional[list] = None
):
    """Run one replication for a table, serialized per table_id.

    If a run for the same table is already in flight, the parameters are
    queued and replayed when the current run finishes.  Job status is
    persisted through the replication service; exceptions are logged and
    never propagated into the scheduler thread.
    """
    with self.lock:
        # Serialize per table: queue the request if a run is already active.
        if table_id in self.running_jobs:
            logger.info(f"Table {table_name} (source_id={source_id}) is already being replicated, queuing job")
            self.job_queue.append({
                'table_id': table_id,
                'table_name': table_name,
                'source_id': source_id,
                'source_schema': source_schema,
                'target_schema': target_schema,
                'target_id': target_id,
                'target_table_name': target_table_name,
                'column_mapping': column_mapping,
                'use_life_table': use_life_table,
                'life_excluded_fields': life_excluded_fields
            })
            return
        # Mark this table as running.
        self.running_jobs[table_id] = datetime.utcnow().timestamp()
    # Fix: pre-bind job_record so the except handler can test it; the original
    # hit a NameError (hidden by a bare `except: pass`) whenever the failure
    # happened before create_replication_job() returned.
    job_record = None
    data_source = None
    try:
        logger.info(f"Starting replication for table {table_name} from source_id={source_id} to target_id={target_id or 'default'}")
        # Look up the source (for log messages) and create the job record.
        session = PostgresSessionLocal()
        try:
            data_source = session.query(DataSource).filter(DataSource.id == source_id).first()
            source_name = f"{data_source.name}" if data_source else f"Source{source_id}"
            job_record = self.replication_service.create_replication_job(table_id, table_name)
        finally:
            # Fix: close the session even if the lookup raises (was leaked on error).
            session.close()
        # Run the full replication with all optional target/mapping parameters.
        success, rows_count = self.replication_service.replicate_table(
            table_name=table_name,
            source_id=source_id,
            source_schema=source_schema,
            target_schema=target_schema,
            target_id=target_id,
            target_table_name=target_table_name,
            column_mapping=column_mapping,
            life_excluded_fields=life_excluded_fields
        )
        # Apply incremental changes from the Life table when enabled.
        life_changes = {"INSERT": 0, "UPDATE": 0, "DELETE": 0}
        if data_source and use_life_table:
            try:
                life_changes = self.replication_service.process_life_table_changes(
                    table_name=table_name,
                    source_id=source_id,
                    use_life_table=use_life_table,
                    life_excluded_fields=life_excluded_fields,
                    target_id=target_id,
                    target_table_name=target_table_name,
                    column_mapping=column_mapping
                )
            except Exception as e:
                # Best effort: Life-table processing must not fail the main run.
                logger.debug(f"Error processing Life table changes: {e}")
        if success:
            self.replication_service.update_job_status(
                job_record.id,
                ReplicationStatus.SUCCESS,
                rows_count
            )
            logger.info(
                f"Successfully replicated table {table_name} from {source_name} to target_id={target_id or 'default'}. "
                f"Rows: {rows_count}, "
                f"Changes - INSERT: {life_changes['INSERT']}, "
                f"UPDATE: {life_changes['UPDATE']}, "
                f"DELETE: {life_changes['DELETE']}"
            )
        else:
            self.replication_service.update_job_status(
                job_record.id,
                ReplicationStatus.FAILED,
                error_message=f"Failed to replicate {table_name} from {source_name}"
            )
            logger.error(f"Failed to replicate table {table_name} from {source_name}")
    except Exception as e:
        logger.error(f"Error during replication of {table_name} from source_id={source_id}: {e}", exc_info=True)
        if job_record is not None:
            try:
                self.replication_service.update_job_status(
                    job_record.id,
                    ReplicationStatus.FAILED,
                    error_message=str(e)
                )
            except Exception:
                # Fix: narrow the bare `except:`; persisting FAILED stays best-effort.
                logger.debug("Could not persist FAILED job status", exc_info=True)
    finally:
        # Pop the next queued job (if any) while holding the lock, then run it
        # AFTER releasing the lock.  Fix: the original re-entered
        # _execute_replication while still holding the non-reentrant
        # threading.Lock, deadlocking on the first queued job.
        next_job = None
        with self.lock:
            self.running_jobs.pop(table_id, None)
            if self.job_queue:
                next_job = self.job_queue.pop(0)
        if next_job is not None:
            logger.info(f"Processing queued job for {next_job['table_name']} (source_id={next_job['source_id']})")
            self._execute_replication(
                next_job['table_id'],
                next_job['table_name'],
                next_job['source_id'],
                next_job['source_schema'],
                next_job['target_schema'],
                next_job.get('target_id'),
                next_job.get('target_table_name'),
                next_job.get('column_mapping'),
                next_job.get('use_life_table', False),
                next_job.get('life_excluded_fields')
            )
def add_job(self, migration_table: MigrationTable):
    """Add (or refresh) the schedule entry for a migration table."""
    self._schedule_table(migration_table)
def remove_job(self, table_id: int) -> bool:
    """Remove every scheduled job belonging to *table_id*.

    Job ids have the form ``replicate_{table_id}_{table_name}``, so matching
    is done on the table-id prefix.  Returns True when at least one job was
    removed, False otherwise (including on error).
    """
    try:
        # Fix: dropped the unused, misleading `job_id = f"replicate_{id}_*"`
        # glob string; the scheduler never supported glob lookups.
        prefix = f"replicate_{table_id}_"
        removed = False
        for job in self.scheduler.get_jobs():
            if job.id.startswith(prefix):
                self.scheduler.remove_job(job.id)
                logger.info(f"Removed scheduled job for table_id {table_id}")
                removed = True
        # Fix: keep scanning instead of returning after the first match, so
        # stale duplicates (e.g. after a table rename) are all cleaned up.
        return removed
    except Exception as e:
        logger.error(f"Error removing job for table_id {table_id}: {e}")
        return False
def _scheduler_listener(self, event):
    """Log the outcome of APScheduler job events.

    Fix: the original compared the integer ``event.code`` bitmask against the
    string ``'EVENT_JOB_ERROR'``, which can never be equal, so every job
    failure was logged at DEBUG level as a successful execution.
    """
    if event.code == EVENT_JOB_ERROR:
        logger.error(f"Job failed with exception")
    else:
        logger.debug(f"Job executed successfully")
def get_status(self) -> dict:
    """Return a snapshot of scheduler state for monitoring endpoints."""
    scheduled = self.scheduler.get_jobs()
    return {
        "is_running": self.scheduler.running,
        "running_jobs": len(self.running_jobs),
        "queued_jobs": len(self.job_queue),
        "total_jobs": len(scheduled),
    }
def get_migration_tables(self) -> List[MigrationTable]:
    """Return every migration table currently flagged as active."""
    session = PostgresSessionLocal()
    try:
        active = session.query(MigrationTable).filter(
            MigrationTable.is_active == True
        )
        return active.all()
    finally:
        # Always release the session, even when the query raises.
        session.close()
# Global SchedulerManager instance shared by the rest of the service.
scheduler_manager = SchedulerManager()