diff --git a/backend/migration/src/lib.rs b/backend/migration/src/lib.rs
index dacd2f1..d09ac14 100644
--- a/backend/migration/src/lib.rs
+++ b/backend/migration/src/lib.rs
@@ -25,6 +25,10 @@ mod m20220101_000017_create_flow_run_logs;
 mod m20220101_000018_add_flow_code_to_flow_run_logs;
 // New: scheduled-jobs table
 mod m20220101_000019_create_schedule_jobs;
+// New: composite indexes for flow_run_logs
+mod m20220101_000020_add_indexes_to_flow_run_logs;
+// Fix: change flow_run_logs.flow_id to BIGINT
+mod m20220101_000021_alter_flow_run_logs_flow_id_to_bigint;
 
 pub struct Migrator;
 
@@ -57,8 +61,12 @@ impl MigratorTrait for Migrator {
             Box::new(m20220101_000017_create_flow_run_logs::Migration),
             // New: add flow_code column to flow_run_logs
             Box::new(m20220101_000018_add_flow_code_to_flow_run_logs::Migration),
-            // New: scheduled-jobs table
+            // New: scheduled-jobs table (registration restored)
             Box::new(m20220101_000019_create_schedule_jobs::Migration),
+            // Fix: change flow_run_logs.flow_id to BIGINT
+            Box::new(m20220101_000021_alter_flow_run_logs_flow_id_to_bigint::Migration),
+            // New: composite indexes for flow_run_logs
+            Box::new(m20220101_000020_add_indexes_to_flow_run_logs::Migration),
         ]
     }
}
\ No newline at end of file
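The migrator registers m20220101_000021 ahead of m20220101_000020 on purpose: the flow_id type change lands first, so the composite indexes are built once against the final BIGINT column rather than being rebuilt by the ALTER. A minimal sketch of applying the chain, assuming the conventional `migration` crate name for `backend/migration` and an illustrative connection URL; `Migrator::up(db, None)` applies every pending migration in the order listed above:

```rust
use migration::Migrator;              // the crate at backend/migration
use sea_orm::Database;
use sea_orm_migration::MigratorTrait; // brings Migrator::up into scope

#[tokio::main]
async fn main() -> Result<(), sea_orm::DbErr> {
    // Illustrative URL; the real value presumably comes from app config.
    let db = Database::connect("mysql://user:pass@localhost/appdb").await?;
    // None = no step limit: runs 000021 (type fix) before 000020 (indexes).
    Migrator::up(&db, None).await?;
    Ok(())
}
```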
diff --git a/backend/migration/src/m20220101_000020_add_indexes_to_flow_run_logs.rs b/backend/migration/src/m20220101_000020_add_indexes_to_flow_run_logs.rs
new file mode 100644
index 0000000..029ead3
--- /dev/null
+++ b/backend/migration/src/m20220101_000020_add_indexes_to_flow_run_logs.rs
@@ -0,0 +1,93 @@
+use sea_orm_migration::prelude::*;
+
+#[derive(DeriveMigrationName)]
+pub struct Migration;
+
+#[async_trait::async_trait]
+impl MigrationTrait for Migration {
+    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
+        // Indexes covering the common sort and filter patterns.
+        // 1) No filter: ORDER BY started_at DESC, id DESC uses (started_at, id)
+        manager
+            .create_index(
+                Index::create()
+                    .if_not_exists()
+                    .name("idx_flow_run_logs_started_at_id")
+                    .table(FlowRunLogs::Table)
+                    .col(FlowRunLogs::StartedAt)
+                    .col(FlowRunLogs::Id)
+                    .to_owned(),
+            )
+            .await?;
+
+        // 2) flow_id = ? with ORDER BY started_at DESC, id DESC uses (flow_id, started_at, id)
+        manager
+            .create_index(
+                Index::create()
+                    .if_not_exists()
+                    .name("idx_flow_run_logs_flow_id_started_at_id")
+                    .table(FlowRunLogs::Table)
+                    .col(FlowRunLogs::FlowId)
+                    .col(FlowRunLogs::StartedAt)
+                    .col(FlowRunLogs::Id)
+                    .to_owned(),
+            )
+            .await?;
+
+        // 3) flow_code = ? with ORDER BY started_at DESC, id DESC uses (flow_code, started_at, id)
+        manager
+            .create_index(
+                Index::create()
+                    .if_not_exists()
+                    .name("idx_flow_run_logs_flow_code_started_at_id")
+                    .table(FlowRunLogs::Table)
+                    .col(FlowRunLogs::FlowCode)
+                    .col(FlowRunLogs::StartedAt)
+                    .col(FlowRunLogs::Id)
+                    .to_owned(),
+            )
+            .await?;
+
+        // 4) ok = ? with ORDER BY started_at DESC, id DESC uses (ok, started_at, id)
+        manager
+            .create_index(
+                Index::create()
+                    .if_not_exists()
+                    .name("idx_flow_run_logs_ok_started_at_id")
+                    .table(FlowRunLogs::Table)
+                    .col(FlowRunLogs::Ok)
+                    .col(FlowRunLogs::StartedAt)
+                    .col(FlowRunLogs::Id)
+                    .to_owned(),
+            )
+            .await?;
+
+        Ok(())
+    }
+
+    async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
+        manager
+            .drop_index(Index::drop().name("idx_flow_run_logs_started_at_id").table(FlowRunLogs::Table).to_owned())
+            .await?;
+        manager
+            .drop_index(Index::drop().name("idx_flow_run_logs_flow_id_started_at_id").table(FlowRunLogs::Table).to_owned())
+            .await?;
+        manager
+            .drop_index(Index::drop().name("idx_flow_run_logs_flow_code_started_at_id").table(FlowRunLogs::Table).to_owned())
+            .await?;
+        manager
+            .drop_index(Index::drop().name("idx_flow_run_logs_ok_started_at_id").table(FlowRunLogs::Table).to_owned())
+            .await
+    }
+}
+
+#[derive(DeriveIden)]
+enum FlowRunLogs {
+    #[sea_orm(iden = "flow_run_logs")]
+    Table,
+    Id,
+    FlowId,
+    FlowCode,
+    StartedAt,
+    Ok,
+}
\ No newline at end of file
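Each index puts the equality column first and the (started_at, id) sort keys after it, so a filtered, sorted, limited page can be answered by a single index range scan. A sketch of the query shape that index (2) serves, assuming the `flow_run_log` entity module used by the service below:

```rust
use sea_orm::{
    ColumnTrait, DatabaseConnection, DbErr, EntityTrait, QueryFilter, QueryOrder, QuerySelect,
};

use crate::models::flow_run_log; // entity module, as in the service below

/// Last `n` runs of one flow: the planner can satisfy both the filter and
/// the sort from idx_flow_run_logs_flow_id_started_at_id and stop at `n` rows.
async fn recent_runs(
    db: &DatabaseConnection,
    flow_id: i64,
    n: u64,
) -> Result<Vec<flow_run_log::Model>, DbErr> {
    flow_run_log::Entity::find()
        .filter(flow_run_log::Column::FlowId.eq(flow_id))
        .order_by_desc(flow_run_log::Column::StartedAt)
        .order_by_desc(flow_run_log::Column::Id)
        .limit(n)
        .all(db)
        .await
}
```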
diff --git a/backend/migration/src/m20220101_000021_alter_flow_run_logs_flow_id_to_bigint.rs b/backend/migration/src/m20220101_000021_alter_flow_run_logs_flow_id_to_bigint.rs
new file mode 100644
index 0000000..45c5b6b
--- /dev/null
+++ b/backend/migration/src/m20220101_000021_alter_flow_run_logs_flow_id_to_bigint.rs
@@ -0,0 +1,40 @@
+use sea_orm_migration::prelude::*;
+
+#[derive(DeriveMigrationName)]
+pub struct Migration;
+
+#[async_trait::async_trait]
+impl MigrationTrait for Migration {
+    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
+        // Change flow_id from VARCHAR(64) to BIGINT to match the entity
+        // model and the query parameter types.
+        manager
+            .alter_table(
+                Table::alter()
+                    .table(FlowRunLogs::Table)
+                    .modify_column(ColumnDef::new(FlowRunLogs::FlowId).big_integer().not_null())
+                    .to_owned(),
+            )
+            .await?;
+        Ok(())
+    }
+
+    async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
+        // Rollback: change flow_id back to VARCHAR(64)
+        manager
+            .alter_table(
+                Table::alter()
+                    .table(FlowRunLogs::Table)
+                    .modify_column(ColumnDef::new(FlowRunLogs::FlowId).string_len(64).not_null())
+                    .to_owned(),
+            )
+            .await
+    }
+}
+
+#[derive(DeriveIden)]
+enum FlowRunLogs {
+    #[sea_orm(iden = "flow_run_logs")]
+    Table,
+    FlowId,
+}
\ No newline at end of file
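`modify_column` leaves the VARCHAR-to-BIGINT value conversion to the backend. MySQL's `MODIFY COLUMN` casts in place, but PostgreSQL rejects this change unless the statement carries an explicit `USING` cast, which the builder does not emit. If the deployment targets Postgres, the `up` body would need a raw-SQL form along these lines (a sketch under that assumption, not part of this change):

```rust
use sea_orm::ConnectionTrait;
use sea_orm_migration::prelude::*;

// Hypothetical Postgres-only variant of up(): adds the USING cast that
// ALTER COLUMN ... TYPE requires when converting VARCHAR to BIGINT.
async fn up_postgres(manager: &SchemaManager<'_>) -> Result<(), DbErr> {
    manager
        .get_connection()
        .execute_unprepared(
            "ALTER TABLE flow_run_logs \
             ALTER COLUMN flow_id TYPE BIGINT USING flow_id::BIGINT",
        )
        .await?;
    Ok(())
}
```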
diff --git a/backend/src/services/flow_run_log_service.rs b/backend/src/services/flow_run_log_service.rs
index 2eb68cb..8e75116 100644
--- a/backend/src/services/flow_run_log_service.rs
+++ b/backend/src/services/flow_run_log_service.rs
@@ -1,5 +1,6 @@
 use crate::{db::Db, models::flow_run_log};
 use sea_orm::{ActiveModelTrait, Set, EntityTrait, PaginatorTrait, QueryFilter, QueryOrder, ColumnTrait};
+use sea_orm::QuerySelect;
 use chrono::{DateTime, FixedOffset, Utc};
 
 #[derive(serde::Serialize)]
@@ -61,18 +62,71 @@ pub async fn create(db: &Db, input: CreateRunLogInput) -> anyhow::Result<i64> {
     Ok(m.id)
 }
 
+/// Paginated query over flow run logs.
+///
+/// # Arguments
+/// * `db` - database connection
+/// * `p` - query parameters: page number, page size, and optional filters (flow ID, flow code, username, run status)
+///
+/// # Returns
+/// * `Ok(PageResp)` - one page of results: the log items plus pagination info
+/// * `Err(anyhow::Error)` - the error if the query fails
 pub async fn list(db: &Db, p: ListParams) -> anyhow::Result<PageResp<RunLogItem>> {
-    let page = p.page.unwrap_or(1); let page_size = p.page_size.unwrap_or(10);
-    let mut selector = flow_run_log::Entity::find();
-    if let Some(fid) = p.flow_id { selector = selector.filter(flow_run_log::Column::FlowId.eq(fid)); }
-    if let Some(fcode) = p.flow_code { selector = selector.filter(flow_run_log::Column::FlowCode.eq(fcode)); }
-    if let Some(u) = p.user {
-        let like = format!("%{}%", u);
-        selector = selector.filter(flow_run_log::Column::Username.like(like));
+    let page = p.page.unwrap_or(1).max(1);
+    let page_size = p.page_size.unwrap_or(10).max(1);
+
+    // Shared filter conditions
+    let mut base_selector = flow_run_log::Entity::find();
+    if let Some(fid) = p.flow_id {
+        base_selector = base_selector.filter(flow_run_log::Column::FlowId.eq(fid));
     }
-    if let Some(ok) = p.ok { selector = selector.filter(flow_run_log::Column::Ok.eq(ok)); }
-    let paginator = selector.order_by_desc(flow_run_log::Column::Id).paginate(db, page_size);
-    let total = paginator.num_items().await? as u64;
-    let models = paginator.fetch_page(if page>0 { page-1 } else { 0 }).await?;
-    Ok(PageResp { items: models.into_iter().map(Into::into).collect(), total, page, page_size })
+    if let Some(fcode) = p.flow_code.as_ref() {
+        base_selector = base_selector.filter(flow_run_log::Column::FlowCode.eq(fcode.clone()));
+    }
+    if let Some(u) = p.user.as_ref() {
+        // Escape LIKE wildcards in the user filter; escape the backslash
+        // first so the escapes themselves are not re-escaped.
+        let escaped = u.replace('\\', "\\\\").replace('%', "\\%").replace('_', "\\_");
+        let like = format!("%{}%", escaped);
+        base_selector = base_selector.filter(flow_run_log::Column::Username.like(like));
+    }
+    if let Some(ok) = p.ok {
+        base_selector = base_selector.filter(flow_run_log::Column::Ok.eq(ok));
+    }
+
+    // Count the total once, staying compatible with the old endpoint.
+    let total = base_selector.clone().count(db).await? as u64;
+
+    // Offset of the requested page
+    let offset = (page - 1) * page_size;
+
+    // ⭐ Key optimization: serve pages in the second half of the result set
+    // from the ascending end, so the database never skips more than half the rows.
+    let models = if offset > total / 2 {
+        // ---- Tail-page optimization: ascending order + limit + reverse ----
+        // The DESC page [offset, offset + page_size) is the ASC range
+        // [start_idx, total - offset); clamp the limit so a short or
+        // out-of-range last page does not pick up extra rows.
+        let start_idx = total.saturating_sub(page * page_size);
+        let limit = total
+            .saturating_sub(offset)
+            .saturating_sub(start_idx)
+            .min(page_size);
+        let asc_selector = base_selector.clone();
+        let mut rows = asc_selector
+            .order_by_asc(flow_run_log::Column::StartedAt)
+            .order_by_asc(flow_run_log::Column::Id)
+            .offset(start_idx)
+            .limit(limit)
+            .all(db)
+            .await?;
+        rows.reverse();
+        rows
+    } else {
+        // ---- Regular pagination ----
+        base_selector
+            .order_by_desc(flow_run_log::Column::StartedAt)
+            .order_by_desc(flow_run_log::Column::Id)
+            .offset(offset)
+            .limit(page_size)
+            .all(db)
+            .await?
+    };
+
+    Ok(PageResp {
+        items: models.into_iter().map(Into::into).collect(),
+        total,
+        page,
+        page_size,
+    })
 }
\ No newline at end of file
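To make the tail-branch arithmetic concrete with hypothetical numbers: for total = 95, page_size = 10, page = 9, the offset is 80 > 95 / 2, so the optimized branch runs with start_idx = 95 − 90 = 5 and limit = (95 − 80) − 5 = 10; the ascending scan reads rows 5..15 and the reverse returns them as positions 81..90 of the descending order. The clamp matters on the last page: page = 10 gives start_idx = 0 and limit = 5, where a fixed `limit(page_size)` would wrongly return ten rows. A self-contained check of the window math:

```rust
/// Mirrors the tail-page branch: returns (start_idx, limit) for the
/// ascending scan that replaces a DESC OFFSET/LIMIT query.
fn tail_window(total: u64, page: u64, page_size: u64) -> (u64, u64) {
    let offset = (page - 1) * page_size;
    let start_idx = total.saturating_sub(page * page_size);
    let limit = total
        .saturating_sub(offset)
        .saturating_sub(start_idx)
        .min(page_size);
    (start_idx, limit)
}

fn main() {
    assert_eq!(tail_window(95, 9, 10), (5, 10)); // full deep page
    assert_eq!(tail_window(95, 10, 10), (0, 5)); // short last page
    assert_eq!(tail_window(95, 12, 10), (0, 0)); // past the end: empty
    println!("tail-page window math checks out");
}
```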