feat(migration): add composite indexes to flow_run_logs and fix flow_id type

Optimize flow_run_logs query performance by adding composite indexes for the common sort and filter patterns.
Change flow_id from VARCHAR(64) to BIGINT to match the entity model.
Implement a last-page optimization strategy in the paged query.
2025-09-25 22:38:06 +08:00
parent dfa1cbdd2f
commit a71bbb0961
4 changed files with 208 additions and 13 deletions


@@ -25,6 +25,10 @@ mod m20220101_000017_create_flow_run_logs;
 mod m20220101_000018_add_flow_code_to_flow_run_logs;
 // New: schedule jobs table
 mod m20220101_000019_create_schedule_jobs;
+// New: composite indexes for flow_run_logs
+mod m20220101_000020_add_indexes_to_flow_run_logs;
+// Fix: change flow_run_logs.flow_id type to BIGINT
+mod m20220101_000021_alter_flow_run_logs_flow_id_to_bigint;
 
 pub struct Migrator;

@@ -57,8 +61,12 @@ impl MigratorTrait for Migrator {
             Box::new(m20220101_000017_create_flow_run_logs::Migration),
             // New: add flow_code column to flow_run_logs
             Box::new(m20220101_000018_add_flow_code_to_flow_run_logs::Migration),
-            // New: schedule jobs table
+            // New: schedule jobs table (registration restored)
             Box::new(m20220101_000019_create_schedule_jobs::Migration),
+            // Fix flow_run_logs.flow_id type to BIGINT; registered before the
+            // index migration so the composite indexes are built on the new type
+            Box::new(m20220101_000021_alter_flow_run_logs_flow_id_to_bigint::Migration),
+            // New: composite indexes for flow_run_logs
+            Box::new(m20220101_000020_add_indexes_to_flow_run_logs::Migration),
         ]
     }
 }
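
For reference, sea-orm-migration applies these registrations through its CLI runner. A minimal sketch of the usual migration-crate entry point, following the standard template; the crate name `migration` and the async-std attribute are assumptions, not taken from this commit:

use sea_orm_migration::prelude::*;

#[async_std::main]
async fn main() {
    // Dispatches `up` / `down` / `status` etc. against the Migrator above.
    cli::run_cli(migration::Migrator).await;
}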


@@ -0,0 +1,93 @@
use sea_orm_migration::prelude::*;

#[derive(DeriveMigrationName)]
pub struct Migration;

#[async_trait::async_trait]
impl MigrationTrait for Migration {
    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        // Composite indexes covering the common sort and filter patterns.
        // 1) No filter, ORDER BY started_at DESC, id DESC: uses (started_at, id)
        manager
            .create_index(
                Index::create()
                    .if_not_exists()
                    .name("idx_flow_run_logs_started_at_id")
                    .table(FlowRunLogs::Table)
                    .col(FlowRunLogs::StartedAt)
                    .col(FlowRunLogs::Id)
                    .to_owned(),
            )
            .await?;

        // 2) flow_id = ? with ORDER BY started_at DESC, id DESC: uses (flow_id, started_at, id)
        manager
            .create_index(
                Index::create()
                    .if_not_exists()
                    .name("idx_flow_run_logs_flow_id_started_at_id")
                    .table(FlowRunLogs::Table)
                    .col(FlowRunLogs::FlowId)
                    .col(FlowRunLogs::StartedAt)
                    .col(FlowRunLogs::Id)
                    .to_owned(),
            )
            .await?;

        // 3) flow_code = ? with ORDER BY started_at DESC, id DESC: uses (flow_code, started_at, id)
        manager
            .create_index(
                Index::create()
                    .if_not_exists()
                    .name("idx_flow_run_logs_flow_code_started_at_id")
                    .table(FlowRunLogs::Table)
                    .col(FlowRunLogs::FlowCode)
                    .col(FlowRunLogs::StartedAt)
                    .col(FlowRunLogs::Id)
                    .to_owned(),
            )
            .await?;

        // 4) ok = ? with ORDER BY started_at DESC, id DESC: uses (ok, started_at, id)
        manager
            .create_index(
                Index::create()
                    .if_not_exists()
                    .name("idx_flow_run_logs_ok_started_at_id")
                    .table(FlowRunLogs::Table)
                    .col(FlowRunLogs::Ok)
                    .col(FlowRunLogs::StartedAt)
                    .col(FlowRunLogs::Id)
                    .to_owned(),
            )
            .await?;

        Ok(())
    }

    async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager
            .drop_index(Index::drop().name("idx_flow_run_logs_started_at_id").table(FlowRunLogs::Table).to_owned())
            .await?;
        manager
            .drop_index(Index::drop().name("idx_flow_run_logs_flow_id_started_at_id").table(FlowRunLogs::Table).to_owned())
            .await?;
        manager
            .drop_index(Index::drop().name("idx_flow_run_logs_flow_code_started_at_id").table(FlowRunLogs::Table).to_owned())
            .await?;
        manager
            .drop_index(Index::drop().name("idx_flow_run_logs_ok_started_at_id").table(FlowRunLogs::Table).to_owned())
            .await
    }
}

#[derive(DeriveIden)]
enum FlowRunLogs {
    #[sea_orm(iden = "flow_run_logs")]
    Table,
    Id,
    FlowId,
    FlowCode,
    StartedAt,
    Ok,
}
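
For readers who prefer raw DDL, the builders above should translate to plain CREATE INDEX statements; the leading column serves the equality filter and the trailing (started_at, id) pair serves the sort, with the DESC ordering satisfied by a backward scan of the ascending index. A minimal equivalent sketch using sea-orm-migration's raw-SQL escape hatch, assuming MySQL syntax; `up_raw` is illustrative only, not part of this commit:

use sea_orm::ConnectionTrait;
use sea_orm_migration::prelude::*;

// Hypothetical raw-SQL variant of the `up` above.
async fn up_raw(manager: &SchemaManager<'_>) -> Result<(), DbErr> {
    let conn = manager.get_connection();
    for ddl in [
        "CREATE INDEX idx_flow_run_logs_started_at_id ON flow_run_logs (started_at, id)",
        "CREATE INDEX idx_flow_run_logs_flow_id_started_at_id ON flow_run_logs (flow_id, started_at, id)",
        "CREATE INDEX idx_flow_run_logs_flow_code_started_at_id ON flow_run_logs (flow_code, started_at, id)",
        "CREATE INDEX idx_flow_run_logs_ok_started_at_id ON flow_run_logs (ok, started_at, id)",
    ] {
        conn.execute_unprepared(ddl).await?;
    }
    Ok(())
}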


@@ -0,0 +1,40 @@
use sea_orm_migration::prelude::*;

#[derive(DeriveMigrationName)]
pub struct Migration;

#[async_trait::async_trait]
impl MigrationTrait for Migration {
    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        // Change flow_id from VARCHAR(64) to BIGINT to match the entity
        // model and the query parameter type.
        manager
            .alter_table(
                Table::alter()
                    .table(FlowRunLogs::Table)
                    .modify_column(ColumnDef::new(FlowRunLogs::FlowId).big_integer().not_null())
                    .to_owned(),
            )
            .await?;
        Ok(())
    }

    async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        // Rollback: change flow_id back to VARCHAR(64)
        manager
            .alter_table(
                Table::alter()
                    .table(FlowRunLogs::Table)
                    .modify_column(ColumnDef::new(FlowRunLogs::FlowId).string_len(64).not_null())
                    .to_owned(),
            )
            .await
    }
}

#[derive(DeriveIden)]
enum FlowRunLogs {
    #[sea_orm(iden = "flow_run_logs")]
    Table,
    FlowId,
}
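
One caveat worth noting: on PostgreSQL, changing a varchar column to bigint requires an explicit USING cast, which the `modify_column` call above does not emit. A hedged sketch of a Postgres-specific variant, not part of this commit; it assumes every stored flow_id is a numeric string, otherwise the cast fails at migration time:

use sea_orm::ConnectionTrait;
use sea_orm_migration::prelude::*;

// Hypothetical Postgres-only version of `up`, shown for illustration.
async fn up_postgres(manager: &SchemaManager<'_>) -> Result<(), DbErr> {
    manager
        .get_connection()
        .execute_unprepared(
            "ALTER TABLE flow_run_logs ALTER COLUMN flow_id TYPE bigint USING flow_id::bigint",
        )
        .await?;
    Ok(())
}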


@@ -1,5 +1,6 @@
 use crate::{db::Db, models::flow_run_log};
 use sea_orm::{ActiveModelTrait, Set, EntityTrait, PaginatorTrait, QueryFilter, QueryOrder, ColumnTrait};
+use sea_orm::QuerySelect;
 use chrono::{DateTime, FixedOffset, Utc};
 
 #[derive(serde::Serialize)]

@@ -61,18 +62,71 @@ pub async fn create(db: &Db, input: CreateRunLogInput) -> anyhow::Result<i64> {
     Ok(m.id)
 }
 
+/// Paged query over flow run logs.
+///
+/// # Parameters
+/// * `db` - database connection
+/// * `p` - query parameters: page number, page size, and optional filters
+///   (flow ID, flow code, username, run status)
+///
+/// # Returns
+/// * `Ok(PageResp<RunLogItem>)` - one page of log items plus paging metadata
+/// * `Err(anyhow::Error)` - the error if the query fails
 pub async fn list(db: &Db, p: ListParams) -> anyhow::Result<PageResp<RunLogItem>> {
-    let page = p.page.unwrap_or(1); let page_size = p.page_size.unwrap_or(10);
-    let mut selector = flow_run_log::Entity::find();
-    if let Some(fid) = p.flow_id { selector = selector.filter(flow_run_log::Column::FlowId.eq(fid)); }
-    if let Some(fcode) = p.flow_code { selector = selector.filter(flow_run_log::Column::FlowCode.eq(fcode)); }
-    if let Some(u) = p.user {
-        let like = format!("%{}%", u);
-        selector = selector.filter(flow_run_log::Column::Username.like(like));
-    }
-    if let Some(ok) = p.ok { selector = selector.filter(flow_run_log::Column::Ok.eq(ok)); }
-    let paginator = selector.order_by_desc(flow_run_log::Column::Id).paginate(db, page_size);
-    let total = paginator.num_items().await? as u64;
-    let models = paginator.fetch_page(if page > 0 { page - 1 } else { 0 }).await?;
-    Ok(PageResp { items: models.into_iter().map(Into::into).collect(), total, page, page_size })
+    let page = p.page.unwrap_or(1).max(1);
+    let page_size = p.page_size.unwrap_or(10).max(1);
+
+    // Shared filter conditions
+    let mut base_selector = flow_run_log::Entity::find();
+    if let Some(fid) = p.flow_id {
+        base_selector = base_selector.filter(flow_run_log::Column::FlowId.eq(fid));
+    }
+    if let Some(fcode) = p.flow_code.as_ref() {
+        base_selector = base_selector.filter(flow_run_log::Column::FlowCode.eq(fcode.clone()));
+    }
+    if let Some(u) = p.user.as_ref() {
+        // Escape LIKE wildcards in the user-supplied pattern
+        let like = format!("%{}%", u.replace('%', "\\%").replace('_', "\\_"));
+        base_selector = base_selector.filter(flow_run_log::Column::Username.like(like));
+    }
+    if let Some(ok) = p.ok {
+        base_selector = base_selector.filter(flow_run_log::Column::Ok.eq(ok));
+    }
+
+    // Count once; keeps the response shape compatible with the old endpoint
+    let total = base_selector.clone().count(db).await? as u64;
+
+    // Offset of the requested page
+    let offset = (page - 1) * page_size;
+
+    // Key optimization: decide whether the page falls in the front or back half
+    let models = if offset > total / 2 {
+        // ---- Last-page optimization: ascending order + limit, then reverse ----
+        let start_idx = total.saturating_sub(page * page_size);
+        // Clamp the limit so a partial last page returns only the rows that exist
+        let limit = total.saturating_sub(offset).min(page_size);
+        let asc_selector = base_selector.clone();
+        let mut rows = asc_selector
+            .order_by_asc(flow_run_log::Column::StartedAt)
+            .order_by_asc(flow_run_log::Column::Id)
+            .offset(start_idx)
+            .limit(limit)
+            .all(db)
+            .await?;
+        rows.reverse();
+        rows
+    } else {
+        // ---- Regular pagination ----
+        base_selector
+            .order_by_desc(flow_run_log::Column::StartedAt)
+            .order_by_desc(flow_run_log::Column::Id)
+            .offset(offset)
+            .limit(page_size)
+            .all(db)
+            .await?
+    };
+
+    Ok(PageResp {
+        items: models.into_iter().map(Into::into).collect(),
+        total,
+        page,
+        page_size,
+    })
 }
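
To sanity-check the back-half window arithmetic, a small self-contained sketch; `back_half_window` is a hypothetical helper for illustration, not part of the codebase:

// Computes (ascending offset, limit) for a page served from the back half.
fn back_half_window(total: u64, page: u64, page_size: u64) -> (u64, u64) {
    let offset = (page - 1) * page_size;
    let start_idx = total.saturating_sub(page * page_size);
    let limit = total.saturating_sub(offset).min(page_size);
    (start_idx, limit)
}

fn main() {
    // Full page deep in the result set: descending ranks 940..949 map to
    // ascending indexes 50..59, which are fetched and then reversed.
    assert_eq!(back_half_window(1000, 95, 10), (50, 10));
    // Partial last page: with 95 rows, page 10 holds only ranks 90..94,
    // i.e. ascending indexes 0..4 — hence the clamped limit of 5.
    assert_eq!(back_half_window(95, 10, 10), (0, 5));
}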