Files
junhong_cmp_fiber/scripts/perf_query/seed.go
huang f32d32cd36
All checks were successful
构建并部署到测试环境(无 SSH) / build-and-deploy (push) Successful in 7m6s
perf: IoT 卡 30M 行分页查询优化(P95 17.9s → <500ms)
- 新增 is_standalone 物化列 + 触发器自动维护(迁移 056)
- 并行查询拆分:多店铺 IN 查询拆为 per-shop goroutine 并行 Index Scan
- 两阶段延迟 Join:深度分页(page≥50)走覆盖索引 Index Only Scan 取 ID 再回表
- COUNT 缓存:per-shop 并行 COUNT + Redis 30 分钟 TTL
- 索引优化:删除有害全局索引、新增 partial composite indexes(迁移 057/058)
- ICCID 模糊搜索路径隔离:trigram GIN 索引走独立查询路径
- 慢查询阈值从 100ms 调整为 500ms
- 新增 30M 测试数据种子脚本和 benchmark 工具
2026-02-24 16:23:02 +08:00

701 lines
23 KiB
Go
Raw Blame History

This file contains invisible Unicode characters
This file contains invisible Unicode characters that are indistinguishable to humans but may be processed differently by a computer. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.
This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.
//go:build ignore
// 性能测试数据生成/清理脚本
// 用法:
// source .env.local
// go run ./scripts/perf_query/seed.go # 默认生成 3000 万卡
// go run ./scripts/perf_query/seed.go -total 1000000 # 生成 100 万卡(试跑)
// go run ./scripts/perf_query/seed.go -action cleanup # 清理测试数据
// go run ./scripts/perf_query/seed.go -action verify # 验证数据分布
// go run ./scripts/perf_query/seed.go -action add-index # 创建优化索引
// go run ./scripts/perf_query/seed.go -action drop-index # 删除优化索引
package main
import (
"database/sql"
"flag"
"fmt"
"log"
"os"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
_ "github.com/jackc/pgx/v5/stdlib"
"golang.org/x/crypto/bcrypt"
"gorm.io/driver/postgres"
"gorm.io/gorm"
"gorm.io/gorm/logger"
)
// Constants marking the synthetic performance-test data so it can be
// identified, verified, and cleaned up later without touching real records.
const (
	testBatchNo        = "PERF-TEST-30M"   // batch_no stamped on every seeded card
	testShopCodePrefix = "PERF-TEST-SHOP-" // shop_code prefix shared by all seeded shops
	testAgentUsername  = "perf_test_agent" // login of the seeded agent account
	testAgentPassword  = "PerfTest@123456" // plaintext password (bcrypt-hashed before insert)
	numShops           = 200               // total number of seeded shops
	agentChildShops    = 6                 // the agent has 6 child shops (itself + 6 = 7 visible in total)
)
// Simplified shop model used only for seeding test data; it deliberately
// avoids importing the project's internal model packages.
type testShop struct {
	ID           uint   `gorm:"primaryKey"`
	ShopName     string `gorm:"column:shop_name"`
	ShopCode     string `gorm:"column:shop_code"`
	ParentID     *uint  `gorm:"column:parent_id"` // nil for top-level shops
	Level        int    `gorm:"column:level"`
	ContactName  string `gorm:"column:contact_name"`
	ContactPhone string `gorm:"column:contact_phone"`
	Status       int    `gorm:"column:status"`
	Creator      uint   `gorm:"column:creator"`
	Updater      uint   `gorm:"column:updater"`
	CreatedAt    time.Time
	UpdatedAt    time.Time
}

// TableName maps testShop onto the real tb_shop table.
func (testShop) TableName() string { return "tb_shop" }
// Simplified account model used only for seeding the test agent account;
// mirrors the columns of tb_account that the script writes.
type testAccount struct {
	ID           uint   `gorm:"primaryKey"`
	Username     string `gorm:"column:username"`
	Phone        string `gorm:"column:phone"`
	Password     string `gorm:"column:password"` // bcrypt hash, never plaintext
	UserType     int    `gorm:"column:user_type"`
	ShopID       *uint  `gorm:"column:shop_id"`
	EnterpriseID *uint  `gorm:"column:enterprise_id"`
	Status       int    `gorm:"column:status"`
	Creator      uint   `gorm:"column:creator"`
	Updater      uint   `gorm:"column:updater"`
	CreatedAt    time.Time
	UpdatedAt    time.Time
}

// TableName maps testAccount onto the real tb_account table.
func (testAccount) TableName() string { return "tb_account" }
// Command-line flags controlling what the script does and how much data it
// generates.
var (
	action = flag.String("action", "seed", "操作: seed / cleanup / verify / add-index / drop-index")
	total  = flag.Int("total", 30000000, "生成卡总数")
	// NOTE(review): insertCards appears to hardcode its own 5M chunk size —
	// confirm this flag is actually honored by the insert path.
	batchSize = flag.Int("batch", 5000000, "每批插入数量")
)
func buildDSN() string {
return fmt.Sprintf("host=%s port=%s user=%s password=%s dbname=%s sslmode=disable TimeZone=Asia/Shanghai",
getEnv("JUNHONG_DATABASE_HOST", "cxd.whcxd.cn"),
getEnv("JUNHONG_DATABASE_PORT", "16159"),
getEnv("JUNHONG_DATABASE_USER", "erp_pgsql"),
getEnv("JUNHONG_DATABASE_PASSWORD", "erp_2025"),
getEnv("JUNHONG_DATABASE_DBNAME", "junhong_cmp_test"),
)
}
// main parses flags, opens the database connection, and dispatches to the
// requested action (seed / cleanup / verify / add-index / drop-index).
func main() {
	flag.Parse()
	db, err := gorm.Open(postgres.Open(buildDSN()), &gorm.Config{
		Logger: logger.Default.LogMode(logger.Warn),
	})
	if err != nil {
		log.Fatalf("❌ 连接数据库失败: %v", err)
	}
	// Bug fix: the original discarded this error with `_`.
	sqlDB, err := db.DB()
	if err != nil {
		log.Fatalf("❌ 获取底层连接失败: %v", err)
	}
	// Keep the shared pool small; the seed workers open their own connections.
	sqlDB.SetMaxOpenConns(5)
	sqlDB.SetMaxIdleConns(3)
	fmt.Println("✅ 数据库连接成功")
	switch *action {
	case "seed":
		doSeed(db)
	case "cleanup":
		doCleanup(db)
	case "verify":
		doVerify(db)
	case "add-index":
		doAddIndex(db)
	case "drop-index":
		doDropIndex(db)
	default:
		// Bug fix: the original message ran "%s" straight into "支持" with
		// no separator, producing e.g. "未知操作: foo支持: ...".
		log.Fatalf("❌ 未知操作: %s,支持: seed / cleanup / verify / add-index / drop-index", *action)
	}
}
// ==================== Seed ====================

// doSeed builds the full performance-test dataset: shops, an agent account,
// then the bulk card insert. For speed it temporarily relaxes PG write
// settings, drops secondary indexes, and flips the table to UNLOGGED while
// inserting, restoring everything afterwards. Fatal on the non-recoverable
// steps; SET LOGGED failure is tolerated for test data.
func doSeed(db *gorm.DB) {
	fmt.Println("\n🚀 开始生成性能测试数据UNLOGGED 极速模式)")
	fmt.Printf(" 目标卡数: %d\n", *total)
	fmt.Printf(" 店铺数量: %d代理商可见 %d 个)\n", numShops, agentChildShops+1)
	overallStart := time.Now()
	fmt.Println("\n📦 步骤 1/8: 创建测试店铺...")
	shopIDs := createTestShops(db)
	fmt.Printf(" ✅ 创建了 %d 个店铺\n", len(shopIDs))
	fmt.Println("\n👤 步骤 2/8: 创建测试代理商账号...")
	createTestAgent(db, shopIDs[0])
	fmt.Printf(" ✅ 账号: %s / %s (shop_id=%d)\n", testAgentUsername, testAgentPassword, shopIDs[0])
	// Raise PG limits so checkpoints fire less often during the bulk load.
	fmt.Println("\n⚙ 步骤 3/8: 临时调高 PostgreSQL 写入配置...")
	tuneForBulkLoad(db)
	// Dropping indexes before inserting speeds the load up 10-50x.
	fmt.Println("\n📇 步骤 4/8: 临时删除非主键索引(加速批量插入)...")
	droppedIndexes := dropNonPKIndexes(db)
	fmt.Printf(" ✅ 删除了 %d 个索引\n", len(droppedIndexes))
	// UNLOGGED skips WAL writes entirely (near-instant while the table is small).
	fmt.Println("\n⚡ 步骤 5/8: 设置表为 UNLOGGED跳过 WAL极速写入...")
	start := time.Now()
	if err := db.Exec("ALTER TABLE tb_iot_card SET UNLOGGED").Error; err != nil {
		log.Fatalf("设置 UNLOGGED 失败: %v", err)
	}
	fmt.Printf(" ✅ 已设置 UNLOGGED (耗时 %v)\n", time.Since(start))
	fmt.Println("\n💳 步骤 6/8: 批量插入卡数据UNLOGGED + generate_series...")
	insertCards(db, shopIDs)
	// SET LOGGED rewrites the whole table into the WAL (~3-5 min for 30M rows).
	fmt.Println("\n🔒 步骤 7/8: 恢复表为 LOGGED重写 WAL请耐心等待...")
	start = time.Now()
	if err := db.Exec("ALTER TABLE tb_iot_card SET LOGGED").Error; err != nil {
		// Non-fatal: the table stays usable, data would only be lost on a
		// crash — acceptable for test data.
		fmt.Printf(" ⚠️ 恢复 LOGGED 失败(测试数据可接受): %v\n", err)
	} else {
		fmt.Printf(" ✅ 已恢复 LOGGED (耗时 %v)\n", time.Since(start))
	}
	fmt.Println("\n📇 步骤 8/8: 重建索引 + ANALYZE...")
	rebuildIndexes(db, droppedIndexes)
	fmt.Printf(" ✅ 重建了 %d 个索引\n", len(droppedIndexes))
	start = time.Now()
	db.Exec("ANALYZE tb_iot_card")
	fmt.Printf(" ✅ ANALYZE 完成 (耗时 %v)\n", time.Since(start))
	// Put the PG settings back to their defaults.
	restoreAfterBulkLoad(db)
	fmt.Printf("\n🎉 全部完成!总耗时: %v\n", time.Since(overallStart))
	fmt.Println("\n📝 后续步骤:")
	fmt.Println(" 1. 启动 API 服务: source .env.local && go run ./cmd/api/...")
	fmt.Printf(" 2. 运行基准测试: go run ./scripts/perf_query/bench.go\n")
	fmt.Println(" 3. 测试完成后清理: go run ./scripts/perf_query/seed.go -action cleanup")
}
// tuneForBulkLoad temporarily raises PostgreSQL write-related settings to
// accelerate the bulk load; restoreAfterBulkLoad undoes these changes.
// Individual failures are reported but do not abort the run.
func tuneForBulkLoad(db *gorm.DB) {
	apply := func(stmt, label string) {
		if err := db.Exec(stmt).Error; err != nil {
			fmt.Printf(" ⚠️ %s 失败: %v\n", label, err)
			return
		}
		fmt.Printf(" ✅ %s\n", label)
	}
	apply("ALTER SYSTEM SET max_wal_size = '4GB'", "max_wal_size → 4GB减少 checkpoint 频率)")
	apply("ALTER SYSTEM SET checkpoint_timeout = '30min'", "checkpoint_timeout → 30min")
	apply("ALTER SYSTEM SET synchronous_commit = 'off'", "synchronous_commit → off不等待 WAL 刷盘)")
	// ALTER SYSTEM only edits the config file; reload to make it effective.
	db.Exec("SELECT pg_reload_conf()")
	fmt.Println(" ✅ 配置已重载")
}
// restoreAfterBulkLoad puts the PostgreSQL settings changed by
// tuneForBulkLoad back to their defaults and reloads the configuration.
func restoreAfterBulkLoad(db *gorm.DB) {
	fmt.Println("\n⚙ 恢复 PostgreSQL 配置...")
	for _, stmt := range []string{
		"ALTER SYSTEM SET max_wal_size = '1GB'",
		"ALTER SYSTEM SET checkpoint_timeout = '5min'",
		"ALTER SYSTEM SET synchronous_commit = 'on'",
		"SELECT pg_reload_conf()",
	} {
		db.Exec(stmt)
	}
	fmt.Println(" ✅ PG 配置已恢复默认")
}
// createTestShops wipes any leftover test shops, then creates one root shop
// (the agent's head office), agentChildShops children under it, and
// independent shops until numShops rows exist in total. Returns the created
// shop IDs with the root shop first. Fatal on any insert error.
func createTestShops(db *gorm.DB) []uint {
	ts := time.Now()
	ids := make([]uint, 0, numShops)
	// Remove shops left behind by a previous run.
	db.Exec("DELETE FROM tb_shop WHERE shop_code LIKE ?", testShopCodePrefix+"%")
	// create inserts one shop and returns its generated ID, aborting the
	// script with the given message on failure.
	create := func(shop *testShop, failMsg string) uint {
		if err := db.Create(shop).Error; err != nil {
			log.Fatalf("%s: %v", failMsg, err)
		}
		return shop.ID
	}
	// Root shop (the agent's head office).
	root := testShop{
		ShopName:     "性能测试-总代理",
		ShopCode:     testShopCodePrefix + "ROOT",
		Level:        1,
		ContactName:  "测试联系人",
		ContactPhone: "13800000000",
		Status:       1,
		Creator:      1,
		Updater:      1,
		CreatedAt:    ts,
		UpdatedAt:    ts,
	}
	ids = append(ids, create(&root, "创建根店铺失败"))
	// Child shops parented to the root (the agent's sub-shops).
	for n := 1; n <= agentChildShops; n++ {
		sub := testShop{
			ShopName:     fmt.Sprintf("性能测试-分店%d", n),
			ShopCode:     fmt.Sprintf("%sSUB-%d", testShopCodePrefix, n),
			ParentID:     &root.ID,
			Level:        2,
			ContactName:  "测试联系人",
			ContactPhone: fmt.Sprintf("1380000%04d", n),
			Status:       1,
			Creator:      1,
			Updater:      1,
			CreatedAt:    ts,
			UpdatedAt:    ts,
		}
		ids = append(ids, create(&sub, "创建子店铺失败"))
	}
	// Remaining shops are standalone (other agents).
	for n := agentChildShops + 1; n < numShops; n++ {
		standalone := testShop{
			ShopName:     fmt.Sprintf("性能测试-独立店铺%d", n),
			ShopCode:     fmt.Sprintf("%sINDEP-%d", testShopCodePrefix, n),
			Level:        1,
			ContactName:  "测试联系人",
			ContactPhone: fmt.Sprintf("1390000%04d", n),
			Status:       1,
			Creator:      1,
			Updater:      1,
			CreatedAt:    ts,
			UpdatedAt:    ts,
		}
		ids = append(ids, create(&standalone, "创建独立店铺失败"))
	}
	return ids
}
// createTestAgent replaces any previous test agent account and inserts a
// fresh one bound to shopID, storing the bcrypt hash of the fixed test
// password. Fatal on hashing or insert errors.
func createTestAgent(db *gorm.DB, shopID uint) {
	// Drop the account from any previous run first.
	db.Exec("DELETE FROM tb_account WHERE username = ?", testAgentUsername)
	hash, err := bcrypt.GenerateFromPassword([]byte(testAgentPassword), bcrypt.DefaultCost)
	if err != nil {
		log.Fatalf("加密密码失败: %v", err)
	}
	ts := time.Now()
	agent := testAccount{
		Username:  testAgentUsername,
		Phone:     "19999999999",
		Password:  string(hash),
		UserType:  3, // agent account type
		ShopID:    &shopID,
		Status:    1,
		Creator:   1,
		Updater:   1,
		CreatedAt: ts,
		UpdatedAt: ts,
	}
	if err := db.Create(&agent).Error; err != nil {
		log.Fatalf("创建代理账号失败: %v", err)
	}
}
// insertCards bulk-inserts *total cards entirely server-side via
// generate_series, splitting the ID range into chunks processed by a small
// pool of workers, each on its own connection with synchronous_commit=off.
// shop_id is drawn from shopIDs with a power(random(), 2.5) skew so a few
// shops own most of the cards. Failed chunks are logged and counted but do
// not abort the run.
func insertCards(db *gorm.DB, shopIDs []uint) {
	idStrs := make([]string, len(shopIDs))
	for i, id := range shopIDs {
		idStrs[i] = strconv.FormatUint(uint64(id), 10)
	}
	// Inline the shop IDs as a PG array literal so the generated SQL can
	// index into it per row.
	pgArray := "ARRAY[" + strings.Join(idStrs, ",") + "]::bigint[]"
	sqlTemplate := fmt.Sprintf(`
INSERT INTO tb_iot_card (
iccid, card_category, carrier_id, carrier_type, carrier_name,
imsi, msisdn, batch_no, supplier, cost_price, distribute_price,
status, shop_id, activation_status, real_name_status, network_status,
data_usage_mb, enable_polling, created_at, updated_at, creator, updater, series_id,
is_standalone
)
SELECT
'T' || lpad(i::text, 19, '0'),
CASE WHEN random() < 0.85 THEN 'normal' ELSE 'industry' END,
(i %% 4 + 1),
CASE (i %% 4) WHEN 0 THEN 'CMCC' WHEN 1 THEN 'CUCC' WHEN 2 THEN 'CTCC' ELSE 'CBN' END,
CASE (i %% 4) WHEN 0 THEN '中国移动' WHEN 1 THEN '中国联通' WHEN 2 THEN '中国电信' ELSE '中国广电' END,
lpad(i::text, 15, '0'),
'1' || lpad((i %% 10000000)::text, 10, '0'),
'%s',
CASE (i %% 3) WHEN 0 THEN '供应商A' WHEN 1 THEN '供应商B' ELSE '供应商C' END,
(random() * 5000 + 500)::bigint,
(random() * 3000 + 1000)::bigint,
CASE WHEN random() < 0.15 THEN 1 WHEN random() < 0.30 THEN 2 WHEN random() < 0.90 THEN 3 ELSE 4 END,
(%s)[GREATEST(1, LEAST(%d, floor(%d * power(random(), 2.5))::int + 1))],
CASE WHEN random() < 0.7 THEN 1 ELSE 0 END,
CASE WHEN random() < 0.6 THEN 1 ELSE 0 END,
CASE WHEN random() < 0.8 THEN 1 ELSE 0 END,
(random() * 100000)::bigint,
random() < 0.9,
now() - interval '1 day' * (random() * 730),
now() - interval '1 day' * (random() * 30),
1, 1,
CASE WHEN random() < 0.8 THEN (random() * 20 + 1)::int ELSE NULL END,
true
FROM generate_series($1::bigint, $2::bigint) AS s(i)
`, testBatchNo, pgArray, numShops, numShops)
	// 4 workers keeps I/O contention manageable on HDD-backed storage.
	const workerCount = 4
	// Bug fix: the original hardcoded a 5M chunk size, silently ignoring the
	// -batch flag. Honor the flag (default is still 5M), guarding against
	// nonsensical values.
	chunkSize := *batchSize
	if chunkSize < 1 {
		chunkSize = 1
	}
	type task struct {
		batchNum int
		start    int
		end      int
	}
	totalBatches := (*total + chunkSize - 1) / chunkSize
	taskCh := make(chan task, totalBatches)
	var wg sync.WaitGroup
	var insertedCount int64
	var errCount int64
	totalStart := time.Now()
	dsn := buildDSN()
	for w := 0; w < workerCount; w++ {
		wg.Add(1)
		go func(workerID int) {
			defer wg.Done()
			conn, err := sql.Open("pgx", dsn)
			if err != nil {
				log.Printf("Worker %d 连接失败: %v", workerID, err)
				return
			}
			defer conn.Close()
			conn.SetMaxOpenConns(1)
			// Best-effort session tuning; a failure here only costs speed.
			conn.Exec("SET synchronous_commit = off")
			conn.Exec("SET work_mem = '256MB'")
			for t := range taskCh {
				_, err := conn.Exec(sqlTemplate, t.start, t.end)
				if err != nil {
					log.Printf("Worker %d 批次 %d 失败: %v", workerID, t.batchNum, err)
					atomic.AddInt64(&errCount, 1)
					continue
				}
				count := t.end - t.start + 1
				done := atomic.AddInt64(&insertedCount, int64(count))
				elapsed := time.Since(totalStart).Seconds()
				speed := float64(done) / elapsed
				remaining := float64(int64(*total)-done) / speed
				fmt.Printf(" [W%d] 批次 %d/%d 完成 | 累计 %d/%d (%.1f%%) | %.0f条/秒 | 剩余 %.0f秒\n",
					workerID, t.batchNum, totalBatches, done, *total,
					float64(done)*100/float64(*total), speed, remaining)
			}
		}(w)
	}
	// Queue every chunk; the channel is buffered for all of them, so this
	// never blocks even if workers fail early.
	for b := 0; b < totalBatches; b++ {
		start := b*chunkSize + 1
		end := (b + 1) * chunkSize
		if end > *total {
			end = *total
		}
		taskCh <- task{batchNum: b + 1, start: start, end: end}
	}
	close(taskCh)
	wg.Wait()
	elapsed := time.Since(totalStart)
	fmt.Printf(" ✅ 共插入 %d 条 (失败 %d 批) | 总耗时 %v | 平均 %.0f 条/秒\n",
		atomic.LoadInt64(&insertedCount), atomic.LoadInt64(&errCount), elapsed.Round(time.Second),
		float64(atomic.LoadInt64(&insertedCount))/elapsed.Seconds())
}
// ==================== Cleanup ====================

// doCleanup removes all seeded test data: cards (in batched deletes keyed by
// ctid so each transaction stays small), shops, the agent account, and the
// optimization indexes. Disk space is not reclaimed automatically — the
// closing hint suggests a manual VACUUM FULL.
func doCleanup(db *gorm.DB) {
	fmt.Println("\n🧹 开始清理性能测试数据")
	// Delete the test cards 500k rows at a time until none remain.
	fmt.Println("\n💳 删除测试卡数据...")
	totalDeleted := 0
	start := time.Now()
	for {
		result := db.Exec("DELETE FROM tb_iot_card WHERE ctid IN (SELECT ctid FROM tb_iot_card WHERE batch_no = ? LIMIT 500000)", testBatchNo)
		if result.Error != nil {
			log.Fatalf("删除失败: %v", result.Error)
		}
		if result.RowsAffected == 0 {
			break
		}
		totalDeleted += int(result.RowsAffected)
		elapsed := time.Since(start)
		speed := float64(totalDeleted) / elapsed.Seconds()
		fmt.Printf(" 已删除 %d 条 (%.0f 条/秒)\n", totalDeleted, speed)
	}
	fmt.Printf(" ✅ 共删除 %d 条卡数据 (耗时 %v)\n", totalDeleted, time.Since(start).Round(time.Second))
	// Remove the seeded shops.
	fmt.Println("\n📦 删除测试店铺...")
	result := db.Exec("DELETE FROM tb_shop WHERE shop_code LIKE ?", testShopCodePrefix+"%")
	fmt.Printf(" ✅ 删除了 %d 个店铺\n", result.RowsAffected)
	// Remove the seeded agent account.
	fmt.Println("\n👤 删除测试账号...")
	result = db.Exec("DELETE FROM tb_account WHERE username = ?", testAgentUsername)
	fmt.Printf(" ✅ 删除了 %d 个账号\n", result.RowsAffected)
	// Drop the optimization indexes if they exist.
	fmt.Println("\n📇 清理优化索引...")
	db.Exec("DROP INDEX IF EXISTS idx_iot_card_perf_shop_created")
	db.Exec("DROP INDEX IF EXISTS idx_iot_card_perf_shop_status_created")
	fmt.Println(" ✅ 索引已清理")
	fmt.Println("\n⚠ 建议手动执行 VACUUM FULL tb_iot_card; 回收磁盘空间")
	fmt.Println("🎉 清理完成!")
}
// ==================== Verify ====================

// doVerify prints distribution statistics for the seeded data: total count,
// agent-visible counts, per-shop / status / carrier breakdowns, table and
// index sizes, and the seeded agent account row.
// NOTE(review): if no test data exists (total == 0) the percentage
// calculations divide by zero and print NaN — harmless (float division does
// not panic) but confusing; consider guarding.
func doVerify(db *gorm.DB) {
	fmt.Println("\n📊 验证测试数据分布")
	// Total seeded cards.
	var total int64
	db.Raw("SELECT count(*) FROM tb_iot_card WHERE batch_no = ? AND deleted_at IS NULL", testBatchNo).Scan(&total)
	fmt.Printf("\n总测试卡数: %d\n", total)
	// Cards visible to the agent: root shop only, then root + SUB children.
	var agentVisible int64
	db.Raw(`
SELECT count(*) FROM tb_iot_card
WHERE batch_no = ? AND deleted_at IS NULL
AND shop_id IN (SELECT id FROM tb_shop WHERE shop_code LIKE ? AND deleted_at IS NULL)
`, testBatchNo, testShopCodePrefix+"ROOT").Scan(&agentVisible)
	var agentSubVisible int64
	db.Raw(`
SELECT count(*) FROM tb_iot_card
WHERE batch_no = ? AND deleted_at IS NULL
AND shop_id IN (
SELECT id FROM tb_shop
WHERE (shop_code LIKE ? OR shop_code LIKE ?) AND deleted_at IS NULL
)
`, testBatchNo, testShopCodePrefix+"ROOT", testShopCodePrefix+"SUB-%").Scan(&agentSubVisible)
	fmt.Printf("代理商总店卡数: %d\n", agentVisible)
	fmt.Printf("代理商(含下级)卡数: %d (占比 %.1f%%)\n", agentSubVisible, float64(agentSubVisible)*100/float64(total))
	// Top-20 shops by card count.
	type shopDist struct {
		ShopID uint
		Cnt    int64
	}
	var dists []shopDist
	db.Raw(`
SELECT shop_id, count(*) as cnt FROM tb_iot_card
WHERE batch_no = ? AND deleted_at IS NULL
GROUP BY shop_id ORDER BY cnt DESC LIMIT 20
`, testBatchNo).Scan(&dists)
	fmt.Println("\n前 20 名店铺卡量分布:")
	fmt.Printf(" %-12s %-12s %-8s\n", "shop_id", "卡数", "占比")
	for _, d := range dists {
		fmt.Printf(" %-12d %-12d %.2f%%\n", d.ShopID, d.Cnt, float64(d.Cnt)*100/float64(total))
	}
	// Status distribution.
	type statusDist struct {
		Status int
		Cnt    int64
	}
	var statuses []statusDist
	db.Raw("SELECT status, count(*) as cnt FROM tb_iot_card WHERE batch_no = ? AND deleted_at IS NULL GROUP BY status ORDER BY status", testBatchNo).Scan(&statuses)
	fmt.Println("\n状态分布:")
	statusNames := map[int]string{1: "在库", 2: "已分销", 3: "已激活", 4: "已停用"}
	for _, s := range statuses {
		fmt.Printf(" 状态 %d (%s): %d (%.1f%%)\n", s.Status, statusNames[s.Status], s.Cnt, float64(s.Cnt)*100/float64(total))
	}
	// Carrier distribution.
	type carrierDist struct {
		CarrierID uint
		Cnt       int64
	}
	var carriers []carrierDist
	db.Raw("SELECT carrier_id, count(*) as cnt FROM tb_iot_card WHERE batch_no = ? AND deleted_at IS NULL GROUP BY carrier_id ORDER BY carrier_id", testBatchNo).Scan(&carriers)
	fmt.Println("\n运营商分布:")
	for _, c := range carriers {
		fmt.Printf(" carrier_id=%d: %d (%.1f%%)\n", c.CarrierID, c.Cnt, float64(c.Cnt)*100/float64(total))
	}
	// Table and index sizes, pretty-printed by PostgreSQL.
	var tableSize, indexSize string
	db.Raw("SELECT pg_size_pretty(pg_relation_size('tb_iot_card'))").Scan(&tableSize)
	db.Raw("SELECT pg_size_pretty(pg_indexes_size('tb_iot_card'))").Scan(&indexSize)
	fmt.Printf("\n表大小: %s | 索引大小: %s\n", tableSize, indexSize)
	// Seeded agent account, for logging into the API under test.
	var agentInfo struct {
		ID       uint
		Username string
		ShopID   uint
	}
	db.Raw("SELECT id, username, shop_id FROM tb_account WHERE username = ? AND deleted_at IS NULL", testAgentUsername).Scan(&agentInfo)
	fmt.Printf("\n测试代理商: id=%d, username=%s, shop_id=%d\n", agentInfo.ID, agentInfo.Username, agentInfo.ShopID)
}
// ==================== Bulk Load Index Management ====================

// indexDef captures an index's name and its complete CREATE INDEX statement
// (as stored in pg_indexes.indexdef) so the index can be dropped before the
// bulk load and recreated afterwards.
type indexDef struct {
	Name       string
	Definition string
}
// dropNonPKIndexes records and drops every index on tb_iot_card except the
// primary key, returning the saved definitions so rebuildIndexes can restore
// them after the bulk load.
// NOTE(review): the primary-key index is assumed to be named
// 'iot_cards_pkey' even though the table is tb_iot_card — if the actual name
// differs, the PK index would be dropped too. Confirm against the schema.
func dropNonPKIndexes(db *gorm.DB) []indexDef {
	sqlDB, _ := db.DB()
	var indexes []indexDef
	rows, err := sqlDB.Query(`
SELECT indexname, indexdef FROM pg_indexes
WHERE tablename = 'tb_iot_card' AND indexname != 'iot_cards_pkey'
ORDER BY indexname
`)
	if err != nil {
		log.Fatalf("查询索引失败: %v", err)
	}
	defer rows.Close()
	for rows.Next() {
		var idx indexDef
		// Bug fix: the original ignored the Scan error, which could silently
		// record an empty definition and lose the index forever.
		if err := rows.Scan(&idx.Name, &idx.Definition); err != nil {
			log.Fatalf("读取索引失败: %v", err)
		}
		indexes = append(indexes, idx)
	}
	// Bug fix: the original never checked rows.Err(), so an iteration error
	// could silently truncate the list of indexes to restore.
	if err := rows.Err(); err != nil {
		log.Fatalf("遍历索引失败: %v", err)
	}
	for _, idx := range indexes {
		fmt.Printf(" 删除 %s ...\n", idx.Name)
		// Report drop failures instead of discarding them; the definition is
		// already saved, so rebuild will recreate the index either way.
		if _, err := sqlDB.Exec("DROP INDEX IF EXISTS " + idx.Name); err != nil {
			fmt.Printf(" ⚠️ 删除 %s 失败: %v\n", idx.Name, err)
		}
	}
	return indexes
}
// rebuildIndexes re-runs the CREATE INDEX statements saved by
// dropNonPKIndexes and reports how long each rebuild took. Failures are
// printed but do not abort the remaining rebuilds.
func rebuildIndexes(db *gorm.DB, indexes []indexDef) {
	sqlDB, _ := db.DB()
	for _, def := range indexes {
		fmt.Printf(" 重建 %s ...", def.Name)
		begin := time.Now()
		_, err := sqlDB.Exec(def.Definition)
		if err != nil {
			fmt.Printf(" ❌ 失败: %v\n", err)
			continue
		}
		fmt.Printf(" ✅ (%v)\n", time.Since(begin).Round(time.Second))
	}
}
// ==================== Optimization Index Management ====================

// doAddIndex creates the optimization composite indexes CONCURRENTLY (so
// reads and writes are not blocked), adds pg_trgm GIN indexes for fuzzy
// ICCID/MSISDN search when the extension can be enabled, drops a duplicate
// shop_id index, and refreshes table statistics.
func doAddIndex(db *gorm.DB) {
	fmt.Println("\n📇 创建优化复合索引CONCURRENTLY不阻塞读写")
	type idxSpec struct {
		name string
		sql  string
	}
	pending := []idxSpec{
		{
			"idx_iot_card_perf_shop_created",
			"CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_iot_card_perf_shop_created ON tb_iot_card (shop_id, created_at DESC) WHERE deleted_at IS NULL",
		},
		{
			"idx_iot_card_perf_shop_status_created",
			"CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_iot_card_perf_shop_status_created ON tb_iot_card (shop_id, status, created_at DESC) WHERE deleted_at IS NULL",
		},
	}
	// CONCURRENTLY cannot run inside a transaction, so use the raw connection.
	sqlDB, _ := db.DB()
	// pg_trgm GIN indexes accelerate infix LIKE '%xxx%' searches.
	fmt.Println("\n 启用 pg_trgm 扩展...")
	if _, err := sqlDB.Exec("CREATE EXTENSION IF NOT EXISTS pg_trgm"); err != nil {
		fmt.Printf(" ⚠️ pg_trgm 扩展创建失败(可能需要超级管理员权限): %v\n", err)
	} else {
		fmt.Println(" ✅ pg_trgm 扩展已启用")
		pending = append(pending,
			idxSpec{
				"idx_iot_card_iccid_trgm",
				"CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_iot_card_iccid_trgm ON tb_iot_card USING gin (iccid gin_trgm_ops)",
			},
			idxSpec{
				"idx_iot_card_msisdn_trgm",
				"CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_iot_card_msisdn_trgm ON tb_iot_card USING gin (msisdn gin_trgm_ops)",
			},
		)
	}
	for _, spec := range pending {
		fmt.Printf(" 创建 %s ...", spec.name)
		begin := time.Now()
		if _, err := sqlDB.Exec(spec.sql); err != nil {
			fmt.Printf(" ❌ 失败: %v\n", err)
			continue
		}
		fmt.Printf(" ✅ (%v)\n", time.Since(begin).Round(time.Second))
	}
	// Drop the duplicate of idx_iot_card_shop_id.
	fmt.Println(" 清理重复索引 idx_tb_iot_card_shop_id与 idx_iot_card_shop_id 重复)...")
	sqlDB.Exec("DROP INDEX IF EXISTS idx_tb_iot_card_shop_id")
	fmt.Println("\n 更新统计信息...")
	sqlDB.Exec("ANALYZE tb_iot_card")
	fmt.Println(" ✅ 索引优化完成")
}
// doDropIndex removes every optimization index created by doAddIndex and
// re-analyzes the table, restoring the pre-optimization state.
func doDropIndex(db *gorm.DB) {
	fmt.Println("\n📇 删除优化索引(恢复到原始状态)")
	sqlDB, _ := db.DB()
	for _, stmt := range []string{
		"DROP INDEX IF EXISTS idx_iot_card_perf_shop_created",
		"DROP INDEX IF EXISTS idx_iot_card_perf_shop_status_created",
		"DROP INDEX IF EXISTS idx_iot_card_iccid_trgm",
		"DROP INDEX IF EXISTS idx_iot_card_msisdn_trgm",
		"ANALYZE tb_iot_card",
	} {
		sqlDB.Exec(stmt)
	}
	fmt.Println(" ✅ 已删除优化索引")
}
// ==================== Utils ====================

// getEnv returns the value of the environment variable key, or fallback when
// the variable is unset or empty.
func getEnv(key, fallback string) string {
	v := os.Getenv(key)
	if v == "" {
		return fallback
	}
	return v
}