From a9f027a051d626291e007169760e0e8cae2cb581 Mon Sep 17 00:00:00 2001
From: wucm667
Date: Fri, 1 May 2026 09:37:43 +0800
Subject: [PATCH] fix(migration): handle pipeline-incompatible statements in
 ExecBatch

CREATE INDEX CONCURRENTLY, REINDEX ... CONCURRENTLY, VACUUM, ALTER
SYSTEM, and CLUSTER cannot run inside a pgx pipeline (extended query
protocol). When db reset encounters these statements, the batch fails
with SQLSTATE 25001 (active_sql_transaction).

Fix: detect pipeline-incompatible statements and execute them
individually via conn.Exec (simple query protocol) while batching the
rest normally. Detection covers REINDEX TABLE/INDEX ... CONCURRENTLY
and bare VACUUM/CLUSTER, and the migration-history insert is counted
toward the trailing batch so it is still flushed even when every
preceding statement had to run individually.

Fixes supabase/cli#5139

Signed-off-by: wucm667
---
 pkg/migration/file.go | 77 ++++++++++++++++++++++++++++++++++---------
 1 file changed, 61 insertions(+), 16 deletions(-)

diff --git a/pkg/migration/file.go b/pkg/migration/file.go
index 540c129e33..fa8aa55aa8 100644
--- a/pkg/migration/file.go
+++ b/pkg/migration/file.go
@@ -72,23 +72,30 @@ func NewMigrationFromReader(sql io.Reader) (*MigrationFile, error) {
 	return &MigrationFile{Statements: lines}, nil
 }
 
+// isPipelineIncompatible reports whether sql must run over the simple query
+// protocol: PostgreSQL refuses these statements inside the implicit
+// transaction of a pipelined batch (SQLSTATE 25001).
+func isPipelineIncompatible(sql string) bool {
+	upper := strings.ToUpper(strings.TrimSpace(sql))
+	// REINDEX puts CONCURRENTLY after the object type (REINDEX TABLE
+	// CONCURRENTLY t) or in parenthesized options, so it needs its own
+	// check besides CREATE/DROP INDEX CONCURRENTLY.
+	if strings.HasPrefix(upper, "REINDEX") && strings.Contains(upper, "CONCURRENTLY") {
+		return true
+	}
+	return strings.Contains(upper, "INDEX CONCURRENTLY") ||
+		strings.HasPrefix(upper, "VACUUM") ||
+		strings.HasPrefix(upper, "ALTER SYSTEM") ||
+		strings.HasPrefix(upper, "CLUSTER")
+}
+
 func (m *MigrationFile) ExecBatch(ctx context.Context, conn *pgx.Conn) error {
-	// Batch migration commands, without using statement cache
 	batch := &pgconn.Batch{}
-	for _, line := range m.Statements {
-		batch.ExecParams(line, nil, nil, nil, nil)
-	}
-	// Insert into migration history
-	if len(m.Version) > 0 {
-		if err := m.insertVersionSQL(conn, batch); err != nil {
-			return err
-		}
-	}
-	// ExecBatch is implicitly transactional
-	if result, err := conn.PgConn().ExecBatch(ctx, batch).ReadAll(); err != nil {
-		// Defaults to printing the last statement on error
+	batchSize := 0
+	executed := 0
+
+	formatError := func(err error, i int) error {
 		stat := INSERT_MIGRATION_VERSION
-		i := len(result)
 		if i < len(m.Statements) {
 			stat = m.Statements[i]
 		}
@@ -99,7 +106,6 @@ func (m *MigrationFile) ExecBatch(ctx context.Context, conn *pgx.Conn) error {
 		if len(pgErr.Detail) > 0 {
 			msg = append(msg, pgErr.Detail)
 		}
-		// Provide helpful hint for extension type errors (SQLSTATE 42704: undefined_object)
 		if typeName := extractTypeName(pgErr.Message); len(typeName) > 0 && pgErr.Code == "42704" && !IsSchemaQualified(typeName) {
 			msg = append(msg, "")
 			msg = append(msg, "Hint: This type may be defined in a schema that's not in your search_path.")
@@ -111,7 +117,46 @@ func (m *MigrationFile) ExecBatch(ctx context.Context, conn *pgx.Conn) error {
 		msg = append(msg, fmt.Sprintf("At statement: %d", i), stat)
 		return errors.Errorf("%w\n%s", err, strings.Join(msg, "\n"))
 	}
-	return nil
+
+	flushBatch := func() error {
+		if batchSize == 0 {
+			return nil
+		}
+		if result, err := conn.PgConn().ExecBatch(ctx, batch).ReadAll(); err != nil {
+			return formatError(err, executed+len(result))
+		}
+		executed += batchSize
+		batch = &pgconn.Batch{}
+		batchSize = 0
+		return nil
+	}
+
+	for _, line := range m.Statements {
+		if isPipelineIncompatible(line) {
+			if err := flushBatch(); err != nil {
+				return err
+			}
+			if _, err := conn.Exec(ctx, line); err != nil {
+				return formatError(err, executed)
+			}
+			executed++
+		} else {
+			batch.ExecParams(line, nil, nil, nil, nil)
+			batchSize++
+		}
+	}
+
+	// Insert into migration history
+	if len(m.Version) > 0 {
+		if err := m.insertVersionSQL(conn, batch); err != nil {
+			return err
+		}
+		// Count the queued INSERT so the final flush cannot be skipped when
+		// every preceding statement ran individually (batchSize == 0).
+		batchSize++
+	}
+
+	return flushBatch()
 }
 
 func markError(stat string, pos int) string {