=== Applying patches on top of PostgreSQL commit ID e68b6adad96d414fdf24e072fdb1d41fb4b8f0b7 ===
/etc/rc.d/jail: WARNING: Per-jail configuration via jail_* variables is obsolete.
Please consider migrating to /etc/jail.conf.
Fri Nov 28 06:24:23 UTC 2025
On branch cf/6261
nothing to commit, working tree clean
=== using 'git am' to apply patch ./0001-Rename-AssertVariableIsOfType-to-StaticAssertVariabl.patch ===
Applying: Rename AssertVariableIsOfType to StaticAssertVariableIsOfType
=== using 'git am' to apply patch ./0002-Change-StaticAssertVariableIsOfType-to-be-a-declarat.patch ===
Applying: Change StaticAssertVariableIsOfType to be a declaration
=== using 'git am' to apply patch ./0003-Remove-most-StaticAssertStmt.patch ===
Applying: Remove most StaticAssertStmt()
Using index info to reconstruct a base tree...
M	src/backend/access/heap/vacuumlazy.c
M	src/backend/access/table/tableam.c
M	src/backend/storage/buffer/bufmgr.c
M	src/backend/utils/cache/inval.c
M	src/include/storage/fd.h
Falling back to patching base and 3-way merge...
Auto-merging src/include/storage/fd.h
Auto-merging src/backend/utils/cache/inval.c
Auto-merging src/backend/storage/buffer/bufmgr.c
Auto-merging src/backend/access/table/tableam.c
CONFLICT (content): Merge conflict in src/backend/access/table/tableam.c
Auto-merging src/backend/access/heap/vacuumlazy.c
error: Failed to merge in the changes.
hint: Use 'git am --show-current-patch=diff' to see the failed patch
Patch failed at 0001 Remove most StaticAssertStmt()
When you have resolved this problem, run "git am --continue".
If you prefer to skip this patch, run "git am --skip" instead.
To restore the original branch and stop patching, run "git am --abort".
Unstaged changes after reset:
M	contrib/hstore/hstore_compat.c
M	src/backend/access/heap/vacuumlazy.c
M	src/backend/access/table/tableam.c
M	src/backend/access/transam/parallel.c
M	src/backend/backup/basebackup.c
M	src/backend/storage/buffer/bufmgr.c
M	src/backend/storage/file/fd.c
M	src/backend/storage/ipc/waiteventset.c
M	src/backend/storage/lmgr/deadlock.c
M	src/backend/utils/adt/mac.c
M	src/backend/utils/cache/inval.c
M	src/backend/utils/mmgr/aset.c
M	src/include/storage/fd.h
=== using patch(1) to apply patch ./0003-Remove-most-StaticAssertStmt.patch ===
patching file contrib/hstore/hstore_compat.c
patching file src/backend/access/heap/vacuumlazy.c
Hunk #1 succeeded at 3374 (offset 5 lines).
Hunk #2 succeeded at 3391 (offset 5 lines).
patching file src/backend/access/table/tableam.c
Hunk #1 FAILED at 423.
1 out of 1 hunk FAILED -- saving rejects to file src/backend/access/table/tableam.c.rej
patching file src/backend/access/transam/parallel.c
patching file src/backend/backup/basebackup.c
patching file src/backend/storage/buffer/bufmgr.c
Hunk #1 succeeded at 7149 (offset 188 lines).
patching file src/backend/storage/file/fd.c
patching file src/backend/storage/ipc/waiteventset.c
patching file src/backend/storage/lmgr/deadlock.c
patching file src/backend/utils/adt/mac.c
patching file src/backend/utils/cache/inval.c
patching file src/backend/utils/mmgr/aset.c
patching file src/include/storage/fd.h
Hunk #1 succeeded at 92 (offset -1 lines).
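[editor's note] The rejected hunk in src/backend/access/table/tableam.c sits on the compile-time assertion that this patch series reworks. As a minimal, self-contained sketch (the macros below only approximate StaticAssertStmt/StaticAssertDecl from src/include/c.h, and the BlockNumber typedef and constant are local stand-ins, not PostgreSQL source), the distinction the series relies on is that the statement form is only legal inside a function body, while the declaration form can also sit at file scope:

/*
 * Minimal sketch, not PostgreSQL source: approximates the two assertion
 * macros to show why patch 0003 can hoist the check out of the function body.
 */

/* statement form: only valid where a statement may appear */
#define StaticAssertStmt(condition, errmessage) \
	do { _Static_assert(condition, errmessage); } while (0)

/* declaration form: also valid at file scope and among declarations */
#define StaticAssertDecl(condition, errmessage) \
	_Static_assert(condition, errmessage)

typedef unsigned int BlockNumber;	/* stand-in: 32-bit block numbers */
#define MaxBlockNumber ((BlockNumber) 0xFFFFFFFE)

/* file-scope check: only the declaration form can live here */
StaticAssertDecl(MaxBlockNumber <= 0xFFFFFFFE,
				 "pg_nextpower2_32 may be too small for non-standard BlockNumber width");

int
main(void)
{
	/* the statement form must stay inside a function body */
	StaticAssertStmt(sizeof(BlockNumber) == 4, "BlockNumber must be 32 bits");
	return 0;
}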
Unstaged changes after reset:
M	contrib/hstore/hstore_compat.c
M	src/backend/access/heap/vacuumlazy.c
M	src/backend/access/transam/parallel.c
M	src/backend/backup/basebackup.c
M	src/backend/storage/buffer/bufmgr.c
M	src/backend/storage/file/fd.c
M	src/backend/storage/ipc/waiteventset.c
M	src/backend/storage/lmgr/deadlock.c
M	src/backend/utils/adt/mac.c
M	src/backend/utils/cache/inval.c
M	src/backend/utils/mmgr/aset.c
M	src/include/storage/fd.h
Removing src/backend/access/table/tableam.c.rej
=== using 'git apply' to apply patch ./0003-Remove-most-StaticAssertStmt.patch ===
Applied patch to 'contrib/hstore/hstore_compat.c' cleanly.
Applied patch to 'src/backend/access/heap/vacuumlazy.c' cleanly.
Applied patch to 'src/backend/access/table/tableam.c' with conflicts.
Applied patch to 'src/backend/access/transam/parallel.c' cleanly.
Applied patch to 'src/backend/backup/basebackup.c' cleanly.
Applied patch to 'src/backend/storage/buffer/bufmgr.c' cleanly.
Applied patch to 'src/backend/storage/file/fd.c' cleanly.
Applied patch to 'src/backend/storage/ipc/waiteventset.c' cleanly.
Applied patch to 'src/backend/storage/lmgr/deadlock.c' cleanly.
Applied patch to 'src/backend/utils/adt/mac.c' cleanly.
Applied patch to 'src/backend/utils/cache/inval.c' cleanly.
Applied patch to 'src/backend/utils/mmgr/aset.c' cleanly.
Applied patch to 'src/include/storage/fd.h' cleanly.
U	src/backend/access/table/tableam.c
diff --cc src/backend/access/table/tableam.c
index 1e099febdc8,0c90feb0e97..00000000000
--- a/src/backend/access/table/tableam.c
+++ b/src/backend/access/table/tableam.c
@@@ -458,18 -421,34 +458,42 @@@ table_block_parallelscan_reinitialize(R
  void
  table_block_parallelscan_startblock_init(Relation rel,
  					  ParallelBlockTableScanWorker pbscanwork,
- 					  ParallelBlockTableScanDesc pbscan)
+ 					  ParallelBlockTableScanDesc pbscan,
+ 					  BlockNumber startblock,
+ 					  BlockNumber numblocks)
  {
+ 	StaticAssertDecl(MaxBlockNumber <= 0xFFFFFFFE,
+ 			 "pg_nextpower2_32 may be too small for non-standard BlockNumber width");
+
  	BlockNumber sync_startpage = InvalidBlockNumber;
+ 	BlockNumber scan_nblocks;
  
  	/* Reset the state we use for controlling allocation size. */
  	memset(pbscanwork, 0, sizeof(*pbscanwork));
  
++<<<<<<< ours
+ 	StaticAssertStmt(MaxBlockNumber <= 0xFFFFFFFE,
+ 			 "pg_nextpower2_32 may be too small for non-standard BlockNumber width");
++=======
+ 	/*
+ 	 * We determine the chunk size based on the size of the relation. First we
+ 	 * split the relation into PARALLEL_SEQSCAN_NCHUNKS chunks but we then
+ 	 * take the next highest power of 2 number of the chunk size. This means
+ 	 * we split the relation into somewhere between PARALLEL_SEQSCAN_NCHUNKS
+ 	 * and PARALLEL_SEQSCAN_NCHUNKS / 2 chunks.
+ 	 */
+ 	pbscanwork->phsw_chunk_size = pg_nextpower2_32(Max(pbscan->phs_nblocks /
+ 							   PARALLEL_SEQSCAN_NCHUNKS, 1));
+
+ 	/*
+ 	 * Ensure we don't go over the maximum chunk size with larger tables. This
+ 	 * means we may get much more than PARALLEL_SEQSCAN_NCHUNKS for larger
+ 	 * tables. Too large a chunk size has been shown to be detrimental to
+ 	 * synchronous scan performance.
+ 	 */
+ 	pbscanwork->phsw_chunk_size = Min(pbscanwork->phsw_chunk_size,
+ 					  PARALLEL_SEQSCAN_MAX_CHUNK_SIZE);
++>>>>>>> theirs
  
  retry:
  	/* Grab the spinlock. */
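[editor's note] The "theirs" side of the conflict carries the chunk-size heuristic that the quoted comments describe. A self-contained sketch of that calculation follows; it is an illustration, not the tableam.c implementation: the power-of-2 helper is a local stand-in for pg_nextpower2_32(), and the two limits are assumed values standing in for the PARALLEL_SEQSCAN_NCHUNKS and PARALLEL_SEQSCAN_MAX_CHUNK_SIZE macros defined in tableam.c.

/*
 * Illustrative sketch of the parallel seqscan chunk-size heuristic quoted
 * in the conflicted hunk above; constants and helper are local stand-ins,
 * not PostgreSQL's definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define PARALLEL_SEQSCAN_NCHUNKS		2048	/* assumed value */
#define PARALLEL_SEQSCAN_MAX_CHUNK_SIZE	8192	/* assumed value */

#define Max(x, y)	((x) > (y) ? (x) : (y))
#define Min(x, y)	((x) < (y) ? (x) : (y))

/* local stand-in for pg_nextpower2_32(): smallest power of 2 >= num (num > 0) */
static uint32_t
next_power2_32(uint32_t num)
{
	uint32_t	result = 1;

	while (result < num)
		result <<= 1;
	return result;
}

/*
 * Split the relation into roughly PARALLEL_SEQSCAN_NCHUNKS chunks, round the
 * chunk size up to a power of 2, then clamp it so very large tables do not
 * get chunks big enough to hurt synchronized-scan behaviour.
 */
static uint32_t
chunk_size_for(uint32_t nblocks)
{
	uint32_t	chunk_size;

	chunk_size = next_power2_32(Max(nblocks / PARALLEL_SEQSCAN_NCHUNKS, 1));
	return Min(chunk_size, PARALLEL_SEQSCAN_MAX_CHUNK_SIZE);
}

int
main(void)
{
	/* e.g. a relation of 1,000,000 8kB blocks (~7.6 GiB) */
	printf("chunk size: %u blocks\n", chunk_size_for(1000000));
	return 0;
}

With these stand-in values, a 1,000,000-block relation lands on 512-block chunks, i.e. roughly 1954 chunks, which falls between PARALLEL_SEQSCAN_NCHUNKS and PARALLEL_SEQSCAN_NCHUNKS / 2 as the quoted comment promises.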