=== Applying patches on top of PostgreSQL commit ID 5941946d0934b9eccb0d5bfebd40b155249a0130 ===
/etc/rc.d/jail: WARNING: Per-jail configuration via jail_* variables is obsolete.
Please consider migrating to /etc/jail.conf.
Wed Mar 19 19:06:34 UTC 2025
On branch cf/5562
nothing to commit, working tree clean
=== using 'git am' to apply patch ./v2-0001-HashAgg-use-Bump-allocator-for-hash-TupleHashTabl.patch ===
Applying: HashAgg: use Bump allocator for hash TupleHashTable entries.
Using index info to reconstruct a base tree...
M	src/backend/executor/execGrouping.c
M	src/backend/executor/nodeAgg.c
M	src/include/nodes/execnodes.h
Falling back to patching base and 3-way merge...
Auto-merging src/include/nodes/execnodes.h
Auto-merging src/backend/executor/nodeAgg.c
CONFLICT (content): Merge conflict in src/backend/executor/nodeAgg.c
Auto-merging src/backend/executor/execGrouping.c
CONFLICT (content): Merge conflict in src/backend/executor/execGrouping.c
error: Failed to merge in the changes.
hint: Use 'git am --show-current-patch=diff' to see the failed patch
Patch failed at 0001 HashAgg: use Bump allocator for hash TupleHashTable entries.
When you have resolved this problem, run "git am --continue".
If you prefer to skip this patch, run "git am --skip" instead.
To restore the original branch and stop patching, run "git am --abort".
Unstaged changes after reset:
M	src/backend/executor/execGrouping.c
M	src/backend/executor/nodeAgg.c
M	src/include/nodes/execnodes.h
=== using patch(1) to apply patch ./v2-0001-HashAgg-use-Bump-allocator-for-hash-TupleHashTabl.patch ===
patch: unrecognized option `--no-backup-if-mismatch'
usage: patch [-bCcEeflNnRstuv] [-B backup-prefix] [-D symbol] [-d directory]
             [-F max-fuzz] [-i patchfile] [-o out-file] [-p strip-count]
             [-r rej-name] [-V t | nil | never | none] [-x number]
             [-z backup-ext] [--posix] [origfile [patchfile]]
       patch <patchfile
diff --cc src/backend/executor/execGrouping.c
--- a/src/backend/executor/execGrouping.c
+++ b/src/backend/executor/execGrouping.c
++<<<<<<< ours
 +			entry->additional = NULL;
 +			MemoryContextSwitchTo(hashtable->tablecxt);
 +			/* Copy the first tuple into the table context */
 +			entry->firstTuple = ExecCopySlotMinimalTuple(slot);
++=======
+ 			MinimalTuple mtup;
+ 			MinimalTuple firstTuple;
+ 			size_t		totalsize;	/* including alignment and additionalsize */
+
+ 			/* created new entry */
+ 			*isnew = true;
+
+ 			/*
+ 			 * Extract the minimal tuple into the temp context, then copy it
+ 			 * into the table context.
+ 			 */
+ 			MemoryContextSwitchTo(hashtable->tempcxt);
+ 			mtup = ExecCopySlotMinimalTuple(slot);
+
+ 			/*
+ 			 * Allocate space for the MinimalTuple followed by empty space of
+ 			 * size additionalsize. The caller can get a maxaligned pointer to
+ 			 * this data with TupleHashEntryGetAdditional(), and store
+ 			 * arbitrary data there.
+ 			 *
+ 			 * This avoids the need to store an extra pointer or allocate an
+ 			 * additional chunk, which would waste memory.
+ 			 */
+ 			totalsize = MAXALIGN(mtup->t_len) + hashtable->additionalsize;
+ 			firstTuple = MemoryContextAlloc(hashtable->tablecxt, totalsize);
+ 			memcpy(firstTuple, mtup, mtup->t_len);
+ 			memset((char *) firstTuple + firstTuple->t_len, 0,
+ 				   totalsize - firstTuple->t_len);
+
+ 			entry->firstTuple = firstTuple;
++>>>>>>> theirs
  		}
  	}
  	else
diff --cc src/backend/executor/nodeAgg.c
index b4a7698a0b3,43a90ed6f7a..00000000000
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@@ -1864,34 -1868,22 +1874,42 @@@ hash_agg_check_limits(AggState *aggstat
  	uint64		ngroups = aggstate->hash_ngroups_current;
  	Size		meta_mem = MemoryContextMemAllocated(aggstate->hash_metacxt,
  													 true);
++<<<<<<< ours
 +	Size		hashkey_mem = MemoryContextMemAllocated(aggstate->hashcontext->ecxt_per_tuple_memory,
 +														true);
 +	bool		do_spill = false;
 +
 +#ifdef USE_INJECTION_POINTS
 +	if (ngroups >= 1000)
 +	{
 +		if (IS_INJECTION_POINT_ATTACHED("hash-aggregate-spill-1000"))
 +		{
 +			do_spill = true;
 +			INJECTION_POINT_CACHED("hash-aggregate-spill-1000");
 +		}
 +	}
 +#endif
++=======
+ 	Size		entry_mem = MemoryContextMemAllocated(aggstate->hash_tablecxt,
+ 												      true);
+ 	Size		tval_mem = MemoryContextMemAllocated(aggstate->hashcontext->ecxt_per_tuple_memory,
+ 												     true);
+ 	Size		total_mem = meta_mem + entry_mem + tval_mem;
++>>>>>>> theirs

  	/*
  	 * Don't spill unless there's at least one group in the hash table so we
  	 * can be sure to make progress even in edge cases.
  	 */
  	if (aggstate->hash_ngroups_current > 0 &&
- 		(meta_mem + hashkey_mem > aggstate->hash_mem_limit ||
+ 		(total_mem > aggstate->hash_mem_limit ||
  		 ngroups > aggstate->hash_ngroups_limit))
  	{
- 		hash_agg_enter_spill_mode(aggstate);
+ 		do_spill = true;
  	}
 +
 +	if (do_spill)
 +		hash_agg_enter_spill_mode(aggstate);
  }

 /*
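The "theirs" hunk in execGrouping.c is the heart of the patch: instead of one
chunk for the MinimalTuple plus a separately allocated entry->additional
pointer per hash entry, a single allocation holds the tuple followed by
MAXALIGN'ed caller space. Below is a minimal standalone sketch of that layout
in plain C, using malloc in place of PostgreSQL memory contexts; Mtup,
alloc_entry, and entry_additional are illustrative stand-ins, not the
executor's real types or APIs.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAXIMUM_ALIGNOF 8
#define MAXALIGN(len) \
	(((size_t) (len) + MAXIMUM_ALIGNOF - 1) & ~((size_t) (MAXIMUM_ALIGNOF - 1)))

typedef struct Mtup				/* simplified stand-in for MinimalTuple */
{
	size_t		t_len;			/* total length of the tuple data */
	char		data[];
} Mtup;

/* Allocate tuple + trailing per-entry space in one chunk, as the patch does. */
static Mtup *
alloc_entry(const char *payload, size_t additionalsize)
{
	size_t		t_len = sizeof(Mtup) + strlen(payload) + 1;
	size_t		totalsize = MAXALIGN(t_len) + additionalsize;
	Mtup	   *tup = malloc(totalsize);

	tup->t_len = t_len;
	memcpy(tup->data, payload, strlen(payload) + 1);
	/* zero the alignment padding and the additional space */
	memset((char *) tup + t_len, 0, totalsize - t_len);
	return tup;
}

/* Analogue of TupleHashEntryGetAdditional(): maxaligned pointer past the tuple. */
static void *
entry_additional(Mtup *tup)
{
	return (char *) tup + MAXALIGN(tup->t_len);
}

int
main(void)
{
	Mtup	   *tup = alloc_entry("group key", sizeof(long));
	long	   *count = entry_additional(tup);

	*count += 1;				/* caller-private state lives inline */
	printf("%s: %ld\n", tup->data, *count);
	free(tup);
	return 0;
}

Relative to a separate entry->additional chunk, the single allocation saves a
pointer per entry plus per-chunk allocator overhead, which is what the patch
comment means by avoiding wasted memory.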
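The nodeAgg.c conflict arises because both sides rewrote hash_agg_check_limits():
"ours" (master) routes every spill decision through a do_spill flag so an
injection point can force spilling, while "theirs" (the patch) adds the new
entry context to the memory accounting, summing three contexts into total_mem.
A resolution has to keep both. The sketch below shows how the two sides could
combine; AggLimits, should_spill, and injection_forced are hypothetical
stand-ins for the executor state and the USE_INJECTION_POINTS hook, not the
committed resolution.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct AggLimits
{
	uint64_t	ngroups;			/* groups currently in the hash table */
	size_t		meta_mem;			/* hash_metacxt allocation */
	size_t		entry_mem;			/* hash_tablecxt allocation ("theirs") */
	size_t		tval_mem;			/* per-tuple transition values */
	size_t		hash_mem_limit;
	uint64_t	hash_ngroups_limit;
} AggLimits;

/* Returns true when the aggregate should enter spill mode. */
static bool
should_spill(const AggLimits *s, bool injection_forced)
{
	/* "theirs": account for all three contexts */
	size_t		total_mem = s->meta_mem + s->entry_mem + s->tval_mem;
	/* "ours": one flag, so the test hook and the limit check share an exit */
	bool		do_spill = injection_forced;

	/*
	 * Don't spill unless there's at least one group in the hash table so
	 * we can be sure to make progress even in edge cases.
	 */
	if (s->ngroups > 0 &&
		(total_mem > s->hash_mem_limit ||
		 s->ngroups > s->hash_ngroups_limit))
		do_spill = true;

	return do_spill;
}

int
main(void)
{
	AggLimits	over = {.ngroups = 1500, .meta_mem = 4096,
						.entry_mem = 1 << 20, .tval_mem = 1 << 19,
						.hash_mem_limit = 1 << 20,
						.hash_ngroups_limit = 1000000};
	AggLimits	under = over;

	under.entry_mem = 8192;
	under.tval_mem = 4096;

	printf("over limit: %s\n", should_spill(&over, false) ? "spill" : "stay");
	printf("under limit, hook attached: %s\n",
		   should_spill(&under, true) ? "spill" : "stay");
	return 0;
}

Funneling both triggers through one flag is what the do_spill restructuring on
the "ours" side buys: the injection point can force the spill path without
duplicating the hash_agg_enter_spill_mode() call.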