=== Applying patches on top of PostgreSQL commit ID 80d7f990496b1c7be61d9a00a2635b7d96b96197 ===
/etc/rc.d/jail: WARNING: Per-jail configuration via jail_* variables is obsolete. Please consider migrating to /etc/jail.conf.
Wed Feb 19 16:32:24 UTC 2025
On branch cf/5562
nothing to commit, working tree clean
=== applying patch ./v2-0001-HashAgg-use-Bump-allocator-for-hash-TupleHashTabl.patch
Applied patch to 'src/backend/executor/execGrouping.c' with conflicts.
Applied patch to 'src/backend/executor/nodeAgg.c' with conflicts.
Applied patch to 'src/include/nodes/execnodes.h' cleanly.
U src/backend/executor/execGrouping.c
U src/backend/executor/nodeAgg.c
diff --cc src/backend/executor/execGrouping.c
index 33b124fbb0,9be4963d06..0000000000
--- a/src/backend/executor/execGrouping.c
+++ b/src/backend/executor/execGrouping.c
@@@ -477,13 -512,36 +477,46 @@@ LookupTupleHashEntry_internal(TupleHash
 		}
 		else
 		{
++<<<<<<< ours
+			/* created new entry */
+			*isnew = true;
+			/* zero caller data */
+			entry->additional = NULL;
+			MemoryContextSwitchTo(hashtable->tablecxt);
+			/* Copy the first tuple into the table context */
+			entry->firstTuple = ExecCopySlotMinimalTuple(slot);
++=======
+			MinimalTuple mtup;
+			MinimalTuple firstTuple;
+			size_t		totalsize;	/* including alignment and additionalsize */
+
+			/* created new entry */
+			*isnew = true;
+
+			/*
+			 * Extract the minimal tuple into the temp context, then copy it
+			 * into the table context.
+			 */
+			MemoryContextSwitchTo(hashtable->tempcxt);
+			mtup = ExecCopySlotMinimalTuple(slot);
+
+			/*
+			 * Allocate space for the MinimalTuple followed by empty space of
+			 * size additionalsize. The caller can get a maxaligned pointer to
+			 * this data with TupleHashEntryGetAdditional(), and store
+			 * arbitrary data there.
+			 *
+			 * This avoids the need to store an extra pointer or allocate an
+			 * additional chunk, which would waste memory.
+			 */
+			totalsize = MAXALIGN(mtup->t_len) + hashtable->additionalsize;
+			firstTuple = MemoryContextAlloc(hashtable->tablecxt, totalsize);
+			memcpy(firstTuple, mtup, mtup->t_len);
+			memset((char *) firstTuple + firstTuple->t_len, 0,
+				   totalsize - firstTuple->t_len);
+
+			entry->firstTuple = firstTuple;
++>>>>>>> theirs
 		}
 	}
 	else
diff --cc src/backend/executor/nodeAgg.c
index ceb8c8a803,43a90ed6f7..0000000000
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@@ -1867,34 -1868,22 +1877,42 @@@ hash_agg_check_limits(AggState *aggstat
 	uint64		ngroups = aggstate->hash_ngroups_current;
 	Size		meta_mem = MemoryContextMemAllocated(aggstate->hash_metacxt,
 													 true);
++<<<<<<< ours
+	Size		hashkey_mem = MemoryContextMemAllocated(aggstate->hashcontext->ecxt_per_tuple_memory,
+														true);
+	bool		do_spill = false;
+
+#ifdef USE_INJECTION_POINTS
+	if (ngroups >= 1000)
+	{
+		if (IS_INJECTION_POINT_ATTACHED("hash-aggregate-spill-1000"))
+		{
+			do_spill = true;
+			INJECTION_POINT_CACHED("hash-aggregate-spill-1000");
+		}
+	}
+#endif
++=======
+	Size		entry_mem = MemoryContextMemAllocated(aggstate->hash_tablecxt,
+													  true);
+	Size		tval_mem = MemoryContextMemAllocated(aggstate->hashcontext->ecxt_per_tuple_memory,
+													 true);
+	Size		total_mem = meta_mem + entry_mem + tval_mem;
++>>>>>>> theirs
 
 	/*
 	 * Don't spill unless there's at least one group in the hash table so we
 	 * can be sure to make progress even in edge cases.
 	 */
 	if (aggstate->hash_ngroups_current > 0 &&
-		(meta_mem + hashkey_mem > aggstate->hash_mem_limit ||
+		(total_mem > aggstate->hash_mem_limit ||
 		 ngroups > aggstate->hash_ngroups_limit))
 	{
-		hash_agg_enter_spill_mode(aggstate);
+		do_spill = true;
 	}
+
+	if (do_spill)
+		hash_agg_enter_spill_mode(aggstate);
 }
 
 /*
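
As context for the conflicting hunk in execGrouping.c: the patched code lays each hash entry out as a MinimalTuple followed by MAXALIGN-padded space of additionalsize bytes, which the caller later reaches through TupleHashEntryGetAdditional(). The stand-alone C sketch below illustrates only that layout idea; mini_tuple, MAXALIGN, and get_additional() are simplified stand-ins invented here, not the actual PostgreSQL definitions of MinimalTuple, MAXALIGN, and TupleHashEntryGetAdditional().

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simplified stand-ins; the real definitions live in the PostgreSQL headers. */
#define MAXIMUM_ALIGNOF 8
#define MAXALIGN(len) \
	(((size_t) (len) + (MAXIMUM_ALIGNOF - 1)) & ~((size_t) (MAXIMUM_ALIGNOF - 1)))

typedef struct mini_tuple
{
	unsigned int t_len;			/* total tuple length, like MinimalTuple's t_len */
	char		data[];			/* tuple body */
} mini_tuple;

/*
 * Analogous to TupleHashEntryGetAdditional(): the caller's per-group data
 * starts at the first maxaligned offset past the copied tuple.
 */
static void *
get_additional(mini_tuple *firstTuple)
{
	return (char *) firstTuple + MAXALIGN(firstTuple->t_len);
}

int
main(void)
{
	const char *payload = "group key";
	unsigned int t_len = sizeof(mini_tuple) + strlen(payload) + 1;
	size_t		additionalsize = 32;	/* caller-requested per-entry space */

	/*
	 * One allocation covers the tuple plus the additional space, mirroring
	 * totalsize = MAXALIGN(mtup->t_len) + hashtable->additionalsize.
	 */
	size_t		totalsize = MAXALIGN(t_len) + additionalsize;
	mini_tuple *firstTuple = malloc(totalsize);

	if (firstTuple == NULL)
		return 1;

	firstTuple->t_len = t_len;
	memcpy(firstTuple->data, payload, strlen(payload) + 1);

	/* Zero everything past the tuple, including the alignment padding. */
	memset((char *) firstTuple + t_len, 0, totalsize - t_len);

	printf("tuple at %p, additional data at %p (offset %zu)\n",
		   (void *) firstTuple, get_additional(firstTuple),
		   (size_t) ((char *) get_additional(firstTuple) - (char *) firstTuple));

	free(firstTuple);
	return 0;
}

The point of the single allocation, as the patch comment in the hunk explains, is that the per-group data needs neither a separate pointer in the entry nor a second memory chunk.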