=== Applying patches on top of PostgreSQL commit ID 144ad723a4484927266a316d1c9550d56745ff67 ===
/etc/rc.d/jail: WARNING: Per-jail configuration via jail_* variables is obsolete. Please consider migrating to /etc/jail.conf.
Sun Jul 6 07:35:21 UTC 2025
On branch cf/5652
nothing to commit, working tree clean
=== using 'git am' to apply patch ./v4-0001-Add-pg_dsm_registry_allocations-system-view.patch ===
Applying: Add pg_dsm_registry_allocations system view
Using index info to reconstruct a base tree...
M	doc/src/sgml/system-views.sgml
M	src/backend/catalog/system_views.sql
M	src/backend/storage/ipc/dsm_registry.c
M	src/include/catalog/pg_proc.dat
M	src/test/modules/test_dsm_registry/expected/test_dsm_registry.out
M	src/test/modules/test_dsm_registry/sql/test_dsm_registry.sql
Falling back to patching base and 3-way merge...
Auto-merging src/test/modules/test_dsm_registry/sql/test_dsm_registry.sql
CONFLICT (content): Merge conflict in src/test/modules/test_dsm_registry/sql/test_dsm_registry.sql
Auto-merging src/test/modules/test_dsm_registry/expected/test_dsm_registry.out
CONFLICT (content): Merge conflict in src/test/modules/test_dsm_registry/expected/test_dsm_registry.out
Auto-merging src/include/catalog/pg_proc.dat
Auto-merging src/backend/storage/ipc/dsm_registry.c
CONFLICT (content): Merge conflict in src/backend/storage/ipc/dsm_registry.c
Auto-merging src/backend/catalog/system_views.sql
Auto-merging doc/src/sgml/system-views.sgml
error: Failed to merge in the changes.
hint: Use 'git am --show-current-patch=diff' to see the failed patch
Patch failed at 0001 Add pg_dsm_registry_allocations system view
When you have resolved this problem, run "git am --continue".
If you prefer to skip this patch, run "git am --skip" instead.
To restore the original branch and stop patching, run "git am --abort".
Unstaged changes after reset:
M	doc/src/sgml/system-views.sgml
M	src/backend/catalog/system_views.sql
M	src/backend/storage/ipc/dsm_registry.c
M	src/include/catalog/pg_proc.dat
M	src/test/modules/test_dsm_registry/expected/test_dsm_registry.out
M	src/test/modules/test_dsm_registry/sql/test_dsm_registry.sql
M	src/test/regress/expected/rules.out
=== using patch(1) to apply patch ./v4-0001-Add-pg_dsm_registry_allocations-system-view.patch ===
patching file doc/src/sgml/system-views.sgml
Hunk #2 succeeded at 4148 (offset -3 lines).
patching file src/backend/catalog/system_views.sql
patching file src/backend/storage/ipc/dsm_registry.c
Hunk #1 succeeded at 40 with fuzz 1 (offset 14 lines).
Hunk #2 succeeded at 437 (offset 237 lines).
patching file src/include/catalog/pg_proc.dat
Hunk #1 succeeded at 8559 (offset 18 lines).
patching file src/test/modules/test_dsm_registry/expected/test_dsm_registry.out
Hunk #1 succeeded at 24 with fuzz 1 (offset 12 lines).
patching file src/test/modules/test_dsm_registry/sql/test_dsm_registry.sql
Hunk #1 FAILED at 2.
1 out of 1 hunk FAILED -- saving rejects to file src/test/modules/test_dsm_registry/sql/test_dsm_registry.sql.rej
patching file src/test/regress/expected/rules.out
Unstaged changes after reset:
M	doc/src/sgml/system-views.sgml
M	src/backend/catalog/system_views.sql
M	src/backend/storage/ipc/dsm_registry.c
M	src/include/catalog/pg_proc.dat
M	src/test/modules/test_dsm_registry/expected/test_dsm_registry.out
M	src/test/regress/expected/rules.out
Removing src/test/modules/test_dsm_registry/sql/test_dsm_registry.sql.rej
=== using 'git apply' to apply patch ./v4-0001-Add-pg_dsm_registry_allocations-system-view.patch ===
Applied patch to 'doc/src/sgml/system-views.sgml' cleanly.
Applied patch to 'src/backend/catalog/system_views.sql' cleanly.
Applied patch to 'src/backend/storage/ipc/dsm_registry.c' with conflicts.
Applied patch to 'src/include/catalog/pg_proc.dat' cleanly.
Applied patch to 'src/test/modules/test_dsm_registry/expected/test_dsm_registry.out' with conflicts.
Applied patch to 'src/test/modules/test_dsm_registry/sql/test_dsm_registry.sql' with conflicts.
Applied patch to 'src/test/regress/expected/rules.out' cleanly.
U src/backend/storage/ipc/dsm_registry.c
U src/test/modules/test_dsm_registry/expected/test_dsm_registry.out
U src/test/modules/test_dsm_registry/sql/test_dsm_registry.sql
diff --cc src/backend/storage/ipc/dsm_registry.c
index 828c2ff0c7f,0650c70e173..00000000000
--- a/src/backend/storage/ipc/dsm_registry.c
+++ b/src/backend/storage/ipc/dsm_registry.c
@@@ -44,14 -31,9 +45,15 @@@
  #include "storage/dsm_registry.h"
  #include "storage/lwlock.h"
  #include "storage/shmem.h"
+ #include "utils/builtins.h"
  #include "utils/memutils.h"
  
 +#define DSMR_NAME_LEN	128
 +
 +#define DSMR_DSA_TRANCHE_SUFFIX	" DSA"
 +#define DSMR_DSA_TRANCHE_SUFFIX_LEN	(sizeof(DSMR_DSA_TRANCHE_SUFFIX) - 1)
 +#define DSMR_DSA_TRANCHE_NAME_LEN	(DSMR_NAME_LEN + DSMR_DSA_TRANCHE_SUFFIX_LEN)
 +
  typedef struct DSMRegistryCtxStruct
  {
  	dsa_handle	dsah;
@@@ -259,179 -201,34 +261,212 @@@ GetNamedDSMSegment(const char *name, si
  	return ret;
  }
  
++<<<<<<< ours
 +/*
 + * Initialize or attach a named DSA.
 + *
 + * This routine returns a pointer to the DSA. A new LWLock tranche ID will be
 + * generated if needed. Note that the lock tranche will be registered with the
 + * provided name. Also note that this should be called at most once for a
 + * given DSA in each backend.
 + */
 +dsa_area *
 +GetNamedDSA(const char *name, bool *found)
 +{
 +	DSMRegistryEntry *entry;
 +	MemoryContext oldcontext;
 +	dsa_area   *ret;
 +
 +	Assert(found);
 +
 +	if (!name || *name == '\0')
 +		ereport(ERROR,
 +				(errmsg("DSA name cannot be empty")));
 +
 +	if (strlen(name) >= offsetof(DSMRegistryEntry, type))
 +		ereport(ERROR,
 +				(errmsg("DSA name too long")));
 +
 +	/* Be sure any local memory allocated by DSM/DSA routines is persistent. */
 +	oldcontext = MemoryContextSwitchTo(TopMemoryContext);
 +
 +	/* Connect to the registry. */
 +	init_dsm_registry();
 +
 +	entry = dshash_find_or_insert(dsm_registry_table, name, found);
 +	if (!(*found))
 +	{
 +		NamedDSAState *state = &entry->data.dsa;
 +
 +		entry->type = DSMR_ENTRY_TYPE_DSA;
 +
 +		/* Initialize the LWLock tranche for the DSA. */
 +		state->tranche = LWLockNewTrancheId();
 +		strcpy(state->tranche_name, name);
 +		LWLockRegisterTranche(state->tranche, state->tranche_name);
 +
 +		/* Initialize the DSA. */
 +		ret = dsa_create(state->tranche);
 +		dsa_pin(ret);
 +		dsa_pin_mapping(ret);
 +
 +		/* Store handle for other backends to use. */
 +		state->handle = dsa_get_handle(ret);
 +	}
 +	else if (entry->type != DSMR_ENTRY_TYPE_DSA)
 +		ereport(ERROR,
 +				(errmsg("requested DSA does not match type of existing entry")));
 +	else
 +	{
 +		NamedDSAState *state = &entry->data.dsa;
 +
 +		if (dsa_is_attached(state->handle))
 +			ereport(ERROR,
 +					(errmsg("requested DSA already attached to current process")));
 +
 +		/* Initialize existing LWLock tranche for the DSA. */
 +		LWLockRegisterTranche(state->tranche, state->tranche_name);
 +
 +		/* Attach to existing DSA. */
 +		ret = dsa_attach(state->handle);
 +		dsa_pin_mapping(ret);
 +	}
 +
 +	dshash_release_lock(dsm_registry_table, entry);
 +	MemoryContextSwitchTo(oldcontext);
 +
 +	return ret;
 +}
 +
 +/*
 + * Initialize or attach a named dshash table.
 + *
 + * This routine returns the address of the table. The tranche_id member of
 + * params is ignored; new tranche IDs will be generated if needed. Note that
 + * the DSA lock tranche will be registered with the provided name with " DSA"
 + * appended. The dshash lock tranche will be registered with the provided
 + * name. Also note that this should be called at most once for a given table
 + * in each backend.
 + */
 +dshash_table *
 +GetNamedDSHash(const char *name, const dshash_parameters *params, bool *found)
 +{
 +	DSMRegistryEntry *entry;
 +	MemoryContext oldcontext;
 +	dshash_table *ret;
 +
 +	Assert(params);
 +	Assert(found);
 +
 +	if (!name || *name == '\0')
 +		ereport(ERROR,
 +				(errmsg("DSHash name cannot be empty")));
 +
 +	if (strlen(name) >= offsetof(DSMRegistryEntry, type))
 +		ereport(ERROR,
 +				(errmsg("DSHash name too long")));
 +
 +	/* Be sure any local memory allocated by DSM/DSA routines is persistent. */
 +	oldcontext = MemoryContextSwitchTo(TopMemoryContext);
 +
 +	/* Connect to the registry. */
 +	init_dsm_registry();
 +
 +	entry = dshash_find_or_insert(dsm_registry_table, name, found);
 +	if (!(*found))
 +	{
 +		NamedDSAState *dsa_state = &entry->data.dsh.dsa;
 +		NamedDSHState *dsh_state = &entry->data.dsh;
 +		dshash_parameters params_copy;
 +		dsa_area   *dsa;
 +
 +		entry->type = DSMR_ENTRY_TYPE_DSH;
 +
 +		/* Initialize the LWLock tranche for the DSA. */
 +		dsa_state->tranche = LWLockNewTrancheId();
 +		sprintf(dsa_state->tranche_name, "%s%s", name, DSMR_DSA_TRANCHE_SUFFIX);
 +		LWLockRegisterTranche(dsa_state->tranche, dsa_state->tranche_name);
 +
 +		/* Initialize the LWLock tranche for the dshash table. */
 +		dsh_state->tranche = LWLockNewTrancheId();
 +		strcpy(dsh_state->tranche_name, name);
 +		LWLockRegisterTranche(dsh_state->tranche, dsh_state->tranche_name);
 +
 +		/* Initialize the DSA for the hash table. */
 +		dsa = dsa_create(dsa_state->tranche);
 +		dsa_pin(dsa);
 +		dsa_pin_mapping(dsa);
 +
 +		/* Initialize the dshash table. */
 +		memcpy(&params_copy, params, sizeof(dshash_parameters));
 +		params_copy.tranche_id = dsh_state->tranche;
 +		ret = dshash_create(dsa, &params_copy, NULL);
 +
 +		/* Store handles for other backends to use. */
 +		dsa_state->handle = dsa_get_handle(dsa);
 +		dsh_state->handle = dshash_get_hash_table_handle(ret);
 +	}
 +	else if (entry->type != DSMR_ENTRY_TYPE_DSH)
 +		ereport(ERROR,
 +				(errmsg("requested DSHash does not match type of existing entry")));
 +	else
 +	{
 +		NamedDSAState *dsa_state = &entry->data.dsh.dsa;
 +		NamedDSHState *dsh_state = &entry->data.dsh;
 +		dsa_area   *dsa;
 +
 +		/* XXX: Should we verify params matches what table was created with? */
 +
 +		if (dsa_is_attached(dsa_state->handle))
 +			ereport(ERROR,
 +					(errmsg("requested DSHash already attached to current process")));
 +
 +		/* Initialize existing LWLock tranches for the DSA and dshash table. */
 +		LWLockRegisterTranche(dsa_state->tranche, dsa_state->tranche_name);
 +		LWLockRegisterTranche(dsh_state->tranche, dsh_state->tranche_name);
 +
 +		/* Attach to existing DSA for the hash table. */
 +		dsa = dsa_attach(dsa_state->handle);
 +		dsa_pin_mapping(dsa);
 +
 +		/* Attach to existing dshash table. */
 +		ret = dshash_attach(dsa, params, dsh_state->handle, NULL);
 +	}
 +
 +	dshash_release_lock(dsm_registry_table, entry);
 +	MemoryContextSwitchTo(oldcontext);
 +
 +	return ret;
++=======
+ Datum
+ pg_get_dsm_registry_allocations(PG_FUNCTION_ARGS)
+ {
+ 	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+ 	DSMRegistryEntry *entry;
+ 	dshash_seq_status status;
+ 
+ 	InitMaterializedSRF(fcinfo, MAT_SRF_USE_EXPECTED_DESC);
+ 
+ 	/* Ensure DSM registry initialized */
+ 	init_dsm_registry();
+ 
+ 	/* Use non-exclusive access to avoid blocking other backends */
+ 	dshash_seq_init(&status, dsm_registry_table, false);
+ 
+ 	while ((entry = dshash_seq_next(&status)) != NULL)
+ 	{
+ 		Datum		values[2];
+ 		bool		nulls[2] = {false, false};
+ 
+ 		values[0] = CStringGetTextDatum(entry->name);
+ 		values[1] = Int64GetDatum(entry->size);
+ 
+ 		tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc,
+ 							 values, nulls);
+ 	}
+ 
+ 	dshash_seq_term(&status);
+ 
+ 	return (Datum) 0;
++>>>>>>> theirs
  }
diff --cc src/test/modules/test_dsm_registry/expected/test_dsm_registry.out
index 8ded82e59d6,3c279378b5b..00000000000
--- a/src/test/modules/test_dsm_registry/expected/test_dsm_registry.out
+++ b/src/test/modules/test_dsm_registry/expected/test_dsm_registry.out
@@@ -18,9 -12,9 +18,16 @@@ SELECT get_val_in_shmem()
               1236
  (1 row)
  
++<<<<<<< ours
 +SELECT get_val_in_hash('test');
 + get_val_in_hash 
 +-----------------
 +            1414
++=======
+ SELECT size > 0 FROM pg_dsm_registry_allocations WHERE name = 'test_dsm_registry';
+  ?column? 
+ ----------
+  t
++>>>>>>> theirs
  (1 row)
  
diff --cc src/test/modules/test_dsm_registry/sql/test_dsm_registry.sql
index c2e25cddaae,d27ee2e6a1a..00000000000
--- a/src/test/modules/test_dsm_registry/sql/test_dsm_registry.sql
+++ b/src/test/modules/test_dsm_registry/sql/test_dsm_registry.sql
@@@ -1,6 -1,6 +1,11 @@@
  CREATE EXTENSION test_dsm_registry;
  SELECT set_val_in_shmem(1236);
 +SELECT set_val_in_hash('test', '1414');
  \c
  SELECT get_val_in_shmem();
++<<<<<<< ours
 +SELECT get_val_in_hash('test');
++=======
+ 
+ SELECT size > 0 FROM pg_dsm_registry_allocations WHERE name = 'test_dsm_registry';
++>>>>>>> theirs
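=== illustrative resolution of the test_dsm_registry.sql conflict (sketch, not part of the patch) ===
The conflicts arise because the base branch already contains the GetNamedDSA()/GetNamedDSHash() work (the "ours" side above), while v4 of the patch was generated against an older tree. A plausible rebase of the SQL test keeps both sides of the final hunk; a merged test_dsm_registry.sql could look roughly like this:

    CREATE EXTENSION test_dsm_registry;
    SELECT set_val_in_shmem(1236);
    SELECT set_val_in_hash('test', '1414');
    \c
    SELECT get_val_in_shmem();
    SELECT get_val_in_hash('test');
    -- new check from the patch: the registry view should report the test segment
    SELECT size > 0 FROM pg_dsm_registry_allocations WHERE name = 'test_dsm_registry';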
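=== illustrative query against the proposed view (sketch) ===
The pg_get_dsm_registry_allocations() function in the patch fills two columns per DSM registry entry, name and size, so once the patch applies cleanly the new view can presumably be queried as below; the column list is inferred from the v4 function shown above, not from any final committed definition:

    -- list named DSM registry entries, largest allocation first
    SELECT name, size
    FROM pg_dsm_registry_allocations
    ORDER BY size DESC;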