author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-04 12:18:03 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-04 12:18:03 +0000
commit    b4b8efbd3826ac0af2d1c2e7c40fcf80a4bfba45 (patch)
tree      bec866278030c41c624a91037b1dd88f41c99d8e /src/backend/utils
parent    Adding upstream version 15.5. (diff)
Adding upstream version 15.6. (upstream/15.6)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/backend/utils')
-rw-r--r--  src/backend/utils/activity/backend_status.c |   6
-rw-r--r--  src/backend/utils/activity/pgstat.c         |   3
-rw-r--r--  src/backend/utils/activity/wait_event.c     |   3
-rw-r--r--  src/backend/utils/adt/timestamp.c           |  96
-rw-r--r--  src/backend/utils/adt/xml.c                 |  14
-rw-r--r--  src/backend/utils/cache/catcache.c          | 294
-rw-r--r--  src/backend/utils/cache/relmapper.c         |  16
-rw-r--r--  src/backend/utils/error/elog.c              |   4
-rw-r--r--  src/backend/utils/misc/guc.c                |  11
9 files changed, 308 insertions, 139 deletions
diff --git a/src/backend/utils/activity/backend_status.c b/src/backend/utils/activity/backend_status.c
index 3ecb15d..73f23bc 100644
--- a/src/backend/utils/activity/backend_status.c
+++ b/src/backend/utils/activity/backend_status.c
@@ -263,9 +263,9 @@ pgstat_beinit(void)
* Assign the MyBEEntry for an auxiliary process. Since it doesn't
* have a BackendId, the slot is statically allocated based on the
* auxiliary process type (MyAuxProcType). Backends use slots indexed
- * in the range from 1 to MaxBackends (inclusive), so we use
- * MaxBackends + AuxBackendType + 1 as the index of the slot for an
- * auxiliary process.
+ * in the range from 0 to MaxBackends (exclusive), so we use
+ * MaxBackends + AuxProcType as the index of the slot for an auxiliary
+ * process.
*/
MyBEEntry = &BackendStatusArray[MaxBackends + MyAuxProcType];
}
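
The comment correction above matters because it documents the real indexing scheme: regular backends occupy BackendStatusArray slots 0 through MaxBackends - 1, and auxiliary processes are appended after them, so an auxiliary process simply gets slot MaxBackends + MyAuxProcType with no extra offset. A standalone sketch of that layout, using made-up constants in place of MaxBackends and the auxiliary-process enum:

#include <assert.h>

/* Illustrative stand-ins; the real values come from MaxBackends and the
 * auxiliary-process type enum, not from these defines. */
#define MAX_BACKENDS        100
#define NUM_AUX_PROC_TYPES    5

/* Regular backends use slots [0, MAX_BACKENDS); auxiliary processes are
 * appended after them, one slot per process type. */
static int
status_slot_for_backend(int backend_index)
{
    assert(backend_index >= 0 && backend_index < MAX_BACKENDS);
    return backend_index;
}

static int
status_slot_for_aux(int aux_proc_type)
{
    assert(aux_proc_type >= 0 && aux_proc_type < NUM_AUX_PROC_TYPES);
    return MAX_BACKENDS + aux_proc_type;
}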
diff --git a/src/backend/utils/activity/pgstat.c b/src/backend/utils/activity/pgstat.c
index 84d65a7..142e26a 100644
--- a/src/backend/utils/activity/pgstat.c
+++ b/src/backend/utils/activity/pgstat.c
@@ -918,6 +918,9 @@ pgstat_snapshot_fixed(PgStat_Kind kind)
AssertArg(pgstat_is_kind_valid(kind));
AssertArg(pgstat_get_kind_info(kind)->fixed_amount);
+ if (force_stats_snapshot_clear)
+ pgstat_clear_snapshot();
+
if (pgstat_fetch_consistency == PGSTAT_FETCH_CONSISTENCY_SNAPSHOT)
pgstat_build_snapshot();
else
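
The added check makes pgstat_snapshot_fixed() throw away any existing snapshot when force_stats_snapshot_clear has been set (the flag is raised elsewhere, e.g. when the fetch-consistency setting changes), so callers never read fixed-amount stats out of a snapshot that is known to be stale. A minimal sketch of the same deferred-invalidation pattern, with hypothetical names rather than the pgstat internals:

#include <stdbool.h>
#include <string.h>

/* Hypothetical cached snapshot plus a flag set by whoever invalidates it. */
static struct
{
    bool valid;
    long counters[16];
} snapshot;

static bool force_snapshot_clear = false;

static void
snapshot_clear(void)
{
    memset(&snapshot, 0, sizeof(snapshot));
    force_snapshot_clear = false;
}

static void
snapshot_build(void)
{
    /* ... expensive rebuild elided ... */
    snapshot.valid = true;
}

/* Fetches honor the flag lazily, at the next access. */
static const long *
snapshot_fetch(void)
{
    if (force_snapshot_clear)
        snapshot_clear();
    if (!snapshot.valid)
        snapshot_build();
    return snapshot.counters;
}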
diff --git a/src/backend/utils/activity/wait_event.c b/src/backend/utils/activity/wait_event.c
index 87c15b9..7875257 100644
--- a/src/backend/utils/activity/wait_event.c
+++ b/src/backend/utils/activity/wait_event.c
@@ -702,6 +702,9 @@ pgstat_get_wait_io(WaitEventIO w)
case WAIT_EVENT_TWOPHASE_FILE_WRITE:
event_name = "TwophaseFileWrite";
break;
+ case WAIT_EVENT_VERSION_FILE_SYNC:
+ event_name = "VersionFileSync";
+ break;
case WAIT_EVENT_VERSION_FILE_WRITE:
event_name = "VersionFileWrite";
break;
diff --git a/src/backend/utils/adt/timestamp.c b/src/backend/utils/adt/timestamp.c
index f70f829..27073cb 100644
--- a/src/backend/utils/adt/timestamp.c
+++ b/src/backend/utils/adt/timestamp.c
@@ -2949,8 +2949,16 @@ timestamp_pl_interval(PG_FUNCTION_ARGS)
(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
errmsg("timestamp out of range")));
- /* Add days by converting to and from Julian */
- julian = date2j(tm->tm_year, tm->tm_mon, tm->tm_mday) + span->day;
+ /*
+ * Add days by converting to and from Julian. We need an overflow
+ * check here since j2date expects a non-negative integer input.
+ */
+ julian = date2j(tm->tm_year, tm->tm_mon, tm->tm_mday);
+ if (pg_add_s32_overflow(julian, span->day, &julian) ||
+ julian < 0)
+ ereport(ERROR,
+ (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
+ errmsg("timestamp out of range")));
j2date(julian, &tm->tm_year, &tm->tm_mon, &tm->tm_mday);
if (tm2timestamp(tm, fsec, NULL, &timestamp) != 0)
@@ -3057,8 +3065,19 @@ timestamptz_pl_interval(PG_FUNCTION_ARGS)
(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
errmsg("timestamp out of range")));
- /* Add days by converting to and from Julian */
- julian = date2j(tm->tm_year, tm->tm_mon, tm->tm_mday) + span->day;
+ /*
+ * Add days by converting to and from Julian. We need an overflow
+ * check here since j2date expects a non-negative integer input.
+ * In practice though, it will give correct answers for small
+ * negative Julian dates; we should allow -1 to avoid
+ * timezone-dependent failures, as discussed in timestamp.h.
+ */
+ julian = date2j(tm->tm_year, tm->tm_mon, tm->tm_mday);
+ if (pg_add_s32_overflow(julian, span->day, &julian) ||
+ julian < -1)
+ ereport(ERROR,
+ (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
+ errmsg("timestamp out of range")));
j2date(julian, &tm->tm_year, &tm->tm_mon, &tm->tm_mday);
tz = DetermineTimeZoneOffset(tm, session_timezone);
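
Both hunks above replace an unchecked "+ span->day" with pg_add_s32_overflow(), which reports overflow instead of silently wrapping, and then reject Julian day numbers that j2date() cannot handle. pg_add_s32_overflow() lives in src/include/common/int.h; the following standalone sketch only approximates its behavior:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Overflow-checked 32-bit signed addition: returns true on overflow. */
static bool
add_s32_checked(int32_t a, int32_t b, int32_t *result)
{
#if defined(__GNUC__) || defined(__clang__)
    return __builtin_add_overflow(a, b, result);
#else
    int64_t sum = (int64_t) a + (int64_t) b;

    if (sum < INT32_MIN || sum > INT32_MAX)
        return true;
    *result = (int32_t) sum;
    return false;
#endif
}

int
main(void)
{
    int32_t julian = 2451545;           /* some Julian day number */
    int32_t days = INT32_MAX;           /* an interval's day field */

    if (add_s32_checked(julian, days, &julian) || julian < 0)
        fprintf(stderr, "timestamp out of range\n");
    else
        printf("new julian day: %d\n", julian);
    return 0;
}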
@@ -3246,19 +3265,13 @@ interval_mul(PG_FUNCTION_ARGS)
result = (Interval *) palloc(sizeof(Interval));
result_double = span->month * factor;
- if (isnan(result_double) ||
- result_double > INT_MAX || result_double < INT_MIN)
- ereport(ERROR,
- (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
- errmsg("interval out of range")));
+ if (isnan(result_double) || !FLOAT8_FITS_IN_INT32(result_double))
+ goto out_of_range;
result->month = (int32) result_double;
result_double = span->day * factor;
- if (isnan(result_double) ||
- result_double > INT_MAX || result_double < INT_MIN)
- ereport(ERROR,
- (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
- errmsg("interval out of range")));
+ if (isnan(result_double) || !FLOAT8_FITS_IN_INT32(result_double))
+ goto out_of_range;
result->day = (int32) result_double;
/*
@@ -3292,20 +3305,30 @@ interval_mul(PG_FUNCTION_ARGS)
*/
if (Abs(sec_remainder) >= SECS_PER_DAY)
{
- result->day += (int) (sec_remainder / SECS_PER_DAY);
+ if (pg_add_s32_overflow(result->day,
+ (int) (sec_remainder / SECS_PER_DAY),
+ &result->day))
+ goto out_of_range;
sec_remainder -= (int) (sec_remainder / SECS_PER_DAY) * SECS_PER_DAY;
}
/* cascade units down */
- result->day += (int32) month_remainder_days;
+ if (pg_add_s32_overflow(result->day, (int32) month_remainder_days,
+ &result->day))
+ goto out_of_range;
result_double = rint(span->time * factor + sec_remainder * USECS_PER_SEC);
if (isnan(result_double) || !FLOAT8_FITS_IN_INT64(result_double))
- ereport(ERROR,
- (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
- errmsg("interval out of range")));
+ goto out_of_range;
result->time = (int64) result_double;
PG_RETURN_INTERVAL_P(result);
+
+out_of_range:
+ ereport(ERROR,
+ errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
+ errmsg("interval out of range"));
+
+ PG_RETURN_NULL(); /* keep compiler quiet */
}
Datum
@@ -3324,7 +3347,8 @@ interval_div(PG_FUNCTION_ARGS)
Interval *span = PG_GETARG_INTERVAL_P(0);
float8 factor = PG_GETARG_FLOAT8(1);
double month_remainder_days,
- sec_remainder;
+ sec_remainder,
+ result_double;
int32 orig_month = span->month,
orig_day = span->day;
Interval *result;
@@ -3336,8 +3360,15 @@ interval_div(PG_FUNCTION_ARGS)
(errcode(ERRCODE_DIVISION_BY_ZERO),
errmsg("division by zero")));
- result->month = (int32) (span->month / factor);
- result->day = (int32) (span->day / factor);
+ result_double = span->month / factor;
+ if (isnan(result_double) || !FLOAT8_FITS_IN_INT32(result_double))
+ goto out_of_range;
+ result->month = (int32) result_double;
+
+ result_double = span->day / factor;
+ if (isnan(result_double) || !FLOAT8_FITS_IN_INT32(result_double))
+ goto out_of_range;
+ result->day = (int32) result_double;
/*
* Fractional months full days into days. See comment in interval_mul().
@@ -3349,15 +3380,30 @@ interval_div(PG_FUNCTION_ARGS)
sec_remainder = TSROUND(sec_remainder);
if (Abs(sec_remainder) >= SECS_PER_DAY)
{
- result->day += (int) (sec_remainder / SECS_PER_DAY);
+ if (pg_add_s32_overflow(result->day,
+ (int) (sec_remainder / SECS_PER_DAY),
+ &result->day))
+ goto out_of_range;
sec_remainder -= (int) (sec_remainder / SECS_PER_DAY) * SECS_PER_DAY;
}
/* cascade units down */
- result->day += (int32) month_remainder_days;
- result->time = rint(span->time / factor + sec_remainder * USECS_PER_SEC);
+ if (pg_add_s32_overflow(result->day, (int32) month_remainder_days,
+ &result->day))
+ goto out_of_range;
+ result_double = rint(span->time / factor + sec_remainder * USECS_PER_SEC);
+ if (isnan(result_double) || !FLOAT8_FITS_IN_INT64(result_double))
+ goto out_of_range;
+ result->time = (int64) result_double;
PG_RETURN_INTERVAL_P(result);
+
+out_of_range:
+ ereport(ERROR,
+ errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
+ errmsg("interval out of range"));
+
+ PG_RETURN_NULL(); /* keep compiler quiet */
}
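
interval_mul() and interval_div() now funnel every failure through a single out_of_range label and test candidate values with FLOAT8_FITS_IN_INT32 / FLOAT8_FITS_IN_INT64 (macros from c.h) instead of the old open-coded INT_MAX/INT_MIN comparisons. A standalone range test in that spirit is sketched below; the upstream macro definitions are not reproduced here, so treat the details as an approximation:

#include <math.h>
#include <stdbool.h>
#include <stdint.h>

/*
 * True if "num" converts to int32 without overflow.  The upper bound is
 * written as -(double) INT32_MIN, i.e. 2^31, which is exactly representable
 * as a double; the same shape carries over to int64, where (double) INT64_MAX
 * is not exact and a "<=" comparison could let out-of-range values through.
 */
static bool
double_fits_in_int32(double num)
{
    return !isnan(num) &&
           num >= (double) INT32_MIN &&
           num < -((double) INT32_MIN);
}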
diff --git a/src/backend/utils/adt/xml.c b/src/backend/utils/adt/xml.c
index 6411f56..aae0692 100644
--- a/src/backend/utils/adt/xml.c
+++ b/src/backend/utils/adt/xml.c
@@ -65,6 +65,16 @@
#if LIBXML_VERSION >= 20704
#define HAVE_XMLSTRUCTUREDERRORCONTEXT 1
#endif
+
+/*
+ * libxml2 2.12 decided to insert "const" into the error handler API.
+ */
+#if LIBXML_VERSION >= 21200
+#define PgXmlErrorPtr const xmlError *
+#else
+#define PgXmlErrorPtr xmlErrorPtr
+#endif
+
#endif /* USE_LIBXML */
#include "access/htup_details.h"
@@ -119,7 +129,7 @@ struct PgXmlErrorContext
static xmlParserInputPtr xmlPgEntityLoader(const char *URL, const char *ID,
xmlParserCtxtPtr ctxt);
-static void xml_errorHandler(void *data, xmlErrorPtr error);
+static void xml_errorHandler(void *data, PgXmlErrorPtr error);
static void xml_ereport_by_code(int level, int sqlcode,
const char *msg, int errcode);
static void chopStringInfoNewlines(StringInfo str);
@@ -1749,7 +1759,7 @@ xml_ereport(PgXmlErrorContext *errcxt, int level, int sqlcode, const char *msg)
* Error handler for libxml errors and warnings
*/
static void
-xml_errorHandler(void *data, xmlErrorPtr error)
+xml_errorHandler(void *data, PgXmlErrorPtr error)
{
PgXmlErrorContext *xmlerrcxt = (PgXmlErrorContext *) data;
xmlParserCtxtPtr ctxt = (xmlParserCtxtPtr) error->ctxt;
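
libxml2 2.12 changed the structured error callback to take "const xmlError *", so the patch hides the difference behind PgXmlErrorPtr and uses it in both the prototype and the definition of xml_errorHandler(). The same shim works for any module that registers such a handler; a minimal sketch with hypothetical names, registering through the stock xmlSetStructuredErrorFunc():

#include <libxml/xmlversion.h>
#include <libxml/xmlerror.h>

/* Pick the pointer type that matches the installed libxml2. */
#if LIBXML_VERSION >= 21200
typedef const xmlError *my_xml_error_ptr;
#else
typedef xmlErrorPtr my_xml_error_ptr;
#endif

static void
my_error_handler(void *data, my_xml_error_ptr error)
{
    (void) data;
    (void) error;               /* real code would inspect error->message */
}

static void
install_my_handler(void *state)
{
    xmlSetStructuredErrorFunc(state, my_error_handler);
}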
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index 38e943f..d22bc07 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -25,6 +25,7 @@
#include "catalog/pg_operator.h"
#include "catalog/pg_type.h"
#include "common/hashfn.h"
+#include "common/pg_prng.h"
#include "miscadmin.h"
#include "port/pg_bitutils.h"
#ifdef CATCACHE_STATS
@@ -90,10 +91,10 @@ static void CatCachePrintStats(int code, Datum arg);
static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct);
static void CatCacheRemoveCList(CatCache *cache, CatCList *cl);
static void CatalogCacheInitializeCache(CatCache *cache);
-static CatCTup *CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
+static CatCTup *CatalogCacheCreateEntry(CatCache *cache,
+ HeapTuple ntp, SysScanDesc scandesc,
Datum *arguments,
- uint32 hashValue, Index hashIndex,
- bool negative);
+ uint32 hashValue, Index hashIndex);
static void CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, int *attnos,
Datum *keys);
@@ -1318,6 +1319,7 @@ SearchCatCacheMiss(CatCache *cache,
SysScanDesc scandesc;
HeapTuple ntp;
CatCTup *ct;
+ bool stale;
Datum arguments[CATCACHE_MAXKEYS];
/* Initialize local parameter array */
@@ -1327,16 +1329,6 @@ SearchCatCacheMiss(CatCache *cache,
arguments[3] = v4;
/*
- * Ok, need to make a lookup in the relation, copy the scankey and fill
- * out any per-call fields.
- */
- memcpy(cur_skey, cache->cc_skey, sizeof(ScanKeyData) * nkeys);
- cur_skey[0].sk_argument = v1;
- cur_skey[1].sk_argument = v2;
- cur_skey[2].sk_argument = v3;
- cur_skey[3].sk_argument = v4;
-
- /*
* Tuple was not found in cache, so we have to try to retrieve it directly
* from the relation. If found, we will add it to the cache; if not
* found, we will add a negative cache entry instead.
@@ -1350,31 +1342,57 @@ SearchCatCacheMiss(CatCache *cache,
* will eventually age out of the cache, so there's no functional problem.
* This case is rare enough that it's not worth expending extra cycles to
* detect.
+ *
+ * Another case, which we *must* handle, is that the tuple could become
+ * outdated during CatalogCacheCreateEntry's attempt to detoast it (since
+ * AcceptInvalidationMessages can run during TOAST table access). We do
+ * not want to return already-stale catcache entries, so we loop around
+ * and do the table scan again if that happens.
*/
relation = table_open(cache->cc_reloid, AccessShareLock);
- scandesc = systable_beginscan(relation,
- cache->cc_indexoid,
- IndexScanOK(cache, cur_skey),
- NULL,
- nkeys,
- cur_skey);
+ do
+ {
+ /*
+ * Ok, need to make a lookup in the relation, copy the scankey and
+ * fill out any per-call fields. (We must re-do this when retrying,
+ * because systable_beginscan scribbles on the scankey.)
+ */
+ memcpy(cur_skey, cache->cc_skey, sizeof(ScanKeyData) * nkeys);
+ cur_skey[0].sk_argument = v1;
+ cur_skey[1].sk_argument = v2;
+ cur_skey[2].sk_argument = v3;
+ cur_skey[3].sk_argument = v4;
+
+ scandesc = systable_beginscan(relation,
+ cache->cc_indexoid,
+ IndexScanOK(cache, cur_skey),
+ NULL,
+ nkeys,
+ cur_skey);
- ct = NULL;
+ ct = NULL;
+ stale = false;
- while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
- {
- ct = CatalogCacheCreateEntry(cache, ntp, arguments,
- hashValue, hashIndex,
- false);
- /* immediately set the refcount to 1 */
- ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
- ct->refcount++;
- ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
- break; /* assume only one match */
- }
+ while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
+ {
+ ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
+ hashValue, hashIndex);
+ /* upon failure, we must start the scan over */
+ if (ct == NULL)
+ {
+ stale = true;
+ break;
+ }
+ /* immediately set the refcount to 1 */
+ ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
+ ct->refcount++;
+ ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
+ break; /* assume only one match */
+ }
- systable_endscan(scandesc);
+ systable_endscan(scandesc);
+ } while (stale);
table_close(relation, AccessShareLock);
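
The rewritten SearchCatCacheMiss() wraps the scan in a do/while: when CatalogCacheCreateEntry() returns NULL because the tuple went stale while being detoasted, the scan keys are copied again (systable_beginscan() scribbles on them) and the whole index scan is repeated. A compressed sketch of just that control flow, with hypothetical helpers standing in for the systable_* calls:

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical stand-ins for the scan and entry-building steps. */
typedef struct scan_state scan_state;
extern scan_state *scan_begin(void);            /* scan keys set up here */
extern void       *scan_next(scan_state *s);
extern void        scan_end(scan_state *s);
extern void       *build_entry(void *tuple);    /* NULL means "went stale" */

static void *
lookup_with_retry(void)
{
    void *entry = NULL;
    bool  stale;

    do
    {
        scan_state *scan = scan_begin();
        void       *tuple;

        stale = false;
        while ((tuple = scan_next(scan)) != NULL)
        {
            entry = build_entry(tuple);
            if (entry == NULL)
            {
                stale = true;       /* tuple changed under us: rescan */
                break;
            }
            break;                  /* assume a single match */
        }
        scan_end(scan);
    } while (stale);

    return entry;                   /* NULL: not found, make a negative entry */
}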
@@ -1393,9 +1411,11 @@ SearchCatCacheMiss(CatCache *cache,
if (IsBootstrapProcessingMode())
return NULL;
- ct = CatalogCacheCreateEntry(cache, NULL, arguments,
- hashValue, hashIndex,
- true);
+ ct = CatalogCacheCreateEntry(cache, NULL, NULL, arguments,
+ hashValue, hashIndex);
+
+ /* Creating a negative cache entry shouldn't fail */
+ Assert(ct != NULL);
CACHE_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
@@ -1602,7 +1622,8 @@ SearchCatCacheList(CatCache *cache,
* We have to bump the member refcounts temporarily to ensure they won't
* get dropped from the cache while loading other members. We use a PG_TRY
* block to ensure we can undo those refcounts if we get an error before
- * we finish constructing the CatCList.
+ * we finish constructing the CatCList. ctlist must be valid throughout
+ * the PG_TRY block.
*/
ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);
@@ -1613,83 +1634,113 @@ SearchCatCacheList(CatCache *cache,
ScanKeyData cur_skey[CATCACHE_MAXKEYS];
Relation relation;
SysScanDesc scandesc;
-
- /*
- * Ok, need to make a lookup in the relation, copy the scankey and
- * fill out any per-call fields.
- */
- memcpy(cur_skey, cache->cc_skey, sizeof(ScanKeyData) * cache->cc_nkeys);
- cur_skey[0].sk_argument = v1;
- cur_skey[1].sk_argument = v2;
- cur_skey[2].sk_argument = v3;
- cur_skey[3].sk_argument = v4;
+ bool stale;
relation = table_open(cache->cc_reloid, AccessShareLock);
- scandesc = systable_beginscan(relation,
- cache->cc_indexoid,
- IndexScanOK(cache, cur_skey),
- NULL,
- nkeys,
- cur_skey);
-
- /* The list will be ordered iff we are doing an index scan */
- ordered = (scandesc->irel != NULL);
-
- while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
+ do
{
- uint32 hashValue;
- Index hashIndex;
- bool found = false;
- dlist_head *bucket;
-
/*
- * See if there's an entry for this tuple already.
+ * Ok, need to make a lookup in the relation, copy the scankey and
+ * fill out any per-call fields. (We must re-do this when
+ * retrying, because systable_beginscan scribbles on the scankey.)
*/
- ct = NULL;
- hashValue = CatalogCacheComputeTupleHashValue(cache, cache->cc_nkeys, ntp);
- hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
+ memcpy(cur_skey, cache->cc_skey, sizeof(ScanKeyData) * cache->cc_nkeys);
+ cur_skey[0].sk_argument = v1;
+ cur_skey[1].sk_argument = v2;
+ cur_skey[2].sk_argument = v3;
+ cur_skey[3].sk_argument = v4;
- bucket = &cache->cc_bucket[hashIndex];
- dlist_foreach(iter, bucket)
- {
- ct = dlist_container(CatCTup, cache_elem, iter.cur);
+ scandesc = systable_beginscan(relation,
+ cache->cc_indexoid,
+ IndexScanOK(cache, cur_skey),
+ NULL,
+ nkeys,
+ cur_skey);
- if (ct->dead || ct->negative)
- continue; /* ignore dead and negative entries */
+ /* The list will be ordered iff we are doing an index scan */
+ ordered = (scandesc->irel != NULL);
- if (ct->hash_value != hashValue)
- continue; /* quickly skip entry if wrong hash val */
+ stale = false;
- if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
- continue; /* not same tuple */
+ while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
+ {
+ uint32 hashValue;
+ Index hashIndex;
+ bool found = false;
+ dlist_head *bucket;
/*
- * Found a match, but can't use it if it belongs to another
- * list already
+ * See if there's an entry for this tuple already.
*/
- if (ct->c_list)
- continue;
-
- found = true;
- break; /* A-OK */
- }
-
- if (!found)
- {
- /* We didn't find a usable entry, so make a new one */
- ct = CatalogCacheCreateEntry(cache, ntp, arguments,
- hashValue, hashIndex,
- false);
+ ct = NULL;
+ hashValue = CatalogCacheComputeTupleHashValue(cache, cache->cc_nkeys, ntp);
+ hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
+
+ bucket = &cache->cc_bucket[hashIndex];
+ dlist_foreach(iter, bucket)
+ {
+ ct = dlist_container(CatCTup, cache_elem, iter.cur);
+
+ if (ct->dead || ct->negative)
+ continue; /* ignore dead and negative entries */
+
+ if (ct->hash_value != hashValue)
+ continue; /* quickly skip entry if wrong hash val */
+
+ if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
+ continue; /* not same tuple */
+
+ /*
+ * Found a match, but can't use it if it belongs to
+ * another list already
+ */
+ if (ct->c_list)
+ continue;
+
+ found = true;
+ break; /* A-OK */
+ }
+
+ if (!found)
+ {
+ /* We didn't find a usable entry, so make a new one */
+ ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
+ hashValue, hashIndex);
+ /* upon failure, we must start the scan over */
+ if (ct == NULL)
+ {
+ /*
+ * Release refcounts on any items we already had. We
+ * dare not try to free them if they're now
+ * unreferenced, since an error while doing that would
+ * result in the PG_CATCH below doing extra refcount
+ * decrements. Besides, we'll likely re-adopt those
+ * items in the next iteration, so it's not worth
+ * complicating matters to try to get rid of them.
+ */
+ foreach(ctlist_item, ctlist)
+ {
+ ct = (CatCTup *) lfirst(ctlist_item);
+ Assert(ct->c_list == NULL);
+ Assert(ct->refcount > 0);
+ ct->refcount--;
+ }
+ /* Reset ctlist in preparation for new try */
+ ctlist = NIL;
+ stale = true;
+ break;
+ }
+ }
+
+ /* Careful here: add entry to ctlist, then bump its refcount */
+ /* This way leaves state correct if lappend runs out of memory */
+ ctlist = lappend(ctlist, ct);
+ ct->refcount++;
}
- /* Careful here: add entry to ctlist, then bump its refcount */
- /* This way leaves state correct if lappend runs out of memory */
- ctlist = lappend(ctlist, ct);
- ct->refcount++;
- }
-
- systable_endscan(scandesc);
+ systable_endscan(scandesc);
+ } while (stale);
table_close(relation, AccessShareLock);
@@ -1796,22 +1847,42 @@ ReleaseCatCacheList(CatCList *list)
* CatalogCacheCreateEntry
* Create a new CatCTup entry, copying the given HeapTuple and other
* supplied data into it. The new entry initially has refcount 0.
+ *
+ * To create a normal cache entry, ntp must be the HeapTuple just fetched
+ * from scandesc, and "arguments" is not used. To create a negative cache
+ * entry, pass NULL for ntp and scandesc; then "arguments" is the cache
+ * keys to use. In either case, hashValue/hashIndex are the hash values
+ * computed from the cache keys.
+ *
+ * Returns NULL if we attempt to detoast the tuple and observe that it
+ * became stale. (This cannot happen for a negative entry.) Caller must
+ * retry the tuple lookup in that case.
*/
static CatCTup *
-CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, Datum *arguments,
- uint32 hashValue, Index hashIndex,
- bool negative)
+CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, SysScanDesc scandesc,
+ Datum *arguments,
+ uint32 hashValue, Index hashIndex)
{
CatCTup *ct;
HeapTuple dtp;
MemoryContext oldcxt;
- /* negative entries have no tuple associated */
if (ntp)
{
int i;
- Assert(!negative);
+ /*
+ * The visibility recheck below essentially never fails during our
+ * regression tests, and there's no easy way to force it to fail for
+ * testing purposes. To ensure we have test coverage for the retry
+ * paths in our callers, make debug builds randomly fail about 0.1% of
+ * the times through this code path, even when there's no toasted
+ * fields.
+ */
+#ifdef USE_ASSERT_CHECKING
+ if (pg_prng_uint32(&pg_global_prng_state) <= (PG_UINT32_MAX / 1000))
+ return NULL;
+#endif
/*
* If there are any out-of-line toasted fields in the tuple, expand
@@ -1821,7 +1892,20 @@ CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, Datum *arguments,
* something using a slightly stale catcache entry.
*/
if (HeapTupleHasExternal(ntp))
+ {
dtp = toast_flatten_tuple(ntp, cache->cc_tupdesc);
+
+ /*
+ * The tuple could become stale while we are doing toast table
+ * access (since AcceptInvalidationMessages can run then), so we
+ * must recheck its visibility afterwards.
+ */
+ if (!systable_recheck_tuple(scandesc, ntp))
+ {
+ heap_freetuple(dtp);
+ return NULL;
+ }
+ }
else
dtp = ntp;
@@ -1860,7 +1944,7 @@ CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, Datum *arguments,
}
else
{
- Assert(negative);
+ /* Set up keys for a negative cache entry */
oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
ct = (CatCTup *) palloc(sizeof(CatCTup));
@@ -1882,7 +1966,7 @@ CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, Datum *arguments,
ct->c_list = NULL;
ct->refcount = 0; /* for the moment */
ct->dead = false;
- ct->negative = negative;
+ ct->negative = (ntp == NULL);
ct->hash_value = hashValue;
dlist_push_head(&cache->cc_bucket[hashIndex], &ct->cache_elem);
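
One more detail from this file: the stale-after-detoast window is almost impossible to hit on purpose, so assert-enabled builds now make CatalogCacheCreateEntry() fail at random roughly 0.1% of the time, keeping the callers' retry paths covered by the regression tests. A standalone sketch of that fault-injection idiom, using plain rand() rather than PostgreSQL's pg_prng:

#include <stdbool.h>
#include <stdlib.h>

/*
 * In assert-enabled builds, report a spurious failure about once per
 * thousand calls so that callers' retry paths get regular exercise.
 */
static bool
maybe_inject_failure(void)
{
#ifdef USE_ASSERT_CHECKING
    if (rand() % 1000 == 0)
        return true;
#endif
    return false;
}

static void *
create_entry(void *tuple)
{
    if (maybe_inject_failure())
        return NULL;                /* caller is expected to retry */
    /* ... normal entry construction elided ... */
    return tuple;
}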
diff --git a/src/backend/utils/cache/relmapper.c b/src/backend/utils/cache/relmapper.c
index 2a330cf..4c50044 100644
--- a/src/backend/utils/cache/relmapper.c
+++ b/src/backend/utils/cache/relmapper.c
@@ -298,14 +298,15 @@ RelationMapCopy(Oid dbid, Oid tsid, char *srcdbpath, char *dstdbpath)
* Write the same data into the destination database's relmap file.
*
* No sinval is needed because no one can be connected to the destination
- * database yet. For the same reason, there is no need to acquire
- * RelationMappingLock.
+ * database yet.
*
* There's no point in trying to preserve files here. The new database
* isn't usable yet anyway, and won't ever be if we can't install a relmap
* file.
*/
+ LWLockAcquire(RelationMappingLock, LW_EXCLUSIVE);
write_relmap_file(&map, true, false, false, dbid, tsid, dstdbpath);
+ LWLockRelease(RelationMappingLock);
}
/*
@@ -627,10 +628,12 @@ RelationMapFinishBootstrap(void)
Assert(pending_local_updates.num_mappings == 0);
/* Write the files; no WAL or sinval needed */
+ LWLockAcquire(RelationMappingLock, LW_EXCLUSIVE);
write_relmap_file(&shared_map, false, false, false,
InvalidOid, GLOBALTABLESPACE_OID, "global");
write_relmap_file(&local_map, false, false, false,
MyDatabaseId, MyDatabaseTableSpace, DatabasePath);
+ LWLockRelease(RelationMappingLock);
}
/*
@@ -877,6 +880,15 @@ write_relmap_file(RelMapFile *newmap, bool write_wal, bool send_sinval,
char mapfilename[MAXPGPATH];
/*
+ * Even without concurrent use of this map, CheckPointRelationMap() relies
+ * on this locking. Without it, a restore of a base backup taken after
+ * this function's XLogInsert() and before its durable_rename() would not
+ * have the changes. wal_level=minimal doesn't need the lock, but this
+ * isn't performance-critical enough for such a micro-optimization.
+ */
+ Assert(LWLockHeldByMeInMode(RelationMappingLock, LW_EXCLUSIVE));
+
+ /*
* Fill in the overhead fields and update CRC.
*/
newmap->magic = RELMAPPER_FILEMAGIC;
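
The relmapper change makes RelationMappingLock mandatory for every write_relmap_file() call, including paths that run with no concurrent users, and the function now asserts that precondition with LWLockHeldByMeInMode() rather than trusting its callers. A generic sketch of that "callee asserts the caller's lock" pattern using pthreads (hypothetical names, not the LWLock API; the mutex must be initialized with pthread_mutex_init() before use):

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

/* A lock wrapper that remembers whether the current thread holds it. */
typedef struct
{
    pthread_mutex_t mutex;
    pthread_t       owner;
    bool            held;
} tracked_lock;

static void
tracked_lock_acquire(tracked_lock *lk)
{
    pthread_mutex_lock(&lk->mutex);
    lk->owner = pthread_self();
    lk->held = true;
}

static void
tracked_lock_release(tracked_lock *lk)
{
    lk->held = false;
    pthread_mutex_unlock(&lk->mutex);
}

static bool
tracked_lock_held_by_me(tracked_lock *lk)
{
    return lk->held && pthread_equal(lk->owner, pthread_self());
}

/* The callee checks its locking precondition instead of trusting callers. */
static void
write_critical_file(tracked_lock *map_lock)
{
    assert(tracked_lock_held_by_me(map_lock));
    /* ... write, fsync, durable rename elided ... */
}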
diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c
index 6fa4f2c..0106683 100644
--- a/src/backend/utils/error/elog.c
+++ b/src/backend/utils/error/elog.c
@@ -755,6 +755,10 @@ errcode_for_file_access(void)
edata->sqlerrcode = ERRCODE_DISK_FULL;
break;
+ case ENOMEM: /* Out of memory */
+ edata->sqlerrcode = ERRCODE_OUT_OF_MEMORY;
+ break;
+
case ENFILE: /* File table overflow */
case EMFILE: /* Too many open files */
edata->sqlerrcode = ERRCODE_INSUFFICIENT_RESOURCES;
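
errcode_for_file_access() maps the saved errno onto a SQLSTATE; the new branch turns ENOMEM into ERRCODE_OUT_OF_MEMORY instead of letting it fall through to the default code. Its typical call site looks roughly like the following, a common ereport idiom in the tree (variable names here are illustrative):

    int fd = OpenTransientFile(path, O_RDONLY | PG_BINARY);

    if (fd < 0)
        ereport(ERROR,
                (errcode_for_file_access(),
                 errmsg("could not open file \"%s\": %m", path)));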
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index 0b5b77b..07d6538 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -1285,7 +1285,7 @@ static struct config_bool ConfigureNamesBool[] =
{"fsync", PGC_SIGHUP, WAL_SETTINGS,
gettext_noop("Forces synchronization of updates to disk."),
gettext_noop("The server will use the fsync() system call in several places to make "
- "sure that updates are physically written to disk. This insures "
+ "sure that updates are physically written to disk. This ensures "
"that a database cluster will recover to a consistent state after "
"an operating system or hardware crash.")
},
@@ -7643,9 +7643,12 @@ set_config_option_ext(const char *name, const char *value,
* Other changes might need to affect other workers, so forbid them.
*/
if (IsInParallelMode() && changeVal && action != GUC_ACTION_SAVE)
+ {
ereport(elevel,
(errcode(ERRCODE_INVALID_TRANSACTION_STATE),
errmsg("cannot set parameters during a parallel operation")));
+ return -1;
+ }
record = find_option(name, true, false, elevel);
if (record == NULL)
@@ -7740,6 +7743,10 @@ set_config_option_ext(const char *name, const char *value,
* backends. This is a tad klugy, but necessary because we
* don't re-read the config file during backend start.
*
+ * However, if changeVal is false then plow ahead anyway since
+ * we are trying to find out if the value is potentially good,
+ * not actually use it.
+ *
* In EXEC_BACKEND builds, this works differently: we load all
* non-default settings from the CONFIG_EXEC_PARAMS file
* during backend start. In that case we must accept
@@ -7750,7 +7757,7 @@ set_config_option_ext(const char *name, const char *value,
* started it. is_reload will be true when either situation
* applies.
*/
- if (IsUnderPostmaster && !is_reload)
+ if (IsUnderPostmaster && changeVal && !is_reload)
return -1;
}
else if (context != PGC_POSTMASTER &&
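
A note on the set_config_option_ext() hunk adding "return -1": ereport() only transfers control away when the level is ERROR or higher, and this function is frequently called with a softer elevel (WARNING or LOG for some sources), so without the explicit return the parallel-mode complaint would be emitted and execution would fall through into the assignment anyway. A standalone sketch of that pitfall, with a hypothetical report() standing in for ereport():

#include <stdio.h>
#include <stdlib.h>

enum level { LOG_LEVEL = 1, WARNING_LEVEL, ERROR_LEVEL };

/* Like ereport(): only ERROR (and above) aborts the current operation. */
static void
report(int elevel, const char *msg)
{
    fprintf(stderr, "%s\n", msg);
    if (elevel >= ERROR_LEVEL)
        exit(1);                    /* stand-in for ereport's non-local exit */
}

static int
set_option(int elevel, int in_parallel_mode)
{
    if (in_parallel_mode)
    {
        report(elevel, "cannot set parameters during a parallel operation");
        return -1;                  /* without this, WARNING callers fall through */
    }
    /* ... normal assignment elided ... */
    return 1;
}

int
main(void)
{
    /* With a WARNING-level report the function returns, so the explicit
     * "return -1" is what actually stops the assignment. */
    printf("%d\n", set_option(WARNING_LEVEL, 1));
    return 0;
}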