Diffstat (limited to 'src/backend/commands')
-rw-r--r-- | src/backend/commands/alter.c       | 17
-rw-r--r-- | src/backend/commands/copyfrom.c    |  6
-rw-r--r-- | src/backend/commands/dbcommands.c  | 52
-rw-r--r-- | src/backend/commands/explain.c     |  2
-rw-r--r-- | src/backend/commands/indexcmds.c   | 36
-rw-r--r-- | src/backend/commands/matview.c     | 40
-rw-r--r-- | src/backend/commands/statscmds.c   | 23
-rw-r--r-- | src/backend/commands/tablecmds.c   | 68
-rw-r--r-- | src/backend/commands/trigger.c     | 28
-rw-r--r-- | src/backend/commands/tsearchcmds.c | 95
10 files changed, 227 insertions, 140 deletions
diff --git a/src/backend/commands/alter.c b/src/backend/commands/alter.c
index 5456b82..6f27a49 100644
--- a/src/backend/commands/alter.c
+++ b/src/backend/commands/alter.c
@@ -1046,9 +1046,14 @@ AlterObjectOwner_internal(Relation rel, Oid objectId, Oid new_ownerId)
         /* Perform actual update */
         CatalogTupleUpdate(rel, &newtup->t_self, newtup);
 
-        /* Update owner dependency reference */
+        /*
+         * Update owner dependency reference.  When working on a large object,
+         * we have to translate back to the OID conventionally used for LOs'
+         * classId.
+         */
         if (classId == LargeObjectMetadataRelationId)
             classId = LargeObjectRelationId;
+
         changeDependencyOnOwner(classId, objectId, new_ownerId);
 
         /* Release memory */
@@ -1056,6 +1061,16 @@ AlterObjectOwner_internal(Relation rel, Oid objectId, Oid new_ownerId)
         pfree(nulls);
         pfree(replaces);
     }
+    else
+    {
+        /*
+         * No need to change anything.  But when working on a large object, we
+         * have to translate back to the OID conventionally used for LOs'
+         * classId, or the post-alter hook (if any) will get confused.
+         */
+        if (classId == LargeObjectMetadataRelationId)
+            classId = LargeObjectRelationId;
+    }
 
     InvokeObjectPostAlterHook(classId, objectId, 0);
 }
diff --git a/src/backend/commands/copyfrom.c b/src/backend/commands/copyfrom.c
index c6dbd97..182047a 100644
--- a/src/backend/commands/copyfrom.c
+++ b/src/backend/commands/copyfrom.c
@@ -758,6 +758,9 @@ CopyFrom(CopyFromState cstate)
         * Can't support multi-inserts if there are any volatile function
         * expressions in WHERE clause.  Similarly to the trigger case above,
         * such expressions may query the table we're inserting into.
+        *
+        * Note: the whereClause was already preprocessed in DoCopy(), so it's
+        * okay to use contain_volatile_functions() directly.
         */
        insertMethod = CIM_SINGLE;
    }
@@ -1453,7 +1456,8 @@ BeginCopyFrom(ParseState *pstate,
                     * known to be safe for use with the multi-insert
                     * optimization.  Hence we use this special case function
                     * checker rather than the standard check for
-                    * contain_volatile_functions().
+                    * contain_volatile_functions().  Note also that we already
+                    * ran the expression through expression_planner().
                     */
                    if (!volatile_defexprs)
                        volatile_defexprs = contain_volatile_functions_not_nextval((Node *) defexpr);
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
index 93f0c73..5ced6da 100644
--- a/src/backend/commands/dbcommands.c
+++ b/src/backend/commands/dbcommands.c
@@ -461,35 +461,12 @@ CreateDirAndVersionFile(char *dbpath, Oid dbid, Oid tsid, bool isRedo)
     char        buf[16];
 
     /*
-     * Prepare version data before starting a critical section.
-     *
-     * Note that we don't have to copy this from the source database; there's
-     * only one legal value.
+     * Note that we don't have to copy version data from the source database;
+     * there's only one legal value.
      */
     sprintf(buf, "%s\n", PG_MAJORVERSION);
     nbytes = strlen(PG_MAJORVERSION) + 1;
 
-    /* If we are not in WAL replay then write the WAL. */
-    if (!isRedo)
-    {
-        xl_dbase_create_wal_log_rec xlrec;
-        XLogRecPtr  lsn;
-
-        START_CRIT_SECTION();
-
-        xlrec.db_id = dbid;
-        xlrec.tablespace_id = tsid;
-
-        XLogBeginInsert();
-        XLogRegisterData((char *) (&xlrec),
-                         sizeof(xl_dbase_create_wal_log_rec));
-
-        lsn = XLogInsert(RM_DBASE_ID, XLOG_DBASE_CREATE_WAL_LOG);
-
-        /* As always, WAL must hit the disk before the data update does. */
-        XLogFlush(lsn);
-    }
-
     /* Create database directory. */
     if (MakePGDirectory(dbpath) < 0)
     {
@@ -530,12 +507,35 @@ CreateDirAndVersionFile(char *dbpath, Oid dbid, Oid tsid, bool isRedo)
     }
     pgstat_report_wait_end();
 
+    pgstat_report_wait_start(WAIT_EVENT_VERSION_FILE_SYNC);
+    if (pg_fsync(fd) != 0)
+        ereport(data_sync_elevel(ERROR),
+                (errcode_for_file_access(),
+                 errmsg("could not fsync file \"%s\": %m", versionfile)));
+    fsync_fname(dbpath, true);
+    pgstat_report_wait_end();
+
     /* Close the version file. */
     CloseTransientFile(fd);
 
-    /* Critical section done. */
+    /* If we are not in WAL replay then write the WAL. */
     if (!isRedo)
+    {
+        xl_dbase_create_wal_log_rec xlrec;
+
+        START_CRIT_SECTION();
+
+        xlrec.db_id = dbid;
+        xlrec.tablespace_id = tsid;
+
+        XLogBeginInsert();
+        XLogRegisterData((char *) (&xlrec),
+                         sizeof(xl_dbase_create_wal_log_rec));
+
+        (void) XLogInsert(RM_DBASE_ID, XLOG_DBASE_CREATE_WAL_LOG);
+
         END_CRIT_SECTION();
+    }
 }
 
 /*
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index 060c618..fa0b79d 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -3597,7 +3597,7 @@ show_buffer_usage(ExplainState *es, const BufferUsage *usage, bool planning)
 
         if (has_timing)
         {
-            appendStringInfoString(es->str, " shared/local");
+            appendStringInfoString(es->str, " shared");
             if (!INSTR_TIME_IS_ZERO(usage->blk_read_time))
                 appendStringInfo(es->str, " read=%0.3f",
                                  INSTR_TIME_GET_MILLISEC(usage->blk_read_time));
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index d3f7b09..c623432 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -1003,10 +1003,13 @@ DefineIndex(Oid relationId,
             {
                 if (key->partattrs[i] == indexInfo->ii_IndexAttrNumbers[j])
                 {
-                    /* Matched the column, now what about the equality op? */
+                    /* Matched the column, now what about the collation and equality op? */
                     Oid         idx_opfamily;
                     Oid         idx_opcintype;
 
+                    if (key->partcollation[i] != collationObjectId[j])
+                        continue;
+
                     if (get_opclass_opfamily_and_input_type(classObjectId[j],
                                                             &idx_opfamily,
                                                             &idx_opcintype))
@@ -1712,33 +1715,6 @@ DefineIndex(Oid relationId,
 
 
 /*
- * CheckMutability
- *		Test whether given expression is mutable
- */
-static bool
-CheckMutability(Expr *expr)
-{
-	/*
-	 * First run the expression through the planner.  This has a couple of
-	 * important consequences.  First, function default arguments will get
-	 * inserted, which may affect volatility (consider "default now()").
-	 * Second, inline-able functions will get inlined, which may allow us to
-	 * conclude that the function is really less volatile than it's marked. As
-	 * an example, polymorphic functions must be marked with the most volatile
-	 * behavior that they have for any input type, but once we inline the
-	 * function we may be able to conclude that it's not so volatile for the
-	 * particular input type we're dealing with.
-	 *
-	 * We assume here that expression_planner() won't scribble on its input.
-	 */
-	expr = expression_planner(expr);
-
-	/* Now we can search for non-immutable functions */
-	return contain_mutable_functions((Node *) expr);
-}
-
-
-/*
  * CheckPredicate
  *		Checks that the given partial-index predicate is valid.
  *
@@ -1761,7 +1737,7 @@ CheckPredicate(Expr *predicate)
     * A predicate using mutable functions is probably wrong, for the same
     * reasons that we don't allow an index expression to use one.
     */
-   if (CheckMutability(predicate))
+   if (contain_mutable_functions_after_planning(predicate))
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
                 errmsg("functions in index predicate must be marked IMMUTABLE")));
@@ -1904,7 +1880,7 @@ ComputeIndexAttrs(IndexInfo *indexInfo,
                 * same data every time, it's not clear what the index entries
                 * mean at all.
                 */
-               if (CheckMutability((Expr *) expr))
+               if (contain_mutable_functions_after_planning((Expr *) expr))
                    ereport(ERROR,
                            (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
                             errmsg("functions in index expression must be marked IMMUTABLE")));
diff --git a/src/backend/commands/matview.c b/src/backend/commands/matview.c
index d1ee106..a5f8972 100644
--- a/src/backend/commands/matview.c
+++ b/src/backend/commands/matview.c
@@ -656,13 +656,35 @@ refresh_by_match_merge(Oid matviewOid, Oid tempOid, Oid relowner,
                        SPI_getvalue(SPI_tuptable->vals[0],
                                     SPI_tuptable->tupdesc, 1))));
    }
+   /*
+    * Create the temporary "diff" table.
+    *
+    * Temporarily switch out of the SECURITY_RESTRICTED_OPERATION context,
+    * because you cannot create temp tables in SRO context.  For extra
+    * paranoia, add the composite type column only after switching back to
+    * SRO context.
+    */
    SetUserIdAndSecContext(relowner,
                           save_sec_context | SECURITY_LOCAL_USERID_CHANGE);
+   resetStringInfo(&querybuf);
+   appendStringInfo(&querybuf,
+                    "CREATE TEMP TABLE %s (tid pg_catalog.tid)",
+                    diffname);
+   if (SPI_exec(querybuf.data, 0) != SPI_OK_UTILITY)
+       elog(ERROR, "SPI_exec failed: %s", querybuf.data);
+   SetUserIdAndSecContext(relowner,
+                          save_sec_context | SECURITY_RESTRICTED_OPERATION);
+   resetStringInfo(&querybuf);
+   appendStringInfo(&querybuf,
+                    "ALTER TABLE %s ADD COLUMN newdata %s",
+                    diffname, tempname);
+   if (SPI_exec(querybuf.data, 0) != SPI_OK_UTILITY)
+       elog(ERROR, "SPI_exec failed: %s", querybuf.data);
 
-   /* Start building the query for creating the diff table. */
+   /* Start building the query for populating the diff table. */
    resetStringInfo(&querybuf);
    appendStringInfo(&querybuf,
-                    "CREATE TEMP TABLE %s AS "
+                    "INSERT INTO %s "
                     "SELECT mv.ctid AS tid, newdata.*::%s AS newdata "
                     "FROM %s mv FULL JOIN %s newdata ON (",
                     diffname, tempname, matviewname, tempname);
@@ -782,22 +804,22 @@ refresh_by_match_merge(Oid matviewOid, Oid tempOid, Oid relowner,
     *
     * ExecRefreshMatView() checks that after taking the exclusive lock on the
     * matview. So at least one unique index is guaranteed to exist here
-    * because the lock is still being held; so an Assert seems sufficient.
+    * because the lock is still being held.  (One known exception is if a
+    * function called as part of refreshing the matview drops the index.
+    * That's a pretty silly thing to do.)
     */
-   Assert(foundUniqueIndex);
+   if (!foundUniqueIndex)
+       elog(ERROR, "could not find suitable unique index on materialized view");
 
    appendStringInfoString(&querybuf,
                           " AND newdata.* OPERATOR(pg_catalog.*=) mv.*) "
                           "WHERE newdata.* IS NULL OR mv.* IS NULL "
                           "ORDER BY tid");
 
-   /* Create the temporary "diff" table. */
-   if (SPI_exec(querybuf.data, 0) != SPI_OK_UTILITY)
+   /* Populate the temporary "diff" table. */
+   if (SPI_exec(querybuf.data, 0) != SPI_OK_INSERT)
        elog(ERROR, "SPI_exec failed: %s", querybuf.data);
 
-   SetUserIdAndSecContext(relowner,
-                          save_sec_context | SECURITY_RESTRICTED_OPERATION);
-
    /*
    * We have no further use for data from the "full-data" temp table, but we
    * must keep it around because its type is referenced from the diff table.
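Note: the two indexcmds.c hunks above replace the file-local CheckMutability() helper with contain_mutable_functions_after_planning(), a helper that lives outside src/backend/commands and therefore is not shown in this diff (the diffstat is limited to that directory). Judging from the CheckMutability() code removed above, it presumably amounts to something like the following sketch (illustrative only, not the definition from the patch):

    bool
    contain_mutable_functions_after_planning(Expr *expr)
    {
        /*
         * Run the expression through the planner first: function default
         * arguments get inserted and inline-able functions get inlined,
         * which can make an expression less volatile than its top-level
         * function markings suggest.  We assume expression_planner() won't
         * scribble on its input.
         */
        expr = expression_planner(expr);

        /* Now search for non-immutable functions. */
        return contain_mutable_functions((Node *) expr);
    }

The tablecmds.c hunk further down keeps its own expression_planner() call instead of using this helper, precisely so it can detect when constant-folding reduces a partition expression to a bare constant and complain about that case separately.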
diff --git a/src/backend/commands/statscmds.c b/src/backend/commands/statscmds.c
index f442d85..fa7a0c0 100644
--- a/src/backend/commands/statscmds.c
+++ b/src/backend/commands/statscmds.c
@@ -735,19 +735,12 @@ void
 RemoveStatisticsById(Oid statsOid)
 {
     Relation    relation;
+   Relation    rel;
    HeapTuple   tup;
    Form_pg_statistic_ext statext;
    Oid         relid;
 
    /*
-    * First delete the pg_statistic_ext_data tuples holding the actual
-    * statistical data. There might be data with/without inheritance, so
-    * attempt deleting both.
-    */
-   RemoveStatisticsDataById(statsOid, true);
-   RemoveStatisticsDataById(statsOid, false);
-
-   /*
     * Delete the pg_statistic_ext tuple.  Also send out a cache inval on the
     * associated table, so that dependent plans will be rebuilt.
     */
@@ -761,12 +754,26 @@ RemoveStatisticsById(Oid statsOid)
    statext = (Form_pg_statistic_ext) GETSTRUCT(tup);
    relid = statext->stxrelid;
 
+   /*
+    * Delete the pg_statistic_ext_data tuples holding the actual statistical
+    * data. There might be data with/without inheritance, so attempt deleting
+    * both. We lock the user table first, to prevent other processes (e.g.
+    * DROP STATISTICS) from removing the row concurrently.
+    */
+   rel = table_open(relid, ShareUpdateExclusiveLock);
+
+   RemoveStatisticsDataById(statsOid, true);
+   RemoveStatisticsDataById(statsOid, false);
+
    CacheInvalidateRelcacheByRelid(relid);
 
    CatalogTupleDelete(relation, &tup->t_self);
 
    ReleaseSysCache(tup);
 
+   /* Keep lock until the end of the transaction. */
+   table_close(rel, NoLock);
+
    table_close(relation, RowExclusiveLock);
 }
 
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 97f9a22..5daa560 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -6805,6 +6805,10 @@ ATExecAddColumn(List **wqueue, AlteredTableInfo *tab, Relation rel,
                            colDef->colname, RelationGetRelationName(rel))));
 
            table_close(attrdesc, RowExclusiveLock);
+
+           /* Make the child column change visible */
+           CommandCounterIncrement();
+
            return InvalidObjectAddress;
        }
    }
@@ -11410,15 +11414,19 @@ transformFkeyGetPrimaryKey(Relation pkrel, Oid *indexOid,
 /*
  * transformFkeyCheckAttrs -
  *
- *	Make sure that the attributes of a referenced table belong to a unique
- *	(or primary key) constraint.  Return the OID of the index supporting
- *	the constraint, as well as the opclasses associated with the index
- *	columns.
+ *	Validate that the 'attnums' columns in the 'pkrel' relation are valid to
+ *	reference as part of a foreign key constraint.
+ *
+ *	Returns the OID of the unique index supporting the constraint and
+ *	populates the caller-provided 'opclasses' array with the opclasses
+ *	associated with the index columns.
+ *
+ *	Raises an ERROR on validation failure.
  */
 static Oid
 transformFkeyCheckAttrs(Relation pkrel,
                        int numattrs, int16 *attnums,
-                       Oid *opclasses) /* output parameter */
+                       Oid *opclasses)
 {
    Oid         indexoid = InvalidOid;
    bool        found = false;
@@ -17407,30 +17415,6 @@ ComputePartitionAttrs(ParseState *pstate, Relation rel, List *partParams, AttrNu
            *partexprs = lappend(*partexprs, expr);
 
            /*
-            * Try to simplify the expression before checking for
-            * mutability. The main practical value of doing it in this
-            * order is that an inline-able SQL-language function will be
-            * accepted if its expansion is immutable, whether or not the
-            * function itself is marked immutable.
-            *
-            * Note that expression_planner does not change the passed in
-            * expression destructively and we have already saved the
-            * expression to be stored into the catalog above.
-            */
-           expr = (Node *) expression_planner((Expr *) expr);
-
-           /*
-            * Partition expression cannot contain mutable functions,
-            * because a given row must always map to the same partition
-            * as long as there is no change in the partition boundary
-            * structure.
-            */
-           if (contain_mutable_functions(expr))
-               ereport(ERROR,
-                       (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
-                        errmsg("functions in partition key expression must be marked IMMUTABLE")));
-
-           /*
             * transformPartitionSpec() should have already rejected
             * subqueries, aggregates, window functions, and SRFs, based
             * on the EXPR_KIND_ for partition expressions.
@@ -17472,6 +17456,32 @@ ComputePartitionAttrs(ParseState *pstate, Relation rel, List *partParams, AttrNu
            }
 
            /*
+            * Preprocess the expression before checking for mutability.
+            * This is essential for the reasons described in
+            * contain_mutable_functions_after_planning.  However, we call
+            * expression_planner for ourselves rather than using that
+            * function, because if constant-folding reduces the
+            * expression to a constant, we'd like to know that so we can
+            * complain below.
+            *
+            * Like contain_mutable_functions_after_planning, assume that
+            * expression_planner won't scribble on its input, so this
+            * won't affect the partexprs entry we saved above.
+            */
+           expr = (Node *) expression_planner((Expr *) expr);
+
+           /*
+            * Partition expressions cannot contain mutable functions,
+            * because a given row must always map to the same partition
+            * as long as there is no change in the partition boundary
+            * structure.
+            */
+           if (contain_mutable_functions(expr))
+               ereport(ERROR,
+                       (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
+                        errmsg("functions in partition key expression must be marked IMMUTABLE")));
+
+           /*
             * While it is not exactly *wrong* for a partition expression
             * to be a constant, it seems better to reject such keys.
             */
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index 0769ae3..72b1134 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -3056,10 +3056,6 @@ ExecBRUpdateTriggersNew(EState *estate, EPQState *epqstate,
         * received in newslot.  Neither we nor our callers have any further
         * interest in the passed-in tuple, so it's okay to overwrite newslot
         * with the newer data.
-        *
-        * (Typically, newslot was also generated by ExecGetUpdateNewTuple, so
-        * that epqslot_clean will be that same slot and the copy step below
-        * is not needed.)
         */
        if (epqslot_candidate != NULL)
        {
@@ -3068,14 +3064,36 @@ ExecBRUpdateTriggersNew(EState *estate, EPQState *epqstate,
            epqslot_clean = ExecGetUpdateNewTuple(relinfo, epqslot_candidate,
                                                  oldslot);
 
-           if (newslot != epqslot_clean)
+           /*
+            * Typically, the caller's newslot was also generated by
+            * ExecGetUpdateNewTuple, so that epqslot_clean will be the same
+            * slot and copying is not needed.  But do the right thing if it
+            * isn't.
+            */
+           if (unlikely(newslot != epqslot_clean))
                ExecCopySlot(newslot, epqslot_clean);
+
+           /*
+            * At this point newslot contains a virtual tuple that may
+            * reference some fields of oldslot's tuple in some disk buffer.
+            * If that tuple is in a different page than the original target
+            * tuple, then our only pin on that buffer is oldslot's, and we're
+            * about to release it.  Hence we'd better materialize newslot to
+            * ensure it doesn't contain references into an unpinned buffer.
+            * (We'd materialize it below anyway, but too late for safety.)
+            */
+           ExecMaterializeSlot(newslot);
        }
 
+       /*
+        * Here we convert oldslot to a materialized slot holding trigtuple.
+        * Neither slot passed to the triggers will hold any buffer pin.
+        */
        trigtuple = ExecFetchSlotHeapTuple(oldslot, true, &should_free_trig);
    }
    else
    {
+       /* Put the FDW-supplied tuple into oldslot to unify the cases */
        ExecForceStoreHeapTuple(fdw_trigtuple, oldslot, false);
        trigtuple = fdw_trigtuple;
    }
diff --git a/src/backend/commands/tsearchcmds.c b/src/backend/commands/tsearchcmds.c
index 4cc4e3c..7aa166f 100644
--- a/src/backend/commands/tsearchcmds.c
+++ b/src/backend/commands/tsearchcmds.c
@@ -48,6 +48,12 @@
 #include "utils/rel.h"
 #include "utils/syscache.h"
 
+/* Single entry of List returned by getTokenTypes() */
+typedef struct
+{
+   int         num;            /* token type number */
+   char       *name;           /* token type name */
+} TSTokenTypeItem;
 
 static void MakeConfigurationMapping(AlterTSConfigurationStmt *stmt,
                                     HeapTuple tup, Relation relMap);
@@ -1151,22 +1157,45 @@ AlterTSConfiguration(AlterTSConfigurationStmt *stmt)
 }
 
 /*
- * Translate a list of token type names to an array of token type numbers
+ * Check whether a token type name is a member of a TSTokenTypeItem list.
  */
-static int *
+static bool
+tstoken_list_member(char *token_name, List *tokens)
+{
+   ListCell   *c;
+   bool        found = false;
+
+   foreach(c, tokens)
+   {
+       TSTokenTypeItem *ts = (TSTokenTypeItem *) lfirst(c);
+
+       if (strcmp(token_name, ts->name) == 0)
+       {
+           found = true;
+           break;
+       }
+   }
+
+   return found;
+}
+
+/*
+ * Translate a list of token type names to a list of unique TSTokenTypeItem.
+ *
+ * Duplicated entries list are removed from tokennames.
+ */
+static List *
 getTokenTypes(Oid prsId, List *tokennames)
 {
    TSParserCacheEntry *prs = lookup_ts_parser_cache(prsId);
    LexDescr   *list;
-   int        *res,
-               i,
-               ntoken;
+   List       *result = NIL;
+   int         ntoken;
    ListCell   *tn;
 
    ntoken = list_length(tokennames);
    if (ntoken == 0)
-       return NULL;
-   res = (int *) palloc(sizeof(int) * ntoken);
+       return NIL;
 
    if (!OidIsValid(prs->lextypeOid))
        elog(ERROR, "method lextype isn't defined for text search parser %u",
@@ -1176,19 +1205,26 @@ getTokenTypes(Oid prsId, List *tokennames)
    list = (LexDescr *) DatumGetPointer(OidFunctionCall1(prs->lextypeOid,
                                                         (Datum) 0));
 
-   i = 0;
    foreach(tn, tokennames)
    {
        String     *val = lfirst_node(String, tn);
        bool        found = false;
        int         j;
 
+       /* Skip if this token is already in the result */
+       if (tstoken_list_member(strVal(val), result))
+           continue;
+
        j = 0;
        while (list && list[j].lexid)
        {
            if (strcmp(strVal(val), list[j].alias) == 0)
            {
-               res[i] = list[j].lexid;
+               TSTokenTypeItem *ts = (TSTokenTypeItem *) palloc0(sizeof(TSTokenTypeItem));
+
+               ts->num = list[j].lexid;
+               ts->name = pstrdup(strVal(val));
+               result = lappend(result, ts);
                found = true;
                break;
            }
@@ -1199,10 +1235,9 @@ getTokenTypes(Oid prsId, List *tokennames)
                    (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                     errmsg("token type \"%s\" does not exist",
                            strVal(val))));
-       i++;
    }
 
-   return res;
+   return result;
 }
 
 /*
@@ -1220,8 +1255,7 @@ MakeConfigurationMapping(AlterTSConfigurationStmt *stmt,
    int         i;
    int         j;
    Oid         prsId;
-   int        *tokens,
-               ntoken;
+   List       *tokens = NIL;
    Oid        *dictIds;
    int         ndict;
    ListCell   *c;
@@ -1231,15 +1265,16 @@ MakeConfigurationMapping(AlterTSConfigurationStmt *stmt,
    prsId = tsform->cfgparser;
 
    tokens = getTokenTypes(prsId, stmt->tokentype);
-   ntoken = list_length(stmt->tokentype);
 
    if (stmt->override)
    {
        /*
        * delete maps for tokens if they exist and command was ALTER
        */
-       for (i = 0; i < ntoken; i++)
+       foreach(c, tokens)
        {
+           TSTokenTypeItem *ts = (TSTokenTypeItem *) lfirst(c);
+
            ScanKeyInit(&skey[0],
                        Anum_pg_ts_config_map_mapcfg,
                        BTEqualStrategyNumber, F_OIDEQ,
@@ -1247,7 +1282,7 @@ MakeConfigurationMapping(AlterTSConfigurationStmt *stmt,
            ScanKeyInit(&skey[1],
                        Anum_pg_ts_config_map_maptokentype,
                        BTEqualStrategyNumber, F_INT4EQ,
-                       Int32GetDatum(tokens[i]));
+                       Int32GetDatum(ts->num));
 
            scan = systable_beginscan(relMap, TSConfigMapIndexId, true,
                                      NULL, 2, skey);
@@ -1302,9 +1337,11 @@ MakeConfigurationMapping(AlterTSConfigurationStmt *stmt,
            {
                bool        tokmatch = false;
-               for (j = 0; j < ntoken; j++)
+               foreach(c, tokens)
                {
-                   if (cfgmap->maptokentype == tokens[j])
+                   TSTokenTypeItem *ts = (TSTokenTypeItem *) lfirst(c);
+
+                   if (cfgmap->maptokentype == ts->num)
                    {
                        tokmatch = true;
                        break;
                    }
@@ -1345,8 +1382,10 @@ MakeConfigurationMapping(AlterTSConfigurationStmt *stmt,
    /*
    * Insertion of new entries
    */
-   for (i = 0; i < ntoken; i++)
+   foreach(c, tokens)
    {
+       TSTokenTypeItem *ts = (TSTokenTypeItem *) lfirst(c);
+
        for (j = 0; j < ndict; j++)
        {
            Datum       values[Natts_pg_ts_config_map];
@@ -1354,7 +1393,7 @@ MakeConfigurationMapping(AlterTSConfigurationStmt *stmt,
            memset(nulls, false, sizeof(nulls));
 
            values[Anum_pg_ts_config_map_mapcfg - 1] = ObjectIdGetDatum(cfgId);
-           values[Anum_pg_ts_config_map_maptokentype - 1] = Int32GetDatum(tokens[i]);
+           values[Anum_pg_ts_config_map_maptokentype - 1] = Int32GetDatum(ts->num);
            values[Anum_pg_ts_config_map_mapseqno - 1] = Int32GetDatum(j + 1);
            values[Anum_pg_ts_config_map_mapdict - 1] = ObjectIdGetDatum(dictIds[j]);
 
@@ -1381,9 +1420,8 @@ DropConfigurationMapping(AlterTSConfigurationStmt *stmt,
    ScanKeyData skey[2];
    SysScanDesc scan;
    HeapTuple   maptup;
-   int         i;
    Oid         prsId;
-   int        *tokens;
+   List       *tokens = NIL;
    ListCell   *c;
 
    tsform = (Form_pg_ts_config) GETSTRUCT(tup);
@@ -1392,10 +1430,9 @@ DropConfigurationMapping(AlterTSConfigurationStmt *stmt,
 
    tokens = getTokenTypes(prsId, stmt->tokentype);
 
-   i = 0;
-   foreach(c, stmt->tokentype)
+   foreach(c, tokens)
    {
-       String     *val = lfirst_node(String, c);
+       TSTokenTypeItem *ts = (TSTokenTypeItem *) lfirst(c);
        bool        found = false;
 
        ScanKeyInit(&skey[0],
@@ -1405,7 +1442,7 @@ DropConfigurationMapping(AlterTSConfigurationStmt *stmt,
        ScanKeyInit(&skey[1],
                    Anum_pg_ts_config_map_maptokentype,
                    BTEqualStrategyNumber, F_INT4EQ,
-                   Int32GetDatum(tokens[i]));
+                   Int32GetDatum(ts->num));
 
        scan = systable_beginscan(relMap, TSConfigMapIndexId, true,
                                  NULL, 2, skey);
@@ -1425,17 +1462,15 @@ DropConfigurationMapping(AlterTSConfigurationStmt *stmt,
                ereport(ERROR,
                        (errcode(ERRCODE_UNDEFINED_OBJECT),
                         errmsg("mapping for token type \"%s\" does not exist",
-                               strVal(val))));
+                               ts->name)));
            }
            else
            {
                ereport(NOTICE,
                        (errmsg("mapping for token type \"%s\" does not exist, skipping",
-                               strVal(val))));
+                               ts->name)));
            }
        }
-
-       i++;
    }
 
    EventTriggerCollectAlterTSConfig(stmt, cfgId, NULL, 0);