diff --git a/GNUmakefile.in b/GNUmakefile.in index 623074305ce..70f635b16e7 100644 --- a/GNUmakefile.in +++ b/GNUmakefile.in @@ -115,7 +115,7 @@ $(call recurse,distprep coverage,doc src config contrib gpcontrib) # it's not built by default $(call recurse,clean,doc contrib gpcontrib src config) clean: - rm -rf tmp_install/ + rm -rf tmp_install/ portlock/ # Garbage from autoconf: @rm -rf autom4te.cache/ # leap over gpAux/Makefile into subdirectories to avoid circular dependency. @@ -132,7 +132,7 @@ distclean maintainer-clean: $(MAKE) -C config $@ $(MAKE) -C gpMgmt $@ $(MAKE) -C src $@ - rm -rf tmp_install/ + rm -rf tmp_install/ portlock/ # Garbage from autoconf: @rm -rf autom4te.cache/ rm -f config.cache config.log config.status GNUmakefile diff --git a/contrib/seg/expected/seg.out b/contrib/seg/expected/seg.out index d20b7d60e15..b67e93f9c18 100644 --- a/contrib/seg/expected/seg.out +++ b/contrib/seg/expected/seg.out @@ -256,6 +256,13 @@ SELECT '12.34567890123456'::seg AS seg; 12.3457 (1 row) +-- Same, with a very long input +SELECT '12.3456789012345600000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'::seg AS seg; + seg +--------- + 12.3457 +(1 row) + -- Numbers with certainty indicators SELECT '~6.5'::seg AS seg; seg diff --git a/contrib/seg/seg.c b/contrib/seg/seg.c index 4a8e2be3290..91b8a796004 100644 --- a/contrib/seg/seg.c +++ b/contrib/seg/seg.c @@ -927,9 +927,13 @@ restore(char *result, float val, int n) /* * Put a cap on the number of significant digits to avoid garbage in the - * output and ensure we don't overrun the result buffer. + * output and ensure we don't overrun the result buffer. (n should not be + * negative, but check to protect ourselves against corrupted data.) */ - n = Min(n, FLT_DIG); + if (n <= 0) + n = FLT_DIG; + else + n = Min(n, FLT_DIG); /* remember the sign */ sign = (val < 0 ? 
1 : 0); diff --git a/contrib/seg/segparse.y b/contrib/seg/segparse.y index 040cab39041..3115b12ebd4 100644 --- a/contrib/seg/segparse.y +++ b/contrib/seg/segparse.y @@ -3,6 +3,7 @@ #include "postgres.h" +#include <float.h> #include <math.h> #include "fmgr.h" @@ -23,6 +24,8 @@ static float seg_atof(const char *value); +static int sig_digits(const char *value); + static char strbuf[25] = { '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', @@ -63,9 +66,9 @@ range: boundary PLUMIN deviation result->lower = $1.val - $3.val; result->upper = $1.val + $3.val; sprintf(strbuf, "%g", result->lower); - result->l_sigd = Max(Min(6, significant_digits(strbuf)), Max($1.sigd, $3.sigd)); + result->l_sigd = Max(sig_digits(strbuf), Max($1.sigd, $3.sigd)); sprintf(strbuf, "%g", result->upper); - result->u_sigd = Max(Min(6, significant_digits(strbuf)), Max($1.sigd, $3.sigd)); + result->u_sigd = Max(sig_digits(strbuf), Max($1.sigd, $3.sigd)); result->l_ext = '\0'; result->u_ext = '\0'; } @@ -122,7 +125,7 @@ boundary: SEGFLOAT float val = seg_atof($1); $$.ext = '\0'; - $$.sigd = significant_digits($1); + $$.sigd = sig_digits($1); $$.val = val; } | EXTENSION SEGFLOAT @@ -131,7 +134,7 @@ boundary: SEGFLOAT float val = seg_atof($2); $$.ext = $1[0]; - $$.sigd = significant_digits($2); + $$.sigd = sig_digits($2); $$.val = val; } ; @@ -142,7 +145,7 @@ deviation: SEGFLOAT float val = seg_atof($1); $$.ext = '\0'; - $$.sigd = significant_digits($1); + $$.sigd = sig_digits($1); $$.val = val; } ; @@ -159,5 +162,14 @@ seg_atof(const char *value) return DatumGetFloat4(datum); } +static int +sig_digits(const char *value) +{ + int n = significant_digits(value); + + /* Clamp, to ensure value will fit in sigd fields */ + return Min(n, FLT_DIG); +} + #include "segscan.c" diff --git a/contrib/seg/sql/seg.sql b/contrib/seg/sql/seg.sql index eb7c7138f82..8decf0937bf 100644 --- a/contrib/seg/sql/seg.sql +++ b/contrib/seg/sql/seg.sql @@ -60,6 +60,9 @@ SELECT '3.400e5'::seg AS seg; -- Digits truncated SELECT '12.34567890123456'::seg AS seg; +-- Same, with a very long input +SELECT '12.3456789012345600000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'::seg AS seg; + -- Numbers with certainty indicators SELECT '~6.5'::seg AS seg; SELECT '<6.5'::seg AS seg; diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml index 514cac29e60..23f60cad528 100644 --- a/doc/src/sgml/config.sgml +++ b/doc/src/sgml/config.sgml @@ -756,7 +756,7 @@ include_dir 'conf.d' A value that starts with @ specifies that a Unix-domain socket in the abstract namespace should be created - (currently supported on Linux and Windows). In that case, this value + (currently supported on Linux only). In that case, this value does not specify a directory but a prefix from which the actual socket name is computed in the same manner as for the file-system namespace.
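(Aside, not part of the patch: a minimal, hedged C sketch of what a Linux abstract-namespace socket looks like, since the doc text above is about exactly that. The leading zero byte in sun_path is what places the name in the abstract namespace, so no filesystem entry is created; the helper name is illustrative.)

```c
#include <stddef.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

/* Bind a stream socket whose name lives in the Linux abstract namespace. */
int
bind_abstract_socket(const char *name)
{
    struct sockaddr_un addr;
    socklen_t   len;
    int         fd = socket(AF_UNIX, SOCK_STREAM, 0);

    if (fd < 0)
        return -1;

    memset(&addr, 0, sizeof(addr));
    addr.sun_family = AF_UNIX;
    addr.sun_path[0] = '\0';    /* leading NUL marks the abstract namespace */
    strncpy(addr.sun_path + 1, name, sizeof(addr.sun_path) - 2);

    /* The address length, not a NUL terminator, delimits the name. */
    len = (socklen_t) (offsetof(struct sockaddr_un, sun_path) + 1 + strlen(name));
    if (bind(fd, (struct sockaddr *) &addr, len) < 0)
    {
        close(fd);
        return -1;
    }
    return fd;
}
```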
While the abstract socket name prefix can be @@ -10734,7 +10734,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' - + trace_locks (boolean) trace_locks configuration parameter @@ -10775,7 +10775,7 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1) - + trace_lwlocks (boolean) trace_lwlocks configuration parameter @@ -10795,7 +10795,7 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1) - + trace_userlocks (boolean) trace_userlocks configuration parameter @@ -10814,7 +10814,7 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1) - + trace_lock_oidmin (integer) trace_lock_oidmin configuration parameter @@ -10833,7 +10833,7 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1) - + trace_lock_table (integer) trace_lock_table configuration parameter @@ -10851,7 +10851,7 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1) - + debug_deadlocks (boolean) debug_deadlocks configuration parameter @@ -10870,7 +10870,7 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1) - + log_btree_build_stats (boolean) log_btree_build_stats configuration parameter diff --git a/doc/src/sgml/event-trigger.sgml b/doc/src/sgml/event-trigger.sgml index 9c66f97b0f6..a76e8ac09be 100644 --- a/doc/src/sgml/event-trigger.sgml +++ b/doc/src/sgml/event-trigger.sgml @@ -1194,8 +1194,9 @@ noddl(PG_FUNCTION_ARGS) trigdata = (EventTriggerData *) fcinfo->context; ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("command \"%s\" denied", trigdata->tag))); + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("command \"%s\" denied", + GetCommandTagName(trigdata->tag)))); PG_RETURN_NULL(); } diff --git a/doc/src/sgml/func.sgml b/doc/src/sgml/func.sgml index 7960cc2a5aa..ba8bccc135d 100644 --- a/doc/src/sgml/func.sgml +++ b/doc/src/sgml/func.sgml @@ -1598,6 +1598,10 @@ repeat('Pg', 4) PgPgPgPg round(42.4382, 2) 42.44 + + + round(1234.56, -1) + 1230 diff --git a/doc/src/sgml/libpq.sgml b/doc/src/sgml/libpq.sgml index fd230d9fd0e..efd039381d6 100644 --- a/doc/src/sgml/libpq.sgml +++ b/doc/src/sgml/libpq.sgml @@ -5024,10 +5024,11 @@ int PQflush(PGconn *conn); - While the pipeline API was introduced in + While libpq's pipeline API was introduced in PostgreSQL 14, it is a client-side feature which doesn't require special server support and works on any server - that supports the v3 extended query protocol. + that supports the v3 extended query protocol. For more information see + . diff --git a/doc/src/sgml/parallel.sgml b/doc/src/sgml/parallel.sgml index 3e8326d46c8..2b2c8cbffd6 100644 --- a/doc/src/sgml/parallel.sgml +++ b/doc/src/sgml/parallel.sgml @@ -128,7 +128,7 @@ EXPLAIN SELECT * FROM pgbench_accounts WHERE filler LIKE '%x%'; In addition, the system must not be running in single-user mode. Since - the entire database system is running in single process in this situation, + the entire database system is running as a single process in this situation, no background workers will be available. diff --git a/doc/src/sgml/protocol.sgml b/doc/src/sgml/protocol.sgml index cf1fadcda4b..7141f6c277a 100644 --- a/doc/src/sgml/protocol.sgml +++ b/doc/src/sgml/protocol.sgml @@ -1093,9 +1093,10 @@ SELCT 1/0; implicit ROLLBACK if they failed. However, there are a few DDL commands (such as CREATE DATABASE) that cannot be executed inside a transaction block. If one of - these is executed in a pipeline, it will, upon success, force an - immediate commit to preserve database consistency. 
- A Sync immediately following one of these has no effect except to + these is executed in a pipeline, it will fail unless it is the first + command in the pipeline. Furthermore, upon success it will force an + immediate commit to preserve database consistency. Thus a Sync + immediately following one of these commands has no effect except to respond with ReadyForQuery. @@ -1103,7 +1104,7 @@ SELCT 1/0; When using this method, completion of the pipeline must be determined by counting ReadyForQuery messages and waiting for that to reach the number of Syncs sent. Counting command completion responses is - unreliable, since some of the commands may not be executed and thus not + unreliable, since some of the commands may be skipped and thus not produce a completion message. diff --git a/doc/src/sgml/query.sgml b/doc/src/sgml/query.sgml index 9046d7c9fbe..a864d146f02 100644 --- a/doc/src/sgml/query.sgml +++ b/doc/src/sgml/query.sgml @@ -706,40 +706,39 @@ SELECT city FROM weather HAVING Aggregates are also very useful in combination with GROUP - BY clauses. For example, we can get the maximum low - temperature observed in each city with: + BY clauses. For example, we can get the number of readings + and the maximum low temperature observed in each city with: -SELECT city, max(temp_lo) +SELECT city, count(*), max(temp_lo) FROM weather GROUP BY city; - city | max ----------------+----- - Hayward | 37 - San Francisco | 46 + city | count | max +---------------+-------+----- + Hayward | 1 | 37 + San Francisco | 2 | 46 (2 rows) which gives us one output row per city. Each aggregate result is computed over the table rows matching that city. We can filter these grouped - rows using HAVING and the output count using - FILTER: + rows using HAVING: -SELECT city, max(temp_lo), count(*) FILTER (WHERE temp_lo < 30) +SELECT city, count(*), max(temp_lo) FROM weather GROUP BY city HAVING max(temp_lo) < 40; - city | max | count ----------+-----+------- - Hayward | 37 | 5 + city | count | max +---------+-------+----- + Hayward | 1 | 37 (1 row) @@ -749,12 +748,18 @@ SELECT city, max(temp_lo), count(*) FILTER (WHERE temp_lo < 30) names begin with S, we might do: -SELECT city, max(temp_lo), count(*) FILTER (WHERE temp_lo < 30) +SELECT city, count(*), max(temp_lo) FROM weather WHERE city LIKE 'S%' -- - GROUP BY city - HAVING max(temp_lo) < 40; + GROUP BY city; + + + city | count | max +---------------+-------+----- + San Francisco | 2 | 46 +(1 row) + @@ -791,6 +796,34 @@ SELECT city, max(temp_lo), count(*) FILTER (WHERE temp_lo < 30) because we avoid doing the grouping and aggregate calculations for all rows that fail the WHERE check. + + + Another way to select the rows that go into an aggregate + computation is to use FILTER, which is a + per-aggregate option: + + +SELECT city, count(*) FILTER (WHERE temp_lo < 45), max(temp_lo) + FROM weather + GROUP BY city; + + + + city | count | max +---------------+-------+----- + Hayward | 1 | 37 + San Francisco | 1 | 46 +(2 rows) + + + FILTER is much like WHERE, + except that it removes rows only from the input of the particular + aggregate function that it is attached to. + Here, the count aggregate counts only + rows with temp_lo below 45; but the + max aggregate is still applied to all rows, + so it still finds the reading of 46. 
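Returning to the pipeline-mode changes earlier in this stretch (libpq.sgml and protocol.sgml): the recommended way to detect pipeline completion is to count sync responses, not command completions, since failed commands are skipped. A hedged libpq sketch of that pattern for PostgreSQL 14 or later; the connection string and queries are illustrative:

```c
#include <stdio.h>
#include <libpq-fe.h>

int
main(void)
{
    PGconn     *conn = PQconnectdb("dbname=postgres");
    int         syncs_seen = 0;

    if (PQstatus(conn) != CONNECTION_OK || PQenterPipelineMode(conn) != 1)
    {
        fprintf(stderr, "setup failed: %s", PQerrorMessage(conn));
        PQfinish(conn);
        return 1;
    }

    /* Queue two statements, then one explicit synchronization point. */
    PQsendQueryParams(conn, "SELECT 1", 0, NULL, NULL, NULL, NULL, 0);
    PQsendQueryParams(conn, "SELECT 2", 0, NULL, NULL, NULL, NULL, 0);
    PQpipelineSync(conn);

    /* Drain results; the pipeline is done when the sync result arrives. */
    while (syncs_seen < 1)
    {
        PGresult   *res = PQgetResult(conn);

        if (res == NULL)
            continue;       /* NULL separates one query's results from the next */
        if (PQresultStatus(res) == PGRES_PIPELINE_SYNC)
            syncs_seen++;
        PQclear(res);
    }

    PQexitPipelineMode(conn);
    PQfinish(conn);
    return 0;
}
```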
+ diff --git a/doc/src/sgml/ref/alter_role.sgml b/doc/src/sgml/ref/alter_role.sgml index c9047374eff..af2c1bec08e 100644 --- a/doc/src/sgml/ref/alter_role.sgml +++ b/doc/src/sgml/ref/alter_role.sgml @@ -341,7 +341,7 @@ ALTER ROLE fred VALID UNTIL 'infinity'; - Give a role the ability to create other roles and new databases: + Give a role the ability to manage other roles and create new databases: ALTER ROLE miriam CREATEROLE CREATEDB; diff --git a/doc/src/sgml/ref/alter_table.sgml b/doc/src/sgml/ref/alter_table.sgml index cdcfbdb5b6b..ba7a077ae10 100644 --- a/doc/src/sgml/ref/alter_table.sgml +++ b/doc/src/sgml/ref/alter_table.sgml @@ -1050,7 +1050,7 @@ Where column_reference_storage_directive is: constraint. This does not work, however, if any of the partition keys is an expression and the partition does not accept NULL values. If attaching a list partition that will - not accept NULL values, also add + not accept NULL values, also add a NOT NULL constraint to the partition key column, unless it's an expression. diff --git a/doc/src/sgml/ref/comment.sgml b/doc/src/sgml/ref/comment.sgml index 850e5ba4afc..17f6b816365 100644 --- a/doc/src/sgml/ref/comment.sgml +++ b/doc/src/sgml/ref/comment.sgml @@ -67,7 +67,7 @@ COMMENT ON TRIGGER trigger_name ON table_name | TYPE object_name | VIEW object_name -} IS 'text' +} IS { string_literal | NULL } where aggregate_signature is: @@ -263,11 +263,19 @@ COMMENT ON - text + string_literal - The new comment, written as a string literal; or NULL - to drop the comment. + The new comment contents, written as a string literal. + + + + + + NULL + + + Write NULL to drop the comment. diff --git a/doc/src/sgml/ref/create_role.sgml b/doc/src/sgml/ref/create_role.sgml index d6e06939a3a..8cd66949832 100644 --- a/doc/src/sgml/ref/create_role.sgml +++ b/doc/src/sgml/ref/create_role.sgml @@ -126,11 +126,11 @@ in sync when changing the above synopsis! These clauses determine whether a role will be permitted to - create new roles (that is, execute CREATE ROLE). - A role with CREATEROLE privilege can also alter - and drop other roles. - If not specified, - NOCREATEROLE is the default. + create, alter, drop, comment on, change the security label for, + and grant or revoke membership in other roles. + See for more details about what + capabilities are conferred by this privilege. + If not specified, NOCREATEROLE is the default. diff --git a/doc/src/sgml/ref/createuser.sgml b/doc/src/sgml/ref/createuser.sgml index 17579e50afb..0e1a39a3fe6 100644 --- a/doc/src/sgml/ref/createuser.sgml +++ b/doc/src/sgml/ref/createuser.sgml @@ -41,10 +41,14 @@ PostgreSQL documentation - If you wish to create a new superuser, you must connect as a - superuser, not merely with CREATEROLE privilege. + If you wish to create a role with the SUPERUSER, + REPLICATION, or BYPASSRLS privilege, + you must connect as a superuser, not merely with + CREATEROLE privilege. Being a superuser implies the ability to bypass all access permission - checks within the database, so superuser access should not be granted lightly. + checks within the database, so superuser access should not be granted + lightly. CREATEROLE also conveys + very extensive privileges. @@ -221,8 +225,12 @@ PostgreSQL documentation - The new user will be allowed to create new roles (that is, - this user will have CREATEROLE privilege). + The new user will be allowed to create, alter, drop, comment on, + change the security label for, and grant or revoke membership in + other roles; that is, + this user will have CREATEROLE privilege. 
+ See for more details about what + capabilities are conferred by this privilege. diff --git a/doc/src/sgml/ref/fetch.sgml b/doc/src/sgml/ref/fetch.sgml index 83d58e54b9d..f0f3ac2a028 100644 --- a/doc/src/sgml/ref/fetch.sgml +++ b/doc/src/sgml/ref/fetch.sgml @@ -29,8 +29,7 @@ PostgreSQL documentation FETCH [ direction ] [ FROM | IN ] cursor_name -where direction can -be one of: +where direction can be one of: NEXT PRIOR diff --git a/doc/src/sgml/ref/move.sgml b/doc/src/sgml/ref/move.sgml index 8378439debb..89b5a241013 100644 --- a/doc/src/sgml/ref/move.sgml +++ b/doc/src/sgml/ref/move.sgml @@ -29,8 +29,7 @@ PostgreSQL documentation MOVE [ direction ] [ FROM | IN ] cursor_name -where direction can -be one of: +where direction can be one of: NEXT PRIOR diff --git a/doc/src/sgml/ref/security_label.sgml b/doc/src/sgml/ref/security_label.sgml index 20a839ff0c3..5f96b7e1ded 100644 --- a/doc/src/sgml/ref/security_label.sgml +++ b/doc/src/sgml/ref/security_label.sgml @@ -44,7 +44,7 @@ SECURITY LABEL [ FOR provider ] ON TABLESPACE object_name | TYPE object_name | VIEW object_name -} IS 'label' +} IS { string_literal | NULL } where aggregate_signature is: @@ -178,11 +178,19 @@ SECURITY LABEL [ FOR provider ] ON - label + string_literal - The new security label, written as a string literal; or NULL - to drop the security label. + The new setting of the security label, written as a string literal. + + + + + + NULL + + + Write NULL to drop the security label. @@ -193,12 +201,19 @@ SECURITY LABEL [ FOR provider ] ON Examples - The following example shows how the security label of a table might - be changed. + The following example shows how the security label of a table could + be set or changed: SECURITY LABEL FOR selinux ON TABLE mytable IS 'system_u:object_r:sepgsql_table_t:s0'; - + + + To remove the label: + + +SECURITY LABEL FOR selinux ON TABLE mytable IS NULL; + + diff --git a/doc/src/sgml/release-14.sgml b/doc/src/sgml/release-14.sgml index 1b8092af963..f4d6d11ca4d 100644 --- a/doc/src/sgml/release-14.sgml +++ b/doc/src/sgml/release-14.sgml @@ -1,6 +1,1014 @@ + + Release 14.7 + + + Release date: + 2023-02-09 + + + + This release contains a variety of fixes from 14.6. + For information about new features in major release 14, see + . + + + + Migration to Version 14.7 + + + A dump/restore is not required for those running 14.X. + + + + However, if you are upgrading from a version earlier than 14.4, + see . + + + + + Changes + + + + + + + Fix calculation of which GENERATED columns need + to be updated in child tables during an UPDATE on + a partitioned table or inheritance tree (Amit Langote, Tom Lane) + + + + This fixes failure to update GENERATED columns + that do not exist in the parent table, or that have different + dependencies than are in the parent column's generation expression. + + + + + + + Allow a WITH RECURSIVE ... CYCLE CTE + to access its output column (Tom Lane) + + + + A reference to the SET column from within the CTE + would fail with cache lookup failed for type 0. + + + + + + + Fix handling of pending inserts when doing a bulk insertion to a + foreign table (Etsuro Fujita) + + + + In some cases pending insertions were not flushed to the FDW soon + enough, leading to logical inconsistencies, for + example BEFORE ROW triggers not seeing rows they + should be able to see. 
+ + + + + + + Allow REPLICA IDENTITY + to be set on an index that's not (yet) valid (Tom Lane) + + + + When pg_dump dumps a partitioned index + that's marked REPLICA IDENTITY, it generates a + command sequence that applies REPLICA IDENTITY + before the partitioned index has been marked valid, causing restore + to fail. There seems no very good reason to prohibit doing it in + that order, so allow it. The marking will have no effect anyway + until the index becomes valid. + + + + + + + Fix handling of DEFAULT markers in rules that + perform an INSERT from a + multi-row VALUES list (Dean Rasheed) + + + + In some cases a DEFAULT marker would not get + replaced with the proper default-value expression, leading to + an unrecognized node type error. + + + + + + + Reject uses of undefined variables in jsonpath + existence checks (Alexander Korotkov, David G. Johnston) + + + + While jsonpath match operators threw an error for an + undefined variable in the path pattern, the existence operators + silently treated it as a match. + + + + + + + Fix jsonb subscripting to cope with toasted subscript + values (Tom Lane, David G. Johnston) + + + + Using a text value fetched directly from a table as + a jsonb subscript was likely to fail. + Fetches would usually not find any matching element. + Assignments could store the value with a garbage key, + although keys long enough to cause that problem are probably rare in + the field. + + + + + + + Fix edge-case data corruption in parallel hash joins (Dmitry Astapov) + + + + If the final chunk of a large tuple being written out to a temporary + file was exactly 32760 bytes, it would be corrupted due to a + fencepost bug. The query would typically fail later with + corrupted-data symptoms. + + + + + + + Honor non-default settings + of checkpoint_completion_target + (Bharath Rupireddy) + + + + Internal state was not updated after a change + in checkpoint_completion_target, possibly + resulting in performing checkpoint I/O faster or slower than + desired, especially if that setting was changed on-the-fly. + + + + + + + Log the correct ending timestamp + in recovery_target_xid mode (Tom Lane) + + + + When ending recovery based on the recovery_target_xid + setting with recovery_target_inclusive + = off, we printed an incorrect timestamp (always + 2000-01-01) in the recovery stopping before + ... transaction log message. + + + + + + + Improve error reporting for some buffered file read failures + (Peter Eisentraut) + + + + Correctly report a short read, giving the numbers of bytes desired + and actually read, instead of reporting an irrelevant error code. + Most places got this right already, but some recently-written + replication logic did not. + + + + + + + In extended query protocol, avoid an immediate commit + after ANALYZE if we're running a pipeline + (Tom Lane) + + + + If there's not been an explicit BEGIN + TRANSACTION, ANALYZE would take it on + itself to commit, which should not happen within a pipelined series + of commands. + + + + + + + Reject cancel request packets having the wrong length + (Andrey Borodin) + + + + The server would process a cancel request even if its length word + was too small. This led to reading beyond the end of the allocated + buffer. In theory that could cause a segfault, but it seems quite + unlikely to happen in practice, since the buffer would have to be + very close to the end of memory. The more likely outcome was a bogus + log message about wrong backend PID or cancel code. Complain about + the wrong length, instead. 
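That release-note item corresponds to the postmaster.c hunk near the end of this patch, which rejects the packet when len != sizeof(CancelRequestPacket). A self-contained sketch of the validation pattern; the struct layout here is illustrative rather than the server's exact definition (the real one lives in src/include/libpq/pqcomm.h):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct CancelRequestPacket
{
    uint32_t    cancelRequestCode;  /* special code identifying the packet */
    uint32_t    backendPID;         /* PID of the backend to cancel */
    uint32_t    cancelAuthCode;     /* per-session secret */
} CancelRequestPacket;

static int
handle_cancel_request(const char *buf, size_t len)
{
    CancelRequestPacket pkt;

    /* Validate the claimed length before interpreting the payload. */
    if (len != sizeof(CancelRequestPacket))
    {
        fprintf(stderr, "invalid length of cancel request packet\n");
        return -1;
    }
    memcpy(&pkt, buf, sizeof(pkt));
    /* ... look up backendPID, compare cancelAuthCode, signal backend ... */
    return 0;
}
```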
+ + + + + + + Add recursion and looping defenses in subquery pullup (Tom Lane) + + + + A contrived query can result in deep recursion and unreasonable + amounts of time spent trying to flatten subqueries. A proper fix + for that seems unduly invasive for a back-patch, but we can at least + add stack depth checks and an interrupt check to allow the query to + be cancelled. + + + + + + + Fix planner issues when combining Memoize nodes with partitionwise + joins or parameterized nestloops (Richard Guo) + + + + These errors could lead to not using Memoize in contexts where it + would be useful, or possibly to wrong query plans. + + + + + + + Fix partitionwise-join code to tolerate failure to produce a plan for + each partition (Tom Lane) + + + + This could result in could not devise a query plan for the + given query errors. + + + + + + + Limit the amount of cleanup work done + by get_actual_variable_range (Simon Riggs) + + + + Planner runs occurring just after deletion of a large number of + tuples appearing at the end of an index could expend significant + amounts of work setting the killed bits for those + index entries. Limit the amount of work done in any one query by + giving up on this process after examining 100 heap pages. All the + cleanup will still happen eventually, but without so large a + performance hiccup. + + + + + + + Fix under-parenthesized display of AT TIME ZONE + constructs (Tom Lane) + + + + This could result in dump/restore failures for rules or views in + which an argument of AT TIME ZONE is itself an + expression. + + + + + + + Prevent clobbering of cached parsetrees for utility statements in + SQL functions (Tom Lane, Daniel Gustafsson) + + + + If a SQL-language function executes the same utility command more + than once within a single calling query, it could crash or report + strange errors such as unrecognized node type. + + + + + + + Ensure that execution of full-text-search queries can be cancelled + while they are performing phrase matches (Tom Lane) + + + + + + + Fix memory leak in hashing strings with nondeterministic collations + (Jeff Davis) + + + + + + + Fix deadlock between DROP DATABASE and logical + replication worker process (Hou Zhijie) + + + + This was caused by an ill-advised choice to block interrupts while + creating a logical replication slot in the worker. In version 15 + that could lead to an undetected deadlock. In version 14, no + deadlock has been observed, but it's still a bad idea to block + interrupts while waiting for network I/O. + + + + + + + Clean up the libpq connection object + after a failed replication connection attempt (Andres Freund) + + + + The previous coding leaked the connection object. In background + code paths that's pretty harmless because the calling process will + give up and exit. But in commands such as CREATE + SUBSCRIPTION, such a failure resulted in a small + session-lifespan memory leak. + + + + + + + In hot-standby servers, reduce processing effort for tracking XIDs + known to be active on the primary (Simon Riggs, Michail Nikolaev) + + + + Insufficiently-aggressive cleanup of the KnownAssignedXids array + could lead to poor performance, particularly + when max_connections is set to a large value on + the standby. + + + + + + + Ignore invalidated logical-replication slots while determining + oldest catalog xmin (Sirisha Chamarthi) + + + + A replication slot could prevent cleanup of dead tuples in the + system catalogs even after it becomes invalidated due to + exceeding max_slot_wal_keep_size. 
Thus, failure + of a replication consumer could lead to indefinitely-large catalog + bloat. + + + + + + + In logical decoding, notify the remote node when a transaction is + detected to have crashed (Hou Zhijie) + + + + After a server restart, we'll re-stream the changes for transactions + occurring shortly before the restart. Some of these transactions + probably never completed; when we realize that one didn't we throw + away the relevant decoding state locally, but we neglected to tell + the subscriber about it. That led to the subscriber keeping useless + streaming files until it's next restarted. + + + + + + + Fix uninitialized-memory usage in logical decoding (Masahiko Sawada) + + + + In certain cases, resumption of logical decoding could try to re-use + XID data that had already been freed, leading to unpredictable + behavior. + + + + + + + Avoid rare failed to acquire cleanup lock panic + during WAL replay of hash-index page split operations (Robert Haas) + + + + + + + Advance a heap page's LSN when setting its all-visible bit during + WAL replay (Jeff Davis) + + + + Failure to do this left the page possibly different on standby + servers than the primary, and violated some other expectations about + when the LSN changes. This seems only a theoretical hazard so + far as PostgreSQL itself is concerned, + but it could upset third-party tools. + + + + + + + Prevent unsafe usage of a relation cache + entry's rd_smgr pointer (Amul Sul) + + + + Remove various assumptions that rd_smgr + would stay valid over a series of operations, by wrapping all uses + of it in a function that will recompute it if needed. This prevents + bugs occurring when an unexpected cache flush occurs partway through + such a series. + + + + + + + Fix int64_div_fast_to_numeric() to work for a + wider range of inputs (Dean Rasheed) + + + + This function misbehaved with some values of its second argument. + No such usages exist in core PostgreSQL, + but it's clearly a hazard for external modules, so repair. + + + + + + + Fix latent buffer-overrun problem in WaitEventSet + logic (Thomas Munro) + + + + The epoll-based + and kqueue-based implementations could ask the + kernel for too many events if the size of their internal buffer was + different from the size of the caller's output buffer. That case is + not known to occur in released PostgreSQL + versions, but this error is a hazard for external modules and future + bug fixes. + + + + + + + Avoid nominally-undefined behavior when accessing shared memory in + 32-bit builds (Andres Freund) + + + + clang's undefined-behavior sanitizer complained about use of a + pointer that was less aligned than it should be. It's very unlikely + that this would cause a problem in non-debug builds, but it's worth + fixing for testing purposes. + + + + + + + Fix assertion failure in BRIN minmax-multi opclasses (Tomas Vondra) + + + + The assertion was overly strict, so this mistake was harmless in + non-assert builds. + + + + + + + Remove faulty assertion in useless-RESULT-RTE optimization logic + (Tom Lane) + + + + + + + Fix copy-and-paste errors in cache-lookup-failure messages for ACL + checks (Justin Pryzby) + + + + In principle these errors should never be reached. But if they are, + some of them reported the wrong type of object. 
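The rd_smgr item above describes a general pattern worth seeing in miniature: never hold a flushable cached pointer across a series of operations; fetch it through an accessor that recomputes it on demand. A hedged sketch with stand-in types (the real accessor in PostgreSQL is RelationGetSmgr() in src/include/utils/rel.h):

```c
#include <stdlib.h>

/* Illustrative stand-ins, not the real PostgreSQL structures. */
typedef struct SMgrRelation
{
    int         relid;
} SMgrRelation;

typedef struct Relation
{
    int         rd_id;
    SMgrRelation *rd_smgr;      /* cached; a cache flush may null it anytime */
} Relation;

/* Stand-in for smgropen(): (re)open storage state for a relation. */
static SMgrRelation *
smgropen_stub(int relid)
{
    SMgrRelation *s = malloc(sizeof(SMgrRelation));

    s->relid = relid;
    return s;
}

/*
 * Every use goes through this accessor, so if a cache flush nulls the
 * field partway through a sequence of operations, the pointer is simply
 * recomputed instead of being dereferenced dangling.
 */
static SMgrRelation *
relation_get_smgr(Relation *rel)
{
    if (rel->rd_smgr == NULL)
        rel->rd_smgr = smgropen_stub(rel->rd_id);
    return rel->rd_smgr;
}
```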
+ + + + + + + In pg_dump, + avoid calling unsafe server functions before we have locks on the + tables to be examined (Tom Lane, Gilles Darold) + + + + pg_dump uses certain server functions + that can fail if examining a table that gets dropped concurrently. + Avoid this type of failure by ensuring that we obtain access share + lock before inquiring too deeply into a table's properties, and that + we don't apply such functions to tables we don't intend to dump at + all. + + + + + + + Fix psql's \sf + and \ef commands to handle SQL-language functions + that have SQL-standard function bodies (Tom Lane) + + + + These commands misidentified the start of the function body when it + used new-style syntax. + + + + + + + Fix tab completion of ALTER + FUNCTION/PROCEDURE/ROUTINE ... SET + SCHEMA (Dean Rasheed) + + + + + + + Fix contrib/seg to not crash or print garbage + if an input number has more than 127 digits (Tom Lane) + + + + + + + Fix build on Microsoft Visual Studio 2013 (Tom Lane) + + + + A previous patch supposed that all platforms of interest + have snprintf(), but MSVC 2013 isn't quite + there yet. Revert to using sprintf() on that + platform. + + + + + + + Fix compile failure in building PL/Perl with MSVC when using + Strawberry Perl (Andrew Dunstan) + + + + + + + Fix mismatch of PL/Perl built with MSVC versus a Perl library built + with gcc (Andrew Dunstan) + + + + Such combinations could previously fail with loadable library + and perl binaries are mismatched errors. + + + + + + + Suppress compiler warnings from Perl's header files (Andres Freund) + + + + Our preferred compiler options provoke warnings about constructs + appearing in recent versions of Perl's header files. When using + gcc, we can suppress these warnings with + a pragma. + + + + + + + Fix pg_waldump to build on compilers that + don't discard unused static-inline functions (Tom Lane) + + + + + + + Update time zone data files to tzdata + release 2022g for DST law changes in Greenland and Mexico, + plus historical corrections for northern Canada, Colombia, and + Singapore. + + + + Notably, a new timezone America/Ciudad_Juarez has been split off + from America/Ojinaga. + + + + + + + + Release 14.6 diff --git a/doc/src/sgml/sources.sgml b/doc/src/sgml/sources.sgml index e6ae02f2af7..4ee99853b23 100644 --- a/doc/src/sgml/sources.sgml +++ b/doc/src/sgml/sources.sgml @@ -929,8 +929,8 @@ BETTER: unrecognized node type: 42 Function-Like Macros and Inline Functions - Both, macros with arguments and static inline - functions, may be used. The latter are preferable if there are + Both macros with arguments and static inline + functions may be used. 
The latter are preferable if there are multiple-evaluation hazards when written as a macro, as e.g., the case with diff --git a/doc/src/sgml/spgist.sgml b/doc/src/sgml/spgist.sgml index 00432512de9..102f8627bd0 100644 --- a/doc/src/sgml/spgist.sgml +++ b/doc/src/sgml/spgist.sgml @@ -91,18 +91,7 @@ |>> (box,box) - kd_point_ops - |>> (point,point) - <-> (point,point) - - << (point,point) - >> (point,point) - <<| (point,point) - ~= (point,point) - <@ (point,box) - - - network_ops + inet_ops << (inet,inet) @@ -117,6 +106,17 @@ >= (inet,inet) && (inet,inet) + + kd_point_ops + |>> (point,point) + <-> (point,point) + + << (point,point) + >> (point,point) + <<| (point,point) + ~= (point,point) + <@ (point,box) + poly_ops << (polygon,polygon) diff --git a/doc/src/sgml/textsearch.sgml b/doc/src/sgml/textsearch.sgml index fbe049f0636..fb3cc34c303 100644 --- a/doc/src/sgml/textsearch.sgml +++ b/doc/src/sgml/textsearch.sgml @@ -2902,7 +2902,7 @@ SELECT plainto_tsquery('supernova star'); url="https://www.cs.hmc.edu/~geoff/ispell.html">Ispell. Also, some more modern dictionary file formats are supported — MySpell (OO < 2.0.1) - and Hunspell + and Hunspell (OO >= 2.0.2). A large list of dictionaries is available on the OpenOffice Wiki. diff --git a/doc/src/sgml/user-manag.sgml b/doc/src/sgml/user-manag.sgml index 5420567896d..41b014205f7 100644 --- a/doc/src/sgml/user-manag.sgml +++ b/doc/src/sgml/user-manag.sgml @@ -191,7 +191,7 @@ CREATE USER name; - role creationroleprivilege to create + role creationroleprivilege to create A role must be explicitly given permission to create more roles @@ -200,9 +200,38 @@ CREATE USER name; name CREATEROLE. A role with CREATEROLE privilege can alter and drop other roles, too, as well as grant or revoke membership in them. - However, to create, alter, drop, or change membership of a - superuser role, superuser status is required; - CREATEROLE is insufficient for that. + Altering a role includes most changes that can be made using + ALTER ROLE, including, for example, changing + passwords. It also includes modifications to a role that can + be made using the COMMENT and + SECURITY LABEL commands. + + + However, CREATEROLE does not convey the ability to + create SUPERUSER roles, nor does it convey any + power over SUPERUSER roles that already exist. + Furthermore, CREATEROLE does not convey the power + to create REPLICATION users, nor the ability to + grant or revoke the REPLICATION privilege, nor the + ability to modify the role properties of such users. However, it does + allow ALTER ROLE ... SET and + ALTER ROLE ... RENAME to be used on + REPLICATION roles, as well as the use of + COMMENT ON ROLE, + SECURITY LABEL ON ROLE, + and DROP ROLE. + Finally, CREATEROLE does not + confer the ability to grant or revoke the BYPASSRLS + privilege. + + + Because the CREATEROLE privilege allows a user + to grant or revoke membership even in roles to which it does not (yet) + have any access, a CREATEROLE user can obtain access + to the capabilities of every predefined role in the system, including + highly privileged roles such as + pg_execute_server_program and + pg_write_server_files. @@ -277,16 +306,6 @@ CREATE USER name; and commands for details. - - - It is good practice to create a role that has the CREATEDB - and CREATEROLE privileges, but is not a superuser, and then - use this role for all routine management of databases and roles. This - approach avoids the dangers of operating as a superuser for tasks that - do not really require it. 
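Stepping back to the sources.sgml guideline at the start of this stretch: the multiple-evaluation hazard it mentions is easy to demonstrate. A self-contained example (names are illustrative) of why a static inline function is preferable when an argument has side effects:

```c
#include <stdio.h>

/* Function-like macro: an argument may be evaluated more than once. */
#define MAX_MACRO(a, b) ((a) > (b) ? (a) : (b))

/* Static inline equivalent: each argument is evaluated exactly once. */
static inline int
max_inline(int a, int b)
{
    return a > b ? a : b;
}

/* A helper with a visible side effect, so double evaluation shows up. */
static int
noisy_value(void)
{
    puts("evaluated");
    return 42;
}

int
main(void)
{
    /* Prints "evaluated" twice: the winning argument is evaluated again. */
    (void) MAX_MACRO(noisy_value(), 7);

    /* Prints "evaluated" once: ordinary call-by-value semantics. */
    (void) max_inline(noisy_value(), 7);
    return 0;
}
```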
- - - A role can also have role-specific defaults for many of the run-time configuration settings described in . diff --git a/src/backend/access/brin/brin_minmax_multi.c b/src/backend/access/brin/brin_minmax_multi.c - Assert(neranges >= 2); + Assert(neranges > 0); + + /* If there's only a single range, there's no distance to calculate. */ + if (neranges == 1) + return NULL; ndistances = (neranges - 1); distances = (DistanceValue *) palloc0(sizeof(DistanceValue) * ndistances); diff --git a/src/backend/access/common/bufmask.c b/src/backend/access/common/bufmask.c index 003a0befb25..409acecf42a 100644 --- a/src/backend/access/common/bufmask.c +++ b/src/backend/access/common/bufmask.c @@ -78,7 +78,7 @@ mask_unused_space(Page page) if (pd_lower > pd_upper || pd_special < pd_upper || pd_lower < SizeOfPageHeaderData || pd_special > BLCKSZ) { - elog(ERROR, "invalid page pd_lower %u pd_upper %u pd_special %u\n", + elog(ERROR, "invalid page pd_lower %u pd_upper %u pd_special %u", pd_lower, pd_upper, pd_special); } diff --git a/src/backend/access/hash/hash_xlog.c b/src/backend/access/hash/hash_xlog.c index af35a991fc3..6e2f0c39252 100644 --- a/src/backend/access/hash/hash_xlog.c +++ b/src/backend/access/hash/hash_xlog.c @@ -352,11 +352,10 @@ hash_xlog_split_allocate_page(XLogReaderState *record) } /* replay the record for new bucket */ - newbuf = XLogInitBufferForRedo(record, 1); + XLogReadBufferForRedoExtended(record, 1, RBM_ZERO_AND_CLEANUP_LOCK, true, + &newbuf); _hash_initbuf(newbuf, xlrec->new_bucket, xlrec->new_bucket, xlrec->new_bucket_flag, true); - if (!IsBufferCleanupOK(newbuf)) - elog(PANIC, "hash_xlog_split_allocate_page: failed to acquire cleanup lock"); MarkBufferDirty(newbuf); PageSetLSN(BufferGetPage(newbuf), lsn); diff --git a/src/backend/access/hash/hashfunc.c b/src/backend/access/hash/hashfunc.c index 242333920e7..2c00de50418 100644 --- a/src/backend/access/hash/hashfunc.c +++ b/src/backend/access/hash/hashfunc.c @@ -303,6 +303,7 @@ hashtext(PG_FUNCTION_ARGS) buf = palloc(bsize); ucol_getSortKey(mylocale->info.icu.ucol, uchar, ulen, buf, bsize); + pfree(uchar); result = hash_any(buf, bsize); @@ -360,6 +361,7 @@ hashtextextended(PG_FUNCTION_ARGS) buf = palloc(bsize); ucol_getSortKey(mylocale->info.icu.ucol, uchar, ulen, buf, bsize); + pfree(uchar); result = hash_any_extended(buf, bsize, PG_GETARG_INT64(1)); diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c index 7ee505c6bab..30800705340 100644 --- a/src/backend/access/hash/hashpage.c +++ b/src/backend/access/hash/hashpage.c @@ -804,9 +804,13 @@ _hash_expandtable(Relation rel, Buffer metabuf) /* * Physically allocate the new bucket's primary page. We want to do this * before changing the metapage's mapping info, in case we can't get the - * disk space. Ideally, we don't need to check for cleanup lock on new - * bucket as no other backend could find this bucket unless meta page is - * updated. However, it is good to be consistent with old bucket locking. + * disk space. + * + * XXX It doesn't make sense to call _hash_getnewbuf first, zeroing the + * buffer, and then only afterwards check whether we have a cleanup lock. + * However, since no scan can be accessing the buffer yet, any concurrent + * accesses will just be from processes like the bgwriter or checkpointer + * which don't care about its contents, so it doesn't really matter.
*/ buf_nblkno = _hash_getnewbuf(rel, start_nblkno, MAIN_FORKNUM); if (!IsBufferCleanupOK(buf_nblkno)) diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index b398e307c97..70574f6db41 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -8912,8 +8912,7 @@ heap_xlog_visible(XLogReaderState *record) /* * We don't bump the LSN of the heap page when setting the visibility * map bit (unless checksums or wal_hint_bits is enabled, in which - * case we must), because that would generate an unworkable volume of - * full-page writes. This exposes us to torn page hazards, but since + * case we must). This exposes us to torn page hazards, but since * we're not inspecting the existing page contents in any way, we * don't care. * @@ -8927,6 +8926,9 @@ heap_xlog_visible(XLogReaderState *record) PageSetAllVisible(page); + if (XLogHintBitIsNeeded()) + PageSetLSN(page, lsn); + MarkBufferDirty(buffer); } else if (action == BLK_RESTORED) diff --git a/src/backend/access/spgist/spgutils.c b/src/backend/access/spgist/spgutils.c index e3e663a006e..44e7f7a1ba2 100644 --- a/src/backend/access/spgist/spgutils.c +++ b/src/backend/access/spgist/spgutils.c @@ -1301,7 +1301,7 @@ spgproperty(Oid index_oid, int attno, /* * Currently, SP-GiST distance-ordered scans require that there be a * distance operator in the opclass with the default types. So we assume - * that if such a operator exists, then there's a reason for it. + * that if such an operator exists, then there's a reason for it. */ /* First we need to know the column's opclass. */ diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c index ea9e7815a6f..7d058217e99 100644 --- a/src/backend/access/transam/multixact.c +++ b/src/backend/access/transam/multixact.c @@ -799,7 +799,8 @@ MultiXactIdCreateFromMembers(int nmembers, MultiXactMember *members) if (ISUPDATE_from_mxstatus(members[i].status)) { if (has_update) - elog(ERROR, "new multixact has more than one updating member"); + elog(ERROR, "new multixact has more than one updating member: %s", + mxid_to_string(InvalidMultiXactId, nmembers, members)); has_update = true; } } diff --git a/src/backend/access/transam/slru.c b/src/backend/access/transam/slru.c index a1950fd0944..6da20d7531d 100644 --- a/src/backend/access/transam/slru.c +++ b/src/backend/access/transam/slru.c @@ -181,6 +181,7 @@ SimpleLruShmemSize(int nslots, int nlsns) * ctllock: LWLock to use to control access to the shared control structure. * subdir: PGDATA-relative subdirectory that will contain the files. * tranche_id: LWLock tranche ID to use for the SLRU's per-buffer LWLocks. + * sync_handler: which set of functions to use to handle sync requests */ void SimpleLruInit(SlruCtl ctl, const char *name, int nslots, int nlsns, diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c index ed655baf989..d6b44fc8412 100644 --- a/src/backend/access/transam/xact.c +++ b/src/backend/access/transam/xact.c @@ -4410,6 +4410,10 @@ AbortCurrentTransaction(void) * a transaction block, typically because they have non-rollback-able * side effects or do internal commits. * + * If this routine completes successfully, then the calling statement is + * guaranteed that if it completes without error, its results will be + * committed immediately. 
+ * * If we have already started a transaction block, issue an error; also issue * an error if we appear to be running inside a user-defined function (which * could issue more commands and possibly cause a failure after the statement @@ -4446,6 +4450,16 @@ PreventInTransactionBlock(bool isTopLevel, const char *stmtType) errmsg("%s cannot run inside a subtransaction", stmtType))); + /* + * inside a pipeline that has started an implicit transaction? + */ + if (MyXactFlags & XACT_FLAGS_PIPELINING) + ereport(ERROR, + (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION), + /* translator: %s represents an SQL statement name */ + errmsg("%s cannot be executed within a pipeline", + stmtType))); + /* * inside a function call? */ @@ -4535,6 +4549,12 @@ CheckTransactionBlock(bool isTopLevel, bool throwError, const char *stmtType) * a transaction block than when running as single commands. ANALYZE is * currently the only example. * + * If this routine returns "false", then the calling statement is allowed + * to perform internal transaction-commit-and-start cycles; there is not a + * risk of messing up any transaction already in progress. (Note that this + * is not the identical guarantee provided by PreventInTransactionBlock, + * since we will not force a post-statement commit.) + * * isTopLevel: passed down from ProcessUtility to determine whether we are * inside a function. */ @@ -4551,6 +4571,9 @@ IsInTransactionBlock(bool isTopLevel) if (IsSubTransaction()) return true; + if (MyXactFlags & XACT_FLAGS_PIPELINING) + return true; + if (!isTopLevel) return true; @@ -4558,13 +4581,6 @@ IsInTransactionBlock(bool isTopLevel) CurrentTransactionState->blockState != TBLOCK_STARTED) return true; - /* - * If we tell the caller we're not in a transaction block, then inform - * postgres.c that it had better commit when the statement is done. - * Otherwise our report could be a lie. - */ - MyXactFlags |= XACT_FLAGS_NEEDIMMEDIATECOMMIT; - return false; } diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c index 1ec6b82938a..17ebce7303e 100644 --- a/src/backend/access/transam/xlog.c +++ b/src/backend/access/transam/xlog.c @@ -5985,8 +5985,13 @@ recoveryStopsBefore(XLogReaderState *record) stopsHere = (recordXid == recoveryTargetXid); } - if (recoveryTarget == RECOVERY_TARGET_TIME && - getRecordTimestamp(record, &recordXtime)) + /* + * Note: we must fetch recordXtime regardless of recoveryTarget setting. + * We don't expect getRecordTimestamp ever to fail, since we already know + * this is a commit or abort record; but test its result anyway. + */ + if (getRecordTimestamp(record, &recordXtime) && + recoveryTarget == RECOVERY_TARGET_TIME) { /* * There can be many transactions that share the same commit time, so @@ -13503,6 +13508,9 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess, wait_time = wal_retrieve_retry_interval - TimestampDifferenceMilliseconds(last_fail_time, now); + /* Do background tasks that might benefit us later. */ + KnownAssignedTransactionIdsIdleMaintenance(); + (void) WaitLatch(&XLogCtl->recoveryWakeupLatch, WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH, @@ -13779,6 +13787,9 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess, streaming_reply_sent = true; } + /* Do any background tasks that might benefit us later. */ + KnownAssignedTransactionIdsIdleMaintenance(); + /* * Wait for more WAL to arrive. 
Time out after 5 seconds * to react to a trigger file promptly and to check if the diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c index 219f64f5298..44011306df2 100644 --- a/src/backend/catalog/aclchk.c +++ b/src/backend/catalog/aclchk.c @@ -6402,7 +6402,7 @@ recordExtObjInitPriv(Oid objoid, Oid classoid) tuple = SearchSysCache1(FOREIGNSERVEROID, ObjectIdGetDatum(objoid)); if (!HeapTupleIsValid(tuple)) - elog(ERROR, "cache lookup failed for foreign data wrapper %u", + elog(ERROR, "cache lookup failed for foreign server %u", objoid); aclDatum = SysCacheGetAttr(FOREIGNSERVEROID, tuple, @@ -6488,7 +6488,7 @@ recordExtObjInitPriv(Oid objoid, Oid classoid) tuple = SearchSysCache1(NAMESPACEOID, ObjectIdGetDatum(objoid)); if (!HeapTupleIsValid(tuple)) - elog(ERROR, "cache lookup failed for function %u", objoid); + elog(ERROR, "cache lookup failed for schema %u", objoid); aclDatum = SysCacheGetAttr(NAMESPACEOID, tuple, Anum_pg_namespace_nspacl, &isNull); @@ -6530,7 +6530,7 @@ recordExtObjInitPriv(Oid objoid, Oid classoid) tuple = SearchSysCache1(TYPEOID, ObjectIdGetDatum(objoid)); if (!HeapTupleIsValid(tuple)) - elog(ERROR, "cache lookup failed for function %u", objoid); + elog(ERROR, "cache lookup failed for type %u", objoid); aclDatum = SysCacheGetAttr(TYPEOID, tuple, Anum_pg_type_typacl, &isNull); diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c index a775f32e2b0..24a82a29dca 100644 --- a/src/backend/catalog/index.c +++ b/src/backend/catalog/index.c @@ -3661,9 +3661,8 @@ index_set_state_flags(Oid indexId, IndexStateFlagsAction action) * CONCURRENTLY that failed partway through.) * * Note: the CLUSTER logic assumes that indisclustered cannot be - * set on any invalid index, so clear that flag too. Similarly, - * ALTER TABLE assumes that indisreplident cannot be set for - * invalid indexes. + * set on any invalid index, so clear that flag too. For + * cleanliness, also clear indisreplident. */ indexForm->indisvalid = false; indexForm->indisclustered = false; diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c index 08ef3902a07..29a7a57e681 100644 --- a/src/backend/commands/tablecmds.c +++ b/src/backend/commands/tablecmds.c @@ -19622,7 +19622,10 @@ ATExecDropOf(Relation rel, LOCKMODE lockmode) * relation_mark_replica_identity: Update a table's replica identity * * Iff ri_type = REPLICA_IDENTITY_INDEX, indexOid must be the Oid of a suitable - * index. Otherwise, it should be InvalidOid. + * index. Otherwise, it must be InvalidOid. + * + * Caller had better hold an exclusive lock on the relation, as the results + * of running two of these concurrently wouldn't be pretty. */ static void relation_mark_replica_identity(Relation rel, char ri_type, Oid indexOid, @@ -19634,7 +19637,6 @@ relation_mark_replica_identity(Relation rel, char ri_type, Oid indexOid, HeapTuple pg_index_tuple; Form_pg_class pg_class_form; Form_pg_index pg_index_form; - ListCell *index; /* @@ -19656,29 +19658,7 @@ relation_mark_replica_identity(Relation rel, char ri_type, Oid indexOid, heap_freetuple(pg_class_tuple); /* - * Check whether the correct index is marked indisreplident; if so, we're - * done. 
- */ - if (OidIsValid(indexOid)) - { - Assert(ri_type == REPLICA_IDENTITY_INDEX); - - pg_index_tuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(indexOid)); - if (!HeapTupleIsValid(pg_index_tuple)) - elog(ERROR, "cache lookup failed for index %u", indexOid); - pg_index_form = (Form_pg_index) GETSTRUCT(pg_index_tuple); - - if (pg_index_form->indisreplident) - { - ReleaseSysCache(pg_index_tuple); - return; - } - ReleaseSysCache(pg_index_tuple); - } - - /* - * Clear the indisreplident flag from any index that had it previously, - * and set it for any index that should have it now. + * Update the per-index indisreplident flags correctly. */ pg_index = table_open(IndexRelationId, RowExclusiveLock); foreach(index, RelationGetIndexList(rel)) @@ -19692,19 +19672,23 @@ relation_mark_replica_identity(Relation rel, char ri_type, Oid indexOid, elog(ERROR, "cache lookup failed for index %u", thisIndexOid); pg_index_form = (Form_pg_index) GETSTRUCT(pg_index_tuple); - /* - * Unset the bit if set. We know it's wrong because we checked this - * earlier. - */ - if (pg_index_form->indisreplident) + if (thisIndexOid == indexOid) { - dirty = true; - pg_index_form->indisreplident = false; + /* Set the bit if not already set. */ + if (!pg_index_form->indisreplident) + { + dirty = true; + pg_index_form->indisreplident = true; + } } - else if (thisIndexOid == indexOid) + else { - dirty = true; - pg_index_form->indisreplident = true; + /* Unset the bit if set. */ + if (pg_index_form->indisreplident) + { + dirty = true; + pg_index_form->indisreplident = false; + } } if (dirty) @@ -19715,7 +19699,9 @@ relation_mark_replica_identity(Relation rel, char ri_type, Oid indexOid, /* * Invalidate the relcache for the table, so that after we commit * all sessions will refresh the table's replica identity index - * before attempting any UPDATE or DELETE on the table. + * before attempting any UPDATE or DELETE on the table. (If we + * changed the table's pg_class row above, then a relcache inval + * is already queued due to that; but we might not have.) */ CacheInvalidateRelcache(rel); } @@ -19800,12 +19786,6 @@ ATExecReplicaIdentity(Relation rel, ReplicaIdentityStmt *stmt, LOCKMODE lockmode (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot use partial index \"%s\" as replica identity", RelationGetRelationName(indexRel)))); - /* And neither are invalid indexes. */ - if (!indexRel->rd_index->indisvalid) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot use invalid index \"%s\" as replica identity", - RelationGetRelationName(indexRel)))); /* Check index for nullable columns. 
*/ for (key = 0; key < IndexRelationGetNumberOfKeyAttributes(indexRel); key++) diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c index 820d4c60cc0..41da04f652b 100644 --- a/src/backend/executor/functions.c +++ b/src/backend/executor/functions.c @@ -1013,7 +1013,7 @@ postquel_getnext(execution_state *es, SQLFunctionCachePtr fcache) { ProcessUtility(es->qd->plannedstmt, fcache->src, - false, + true, /* protect function cache's parsetree */ PROCESS_UTILITY_QUERY, es->qd->params, es->qd->queryEnv, diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c index 7513d11102f..09c8991178a 100644 --- a/src/backend/executor/nodeAgg.c +++ b/src/backend/executor/nodeAgg.c @@ -3867,6 +3867,11 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) { int length = phasedata->gset_lengths[i]; + /* nothing to do for empty grouping set */ + if (length == 0) + continue; + + /* if we already had one of this length, it'll do */ if (phasedata->eqfunctions[length - 1] != NULL) continue; diff --git a/src/backend/jit/llvm/llvmjit.c b/src/backend/jit/llvm/llvmjit.c index 199fff4f773..e8eb83ebd38 100644 --- a/src/backend/jit/llvm/llvmjit.c +++ b/src/backend/jit/llvm/llvmjit.c @@ -816,7 +816,7 @@ llvm_session_initialize(void) if (LLVMGetTargetFromTriple(llvm_triple, &llvm_targetref, &error) != 0) { - elog(FATAL, "failed to query triple %s\n", error); + elog(FATAL, "failed to query triple %s", error); } /* diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c index c02fcd4ea73..10e41de8cd9 100644 --- a/src/backend/optimizer/path/allpaths.c +++ b/src/backend/optimizer/path/allpaths.c @@ -4855,12 +4855,24 @@ generate_partitionwise_join_paths(PlannerInfo *root, RelOptInfo *rel) if (child_rel == NULL) continue; - /* Add partitionwise join paths for partitioned child-joins. */ + /* Make partitionwise join paths for this partitioned child-join. */ generate_partitionwise_join_paths(root, child_rel); + /* If we failed to make any path for this child, we must give up. */ + if (child_rel->pathlist == NIL) + { + /* + * Mark the parent joinrel as unpartitioned so that later + * functions treat it correctly. + */ + rel->nparts = 0; + return; + } + + /* Else, identify the cheapest path for it. */ set_cheapest(child_rel); - /* Dummy children will not be scanned, so ignore those. */ + /* Dummy children need not be scanned, so ignore those. */ if (IS_DUMMY_REL(child_rel)) continue; diff --git a/src/backend/optimizer/path/joinpath.c b/src/backend/optimizer/path/joinpath.c index ec50c66104a..d4c2b793bb5 100644 --- a/src/backend/optimizer/path/joinpath.c +++ b/src/backend/optimizer/path/joinpath.c @@ -600,6 +600,7 @@ get_memoize_path(PlannerInfo *root, RelOptInfo *innerrel, Path *outer_path, JoinType jointype, JoinPathExtraData *extra) { + RelOptInfo *top_outerrel; List *param_exprs; List *hash_operators; ListCell *lc; @@ -689,10 +690,21 @@ get_memoize_path(PlannerInfo *root, RelOptInfo *innerrel, return NULL; } + /* + * When considering a partitionwise join, we have clauses that reference + * the outerrel's top parent not outerrel itself. 
+ */ + if (outerrel->reloptkind == RELOPT_OTHER_MEMBER_REL) + top_outerrel = find_base_rel(root, bms_singleton_member(outerrel->top_parent_relids)); + else if (outerrel->reloptkind == RELOPT_OTHER_JOINREL) + top_outerrel = find_join_rel(root, outerrel->top_parent_relids); + else + top_outerrel = outerrel; + /* Check if we have hash ops for each parameter to the path */ if (paraminfo_get_equal_hashops(root, inner_path->param_info, - outerrel, + top_outerrel, innerrel, ¶m_exprs, &hash_operators, diff --git a/src/backend/optimizer/prep/prepjointree.c b/src/backend/optimizer/prep/prepjointree.c index eca7ac93a2d..9d245c31e44 100644 --- a/src/backend/optimizer/prep/prepjointree.c +++ b/src/backend/optimizer/prep/prepjointree.c @@ -29,6 +29,7 @@ #include "catalog/pg_type.h" #include "funcapi.h" +#include "miscadmin.h" #include "nodes/makefuncs.h" #include "nodes/nodeFuncs.h" #include "optimizer/clauses.h" @@ -243,6 +244,9 @@ static Node * pull_up_sublinks_jointree_recurse(PlannerInfo *root, Node *jtnode, Relids *relids) { + /* Since this function recurses, it could be driven to stack overflow. */ + check_stack_depth(); + if (jtnode == NULL) { *relids = NULL; @@ -900,6 +904,11 @@ pull_up_subqueries_recurse(PlannerInfo *root, Node *jtnode, JoinExpr *lowest_nulling_outer_join, AppendRelInfo *containing_appendrel) { + /* Since this function recurses, it could be driven to stack overflow. */ + check_stack_depth(); + /* Also, since it's a bit expensive, let's check for query cancel. */ + CHECK_FOR_INTERRUPTS(); + Assert(jtnode != NULL); if (IsA(jtnode, RangeTblRef)) { @@ -2057,6 +2066,9 @@ is_simple_union_all(Query *subquery) static bool is_simple_union_all_recurse(Node *setOp, Query *setOpQuery, List *colTypes) { + /* Since this function recurses, it could be driven to stack overflow. */ + check_stack_depth(); + if (IsA(setOp, RangeTblRef)) { RangeTblRef *rtr = (RangeTblRef *) setOp; @@ -3412,16 +3424,6 @@ remove_useless_results_recurse(PlannerInfo *root, Node *jtnode) jtnode = j->larg; } break; - case JOIN_RIGHT: - /* Mirror-image of the JOIN_LEFT case */ - if ((varno = get_result_relid(root, j->larg)) != 0 && - (j->quals == NULL || - !find_dependent_phvs(root, varno))) - { - remove_result_refs(root, varno, j->rarg); - jtnode = j->rarg; - } - break; case JOIN_SEMI: /* @@ -3430,14 +3432,17 @@ remove_useless_results_recurse(PlannerInfo *root, Node *jtnode) * LHS, since we should either return the LHS row or not. For * simplicity we inject the filter qual into a new FromExpr. * - * Unlike the LEFT/RIGHT cases, we just Assert that there are - * no PHVs that need to be evaluated at the semijoin's RHS, - * since the rest of the query couldn't reference any outputs - * of the semijoin's RHS. + * There is a fine point about PHVs that are supposed to be + * evaluated at the RHS. Such PHVs could only appear in the + * semijoin's qual, since the rest of the query cannot + * reference any outputs of the semijoin's RHS. Therefore, + * they can't actually go to null before being examined, and + * it'd be OK to just remove the PHV wrapping. We don't have + * infrastructure for that, but remove_result_refs() will + * relabel them as to be evaluated at the LHS, which is fine. 
*/ if ((varno = get_result_relid(root, j->rarg)) != 0) { - Assert(!find_dependent_phvs(root, varno)); remove_result_refs(root, varno, j->larg); if (j->quals) jtnode = (Node *) @@ -3456,6 +3461,7 @@ remove_useless_results_recurse(PlannerInfo *root, Node *jtnode) break; default: + /* Note: JOIN_RIGHT should be gone at this point */ elog(ERROR, "unrecognized join type: %d", (int) j->jointype); break; diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c index 24b795dde17..4648a2f2719 100644 --- a/src/backend/optimizer/util/pathnode.c +++ b/src/backend/optimizer/util/pathnode.c @@ -6293,9 +6293,15 @@ reparameterize_path(PlannerInfo *root, Path *path, case T_Memoize: { MemoizePath *mpath = (MemoizePath *) path; + Path *spath = mpath->subpath; + spath = reparameterize_path(root, spath, + required_outer, + loop_count); + if (spath == NULL) + return NULL; return (Path *) create_memoize_path(root, rel, - mpath->subpath, + spath, mpath->param_exprs, mpath->hash_operators, mpath->singlerow, @@ -6526,6 +6532,7 @@ do { \ FLAT_COPY_PATH(mpath, path, MemoizePath); REPARAMETERIZE_CHILD_PATH(mpath->subpath); + ADJUST_CHILD_ATTRS(mpath->param_exprs); new_path = (Path *) mpath; } break; diff --git a/src/backend/optimizer/util/tlist.c b/src/backend/optimizer/util/tlist.c index bb16a8d2c38..cfe9d3ea39d 100644 --- a/src/backend/optimizer/util/tlist.c +++ b/src/backend/optimizer/util/tlist.c @@ -1108,7 +1108,7 @@ apply_pathtarget_labeling_to_tlist(List *tlist, PathTarget *target) * * The outputs of this function are two parallel lists, one a list of * PathTargets and the other an integer list of bool flags indicating - * whether the corresponding PathTarget contains any evaluatable SRFs. + * whether the corresponding PathTarget contains any evaluable SRFs. * The lists are given in the order they'd need to be evaluated in, with * the "lowest" PathTarget first. 
So the last list entry is always the * originally given PathTarget, and any entries before it indicate evaluation diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c index 4500c711ff5..8f21a90f391 100644 --- a/src/backend/postmaster/postmaster.c +++ b/src/backend/postmaster/postmaster.c @@ -2368,6 +2368,13 @@ ProcessStartupPacket(Port *port, bool ssl_done, bool gss_done) if (proto == CANCEL_REQUEST_CODE || proto == FINISH_REQUEST_CODE) { + if (len != sizeof(CancelRequestPacket)) + { + ereport(COMMERROR, + (errcode(ERRCODE_PROTOCOL_VIOLATION), + errmsg("invalid length of startup packet"))); + return STATUS_ERROR; + } processCancelRequest(port, buf, proto); /* Not really an error, but we don't want to proceed further */ return STATUS_ERROR; diff --git a/src/backend/replication/backup_manifest.c b/src/backend/replication/backup_manifest.c index 5add6672b2a..28646bdd334 100644 --- a/src/backend/replication/backup_manifest.c +++ b/src/backend/replication/backup_manifest.c @@ -377,7 +377,8 @@ SendBackupManifest(backup_manifest_info *manifest) if (rc != bytes_to_read) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not read from temporary file: %m"))); + errmsg("could not read from temporary file: read only %zu of %zu bytes", + rc, bytes_to_read))); pq_putmessage('d', manifestbuf, bytes_to_read); manifest_bytes_done += bytes_to_read; } diff --git a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c index 4bcf0b322dc..4f1f7abd006 100644 --- a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c +++ b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c @@ -170,10 +170,7 @@ libpqrcv_connect(const char *conninfo, bool logical, const char *appname, conn->streamConn = PQconnectStartParams(keys, vals, /* expand_dbname = */ true); if (PQstatus(conn->streamConn) == CONNECTION_BAD) - { - *err = pchomp(PQerrorMessage(conn->streamConn)); - return NULL; - } + goto bad_connection_errmsg; /* * Poll connection until we have OK or FAILED status. 
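The hardening added to ProcessStartupPacket above reduces to one rule: never reinterpret a wire buffer as a fixed-size struct until the advertised length has been checked. A minimal freestanding sketch of that rule, using invented names (DemoCancelPacket, demo_process_packet) rather than the real CancelRequestPacket definition:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

typedef struct
{
    uint32_t    cancelRequestCode;  /* fixed magic value on the wire */
    uint32_t    backendPID;
    uint32_t    cancelAuthCode;
} DemoCancelPacket;

static int
demo_process_packet(const char *buf, size_t len)
{
    DemoCancelPacket pkt;

    if (len != sizeof(DemoCancelPacket))
    {
        fprintf(stderr, "invalid length of startup packet\n");
        return -1;              /* reject, as the patch now does */
    }
    memcpy(&pkt, buf, sizeof(pkt)); /* safe: length verified above */
    printf("cancel request for PID %u\n", pkt.backendPID);
    return 0;
}

int
main(void)
{
    char        good[12] = {0};
    char        bad[5] = {0};

    demo_process_packet(good, sizeof(good));
    demo_process_packet(bad, sizeof(bad));
    return 0;
}

Rejecting the packet outright, rather than clamping or zero-padding, matches the patch's behavior: a cancel request with the wrong length is simply not a cancel request.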
@@ -215,10 +212,7 @@ libpqrcv_connect(const char *conninfo, bool logical, const char *appname, } while (status != PGRES_POLLING_OK && status != PGRES_POLLING_FAILED); if (PQstatus(conn->streamConn) != CONNECTION_OK) - { - *err = pchomp(PQerrorMessage(conn->streamConn)); - return NULL; - } + goto bad_connection_errmsg; if (logical) { @@ -229,9 +223,9 @@ libpqrcv_connect(const char *conninfo, bool logical, const char *appname, if (PQresultStatus(res) != PGRES_TUPLES_OK) { PQclear(res); - ereport(ERROR, - (errmsg("could not clear search path: %s", - pchomp(PQerrorMessage(conn->streamConn))))); + *err = psprintf(_("could not clear search path: %s"), + pchomp(PQerrorMessage(conn->streamConn))); + goto bad_connection; } PQclear(res); } @@ -239,6 +233,16 @@ libpqrcv_connect(const char *conninfo, bool logical, const char *appname, conn->logical = logical; return conn; + + /* error path, using libpq's error message */ +bad_connection_errmsg: + *err = pchomp(PQerrorMessage(conn->streamConn)); + + /* error path, error already set */ +bad_connection: + PQfinish(conn->streamConn); + pfree(conn); + return NULL; } /* diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c index 271a74c6908..709365fc8c6 100644 --- a/src/backend/replication/logical/reorderbuffer.c +++ b/src/backend/replication/logical/reorderbuffer.c @@ -2898,6 +2898,10 @@ ReorderBufferAbortOld(ReorderBuffer *rb, TransactionId oldestRunningXid) { elog(DEBUG2, "aborting old transaction %u", txn->xid); + /* Notify the remote node about the crash/immediate restart. */ + if (rbtxn_is_streamed(txn)) + rb->stream_abort(rb, txn, InvalidXLogRecPtr); + /* remove potential on-disk data, and deallocate this tx */ ReorderBufferCleanupTXN(rb, txn); } @@ -3197,16 +3201,17 @@ ReorderBufferAddNewTupleCids(ReorderBuffer *rb, TransactionId xid, } /* - * Setup the invalidation of the toplevel transaction. + * Accumulate the invalidations for executing them later. * * This needs to be called for each XLOG_XACT_INVALIDATIONS message and - * accumulates all the invalidation messages in the toplevel transaction as - * well as in the form of change in reorder buffer. We require to record it in - * form of the change so that we can execute only the required invalidations - * instead of executing all the invalidations on each CommandId increment. We - * also need to accumulate these in the toplevel transaction because in some - * cases we skip processing the transaction (see ReorderBufferForget), we need - * to execute all the invalidations together. + * accumulates all the invalidation messages in the toplevel transaction, if + * available, otherwise in the current transaction, as well as in the form of + * change in reorder buffer. We require to record it in form of the change + * so that we can execute only the required invalidations instead of executing + * all the invalidations on each CommandId increment. We also need to + * accumulate these in the txn buffer because in some cases where we skip + * processing the transaction (see ReorderBufferForget), we need to execute + * all the invalidations together. */ void ReorderBufferAddInvalidations(ReorderBuffer *rb, TransactionId xid, @@ -3222,8 +3227,9 @@ ReorderBufferAddInvalidations(ReorderBuffer *rb, TransactionId xid, oldcontext = MemoryContextSwitchTo(rb->context); /* - * Collect all the invalidations under the top transaction so that we can - * execute them all together. 
See comment atop this function + * Collect all the invalidations under the top transaction, if available, + * so that we can execute them all together. See comments atop this + * function. */ if (txn->toptxn) txn = txn->toptxn; diff --git a/src/backend/replication/logical/snapbuild.c b/src/backend/replication/logical/snapbuild.c index 50df199f01f..1df82bc9ddc 100644 --- a/src/backend/replication/logical/snapbuild.c +++ b/src/backend/replication/logical/snapbuild.c @@ -343,6 +343,9 @@ AllocateSnapshotBuilder(ReorderBuffer *reorder, MemoryContextSwitchTo(oldcontext); + /* The initial running transactions array must be empty. */ + Assert(NInitialRunningXacts == 0 && InitialRunningXacts == NULL); + return builder; } @@ -363,6 +366,10 @@ FreeSnapshotBuilder(SnapBuild *builder) /* other resources are deallocated via memory context reset */ MemoryContextDelete(context); + + /* InitialRunningXacts is freed along with the context */ + NInitialRunningXacts = 0; + InitialRunningXacts = NULL; } /* diff --git a/src/backend/replication/logical/tablesync.c b/src/backend/replication/logical/tablesync.c index d104ccf86e1..75df2dc2cc6 100644 --- a/src/backend/replication/logical/tablesync.c +++ b/src/backend/replication/logical/tablesync.c @@ -1076,16 +1076,9 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos) * Create a new permanent logical decoding slot. This slot will be used * for the catchup phase after COPY is done, so tell it to use the * snapshot to make the final data consistent. - * - * Prevent cancel/die interrupts while creating slot here because it is - * possible that before the server finishes this command, a concurrent - * drop subscription happens which would complete without removing this - * slot leading to a dangling slot on the server. */ - HOLD_INTERRUPTS(); walrcv_create_slot(LogRepWorkerWalRcvConn, slotname, false /* permanent */ , CRS_USE_SNAPSHOT, origin_startpos); - RESUME_INTERRUPTS(); /* * Setup replication origin tracking. 
The purpose of doing this before the diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c index cb8f76d902a..67dd1a97a6f 100644 --- a/src/backend/replication/logical/worker.c +++ b/src/backend/replication/logical/worker.c @@ -1071,7 +1071,7 @@ apply_handle_stream_commit(StringInfo s) nchanges = 0; while (true) { - int nbytes; + size_t nbytes; int len; CHECK_FOR_INTERRUPTS(); @@ -1087,8 +1087,8 @@ apply_handle_stream_commit(StringInfo s) if (nbytes != sizeof(len)) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not read from streaming transaction's changes file \"%s\": %m", - path))); + errmsg("could not read from streaming transaction's changes file \"%s\": read only %zu of %zu bytes", + path, nbytes, sizeof(len)))); if (len <= 0) elog(ERROR, "incorrect length %d in streaming transaction's changes file \"%s\"", @@ -1098,11 +1098,12 @@ apply_handle_stream_commit(StringInfo s) buffer = repalloc(buffer, len); /* and finally read the data into the buffer */ - if (BufFileRead(fd, buffer, len) != len) + nbytes = BufFileRead(fd, buffer, len); + if (nbytes != len) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not read from streaming transaction's changes file \"%s\": %m", - path))); + errmsg("could not read from streaming transaction's changes file \"%s\": read only %zu of %zu bytes", + path, nbytes, (size_t) len))); /* copy the buffer to the stringinfo and call apply_dispatch */ resetStringInfo(&s2); @@ -2717,6 +2718,7 @@ static void subxact_info_read(Oid subid, TransactionId xid) { char path[MAXPGPATH]; + size_t nread; Size len; BufFile *fd; StreamXidHash *ent; @@ -2749,13 +2751,12 @@ subxact_info_read(Oid subid, TransactionId xid) fd = BufFileOpenShared(ent->subxact_fileset, path, O_RDONLY); /* read number of subxact items */ - if (BufFileRead(fd, &subxact_data.nsubxacts, - sizeof(subxact_data.nsubxacts)) != - sizeof(subxact_data.nsubxacts)) + nread = BufFileRead(fd, &subxact_data.nsubxacts, sizeof(subxact_data.nsubxacts)); + if (nread != sizeof(subxact_data.nsubxacts)) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not read from streaming transaction's subxact file \"%s\": %m", - path))); + errmsg("could not read from streaming transaction's subxact file \"%s\": read only %zu of %zu bytes", + path, nread, sizeof(subxact_data.nsubxacts)))); len = sizeof(SubXactInfo) * subxact_data.nsubxacts; @@ -2773,11 +2774,15 @@ subxact_info_read(Oid subid, TransactionId xid) sizeof(SubXactInfo)); MemoryContextSwitchTo(oldctx); - if ((len > 0) && ((BufFileRead(fd, subxact_data.subxacts, len)) != len)) + if (len > 0) + { + nread = BufFileRead(fd, subxact_data.subxacts, len); + if (nread != len) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not read from streaming transaction's subxact file \"%s\": %m", - path))); + errmsg("could not read from streaming transaction's subxact file \"%s\": read only %zu of %zu bytes", + path, nread, len))); + } BufFileClose(fd); } diff --git a/src/backend/rewrite/rewriteHandler.c b/src/backend/rewrite/rewriteHandler.c index 48d1caace62..2938edf1c6a 100644 --- a/src/backend/rewrite/rewriteHandler.c +++ b/src/backend/rewrite/rewriteHandler.c @@ -468,6 +468,10 @@ rewriteRuleAction(Query *parsetree, * NOTE: because planner will destructively alter rtable, we must ensure * that rule action's rtable is separate and shares no substructure with * the main rtable. Hence do a deep copy here. 
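The deep-copy requirement stated above is the classic shared-substructure hazard. A freestanding illustration, with an invented Cell type standing in for PostgreSQL's List cells, of why handing the planner an rtable that shares cells with the rule action would be unsafe:

#include <stdio.h>
#include <stdlib.h>

typedef struct Cell
{
    int         value;
    struct Cell *next;
} Cell;

static Cell *
cons(int value, Cell *next)
{
    Cell       *c = malloc(sizeof(Cell));

    c->value = value;
    c->next = next;
    return c;
}

int
main(void)
{
    Cell       *shared = cons(2, cons(3, NULL));
    Cell       *a = cons(1, shared);    /* a = [1,2,3], tail shared */
    Cell       *b = cons(0, shared);    /* b = [0,2,3], same tail */

    a->next->value = 99;        /* "planner" scribbles through a ... */
    printf("b's second element is now %d\n", b->next->value);   /* 99 */
    return 0;
}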
+ * + * Note also that RewriteQuery() relies on the fact that RT entries from + * the original query appear at the start of the expanded rtable, so + * beware of changing this. */ sub_action->rtable = list_concat(copyObject(parsetree->rtable), sub_action->rtable); @@ -3661,9 +3665,13 @@ rewriteTargetView(Query *parsetree, Relation view) * * rewrite_events is a list of open query-rewrite actions, so we can detect * infinite recursion. + * + * orig_rt_length is the length of the originating query's rtable, for product + * queries created by fireRules(), and 0 otherwise. This is used to skip any + * already-processed VALUES RTEs from the original query. */ static List * -RewriteQuery(Query *parsetree, List *rewrite_events) +RewriteQuery(Query *parsetree, List *rewrite_events, int orig_rt_length) { CmdType event = parsetree->commandType; bool instead = false; @@ -3687,7 +3695,7 @@ RewriteQuery(Query *parsetree, List *rewrite_events) if (ctequery->commandType == CMD_SELECT) continue; - newstuff = RewriteQuery(ctequery, rewrite_events); + newstuff = RewriteQuery(ctequery, rewrite_events, 0); /* * Currently we can only handle unconditional, single-statement DO @@ -3761,6 +3769,7 @@ RewriteQuery(Query *parsetree, List *rewrite_events) RangeTblEntry *rt_entry; Relation rt_entry_relation; List *locks; + int product_orig_rt_length; List *product_queries; bool hasUpdate = false; int values_rte_index = 0; @@ -3782,23 +3791,30 @@ RewriteQuery(Query *parsetree, List *rewrite_events) */ if (event == CMD_INSERT) { + ListCell *lc2; RangeTblEntry *values_rte = NULL; /* - * If it's an INSERT ... VALUES (...), (...), ... there will be a - * single RTE for the VALUES targetlists. + * Test if it's a multi-row INSERT ... VALUES (...), (...), ... by + * looking for a VALUES RTE in the fromlist. For product queries, + * we must ignore any already-processed VALUES RTEs from the + * original query. These appear at the start of the rangetable. */ - if (list_length(parsetree->jointree->fromlist) == 1) + foreach(lc2, parsetree->jointree->fromlist) { - RangeTblRef *rtr = (RangeTblRef *) linitial(parsetree->jointree->fromlist); + RangeTblRef *rtr = (RangeTblRef *) lfirst(lc2); - if (IsA(rtr, RangeTblRef)) + if (IsA(rtr, RangeTblRef) && rtr->rtindex > orig_rt_length) { RangeTblEntry *rte = rt_fetch(rtr->rtindex, parsetree->rtable); if (rte->rtekind == RTE_VALUES) { + /* should not find more than one VALUES RTE */ + if (values_rte != NULL) + elog(ERROR, "more than one VALUES RTE found"); + values_rte = rte; values_rte_index = rtr->rtindex; } @@ -3870,6 +3886,7 @@ RewriteQuery(Query *parsetree, List *rewrite_events) locks = matchLocks(event, rt_entry_relation->rd_rules, result_relation, parsetree, &hasUpdate); + product_orig_rt_length = list_length(parsetree->rtable); product_queries = fireRules(parsetree, result_relation, event, @@ -4053,7 +4070,19 @@ RewriteQuery(Query *parsetree, List *rewrite_events) Query *pt = (Query *) lfirst(n); List *newstuff; - newstuff = RewriteQuery(pt, rewrite_events); + /* + * For an updatable view, pt might be the rewritten version of + * the original query, in which case we pass on orig_rt_length + * to finish processing any VALUES RTE it contained. + * + * Otherwise, we have a product query created by fireRules(). + * Any VALUES RTEs from the original query have been fully + * processed, and must be skipped when we recurse. + */ + newstuff = RewriteQuery(pt, rewrite_events, + pt == parsetree ? 
+ orig_rt_length : + product_orig_rt_length); rewritten = list_concat(rewritten, newstuff); } @@ -4205,7 +4234,7 @@ QueryRewrite(Query *parsetree) * * Apply all non-SELECT rules possibly getting 0 or many queries */ - querylist = RewriteQuery(parsetree, NIL); + querylist = RewriteQuery(parsetree, NIL, 0); /* * Step 2 diff --git a/src/backend/storage/file/fd.c b/src/backend/storage/file/fd.c index e2953686b8e..f7f5de69ffb 100644 --- a/src/backend/storage/file/fd.c +++ b/src/backend/storage/file/fd.c @@ -668,7 +668,7 @@ pg_truncate(const char *path, off_t length) fd = OpenTransientFile(path, O_RDWR | PG_BINARY); if (fd >= 0) { - ret = ftruncate(fd, 0); + ret = ftruncate(fd, length); save_errno = errno; CloseTransientFile(fd); errno = save_errno; diff --git a/src/backend/storage/ipc/latch.c b/src/backend/storage/ipc/latch.c index 7ecd3afe1b9..208dfeea7bd 100644 --- a/src/backend/storage/ipc/latch.c +++ b/src/backend/storage/ipc/latch.c @@ -1548,7 +1548,7 @@ WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout, /* Sleep */ rc = epoll_wait(set->epoll_fd, set->epoll_ret_events, - nevents, cur_timeout); + Min(nevents, set->nevents_space), cur_timeout); /* Check return code */ if (rc < 0) @@ -1699,7 +1699,8 @@ WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout, /* Sleep */ rc = kevent(set->kqueue_fd, NULL, 0, - set->kqueue_ret_events, nevents, + set->kqueue_ret_events, + Min(nevents, set->nevents_space), timeout_p); /* Check return code */ diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c index 89fafdbedc1..3c7701e15b6 100644 --- a/src/backend/storage/ipc/procarray.c +++ b/src/backend/storage/ipc/procarray.c @@ -271,6 +271,17 @@ typedef enum GlobalVisHorizonKind VISHORIZON_TEMP } GlobalVisHorizonKind; +/* + * Reason codes for KnownAssignedXidsCompress(). + */ +typedef enum KAXCompressReason +{ + KAX_NO_SPACE, /* need to free up space at array end */ + KAX_PRUNE, /* we just pruned old entries */ + KAX_TRANSACTION_END, /* we just committed/removed some XIDs */ + KAX_STARTUP_PROCESS_IDLE /* startup process is about to sleep */ +} KAXCompressReason; + static ProcArrayStruct *procArray; @@ -356,7 +367,7 @@ static bool HaveVirtualXIDsDelayingChkptGuts(VirtualTransactionId *vxids, int nvxids, int type); /* Primitives for KnownAssignedXids array handling for standby */ -static void KnownAssignedXidsCompress(bool force); +static void KnownAssignedXidsCompress(KAXCompressReason reason, bool haveLock); static void KnownAssignedXidsAdd(TransactionId from_xid, TransactionId to_xid, bool exclusive_lock); static bool KnownAssignedXidsSearch(TransactionId xid, bool remove); @@ -5576,6 +5587,17 @@ ExpireOldKnownAssignedTransactionIds(TransactionId xid) LWLockRelease(ProcArrayLock); } +/* + * KnownAssignedTransactionIdsIdleMaintenance + * Opportunistically do maintenance work when the startup process + * is about to go idle. + */ +void +KnownAssignedTransactionIdsIdleMaintenance(void) +{ + KnownAssignedXidsCompress(KAX_STARTUP_PROCESS_IDLE, false); +} + /* * Private module functions to manipulate KnownAssignedXids @@ -5658,7 +5680,9 @@ ExpireOldKnownAssignedTransactionIds(TransactionId xid) * so there is an optimal point for any workload mix. We use a heuristic to * decide when to compress the array, though trimming also helps reduce * frequency of compressing. The heuristic requires us to track the number of - * currently valid XIDs in the array. + * currently valid XIDs in the array (N). Except in special cases, we'll + * compress when S >= 2N. 
Bounding S at 2N in turn bounds the time for + * taking a snapshot to be O(N), which it would have to be anyway. */ @@ -5666,42 +5690,91 @@ ExpireOldKnownAssignedTransactionIds(TransactionId xid) * Compress KnownAssignedXids by shifting valid data down to the start of the * array, removing any gaps. * - * A compression step is forced if "force" is true, otherwise we do it - * only if a heuristic indicates it's a good time to do it. + * A compression step is forced if "reason" is KAX_NO_SPACE, otherwise + * we do it only if a heuristic indicates it's a good time to do it. * - * Caller must hold ProcArrayLock in exclusive mode. + * Compression requires holding ProcArrayLock in exclusive mode. + * Caller must pass haveLock = true if it already holds the lock. */ static void -KnownAssignedXidsCompress(bool force) +KnownAssignedXidsCompress(KAXCompressReason reason, bool haveLock) { ProcArrayStruct *pArray = procArray; int head, - tail; + tail, + nelements; int compress_index; int i; - /* no spinlock required since we hold ProcArrayLock exclusively */ + /* Counters for compression heuristics */ + static unsigned int transactionEndsCounter; + static TimestampTz lastCompressTs; + + /* Tuning constants */ +#define KAX_COMPRESS_FREQUENCY 128 /* in transactions */ +#define KAX_COMPRESS_IDLE_INTERVAL 1000 /* in ms */ + + /* + * Since only the startup process modifies the head/tail pointers, we + * don't need a lock to read them here. + */ head = pArray->headKnownAssignedXids; tail = pArray->tailKnownAssignedXids; + nelements = head - tail; - if (!force) + /* + * If we can choose whether to compress, use a heuristic to avoid + * compressing too often or not often enough. "Compress" here simply + * means moving the values to the beginning of the array, so it is not as + * complex or costly as typical data compression algorithms. + */ + if (nelements == pArray->numKnownAssignedXids) { /* - * If we can choose how much to compress, use a heuristic to avoid - * compressing too often or not often enough. - * - * Heuristic is if we have a large enough current spread and less than - * 50% of the elements are currently in use, then compress. This - * should ensure we compress fairly infrequently. We could compress - * less often though the virtual array would spread out more and - * snapshots would become more expensive. + * When there are no gaps between head and tail, don't bother to + * compress, except in the KAX_NO_SPACE case where we must compress to + * create some space after the head. */ - int nelements = head - tail; + if (reason != KAX_NO_SPACE) + return; + } + else if (reason == KAX_TRANSACTION_END) + { + /* + * Consider compressing only once every so many commits. Frequency + * determined by benchmarks. + */ + if ((transactionEndsCounter++) % KAX_COMPRESS_FREQUENCY != 0) + return; - if (nelements < 4 * PROCARRAY_MAXPROCS || - nelements < 2 * pArray->numKnownAssignedXids) + /* + * Furthermore, compress only if the used part of the array is less + * than 50% full (see comments above). + */ + if (nelements < 2 * pArray->numKnownAssignedXids) return; } + else if (reason == KAX_STARTUP_PROCESS_IDLE) + { + /* + * We're about to go idle for lack of new WAL, so we might as well + * compress. But not too often, to avoid ProcArray lock contention + * with readers. 
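A toy, self-contained model of the S >= 2N rule described above, with invented names (maybe_compress, ARRAY_SIZE) and none of the reason-code, counter, or locking machinery of the real KnownAssignedXidsCompress:

#include <stdio.h>
#include <stdbool.h>

#define ARRAY_SIZE 16

static int  xids[ARRAY_SIZE];
static bool valid[ARRAY_SIZE];
static int  head = 0, tail = 0, nvalid = 0;

static void
maybe_compress(void)
{
    if (head - tail < 2 * nvalid)   /* the S >= 2N heuristic */
        return;

    int         j = 0;

    /* shift valid entries down to the start, removing gaps */
    for (int i = tail; i < head; i++)
    {
        if (valid[i])
        {
            xids[j] = xids[i];
            valid[j] = true;
            j++;
        }
    }
    for (int i = j; i < head; i++)
        valid[i] = false;
    tail = 0;
    head = j;
}

int
main(void)
{
    for (int i = 0; i < 8; i++)     /* eight XIDs become known */
    {
        xids[head] = 100 + i;
        valid[head] = true;
        head++;
        nvalid++;
    }
    /* five of them end, leaving gaps rather than a shorter span */
    valid[1] = valid[2] = valid[3] = valid[5] = valid[6] = false;
    nvalid -= 5;

    maybe_compress();               /* S = 8 >= 2 * 3, so compact */
    printf("tail=%d head=%d nvalid=%d\n", tail, head, nvalid);
    return 0;
}

Here S = head - tail = 8 and N = 3 after the removals, so the compaction fires and leaves tail = 0, head = 3: a subsequent snapshot scan touches only N slots.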
+ */ + if (lastCompressTs != 0) + { + TimestampTz compress_after; + + compress_after = TimestampTzPlusMilliseconds(lastCompressTs, + KAX_COMPRESS_IDLE_INTERVAL); + if (GetCurrentTimestamp() < compress_after) + return; + } + } + + /* Need to compress, so get the lock if we don't have it. */ + if (!haveLock) + LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE); /* * We compress the array by reading the valid values from tail to head, @@ -5717,9 +5790,16 @@ KnownAssignedXidsCompress(bool force) compress_index++; } } + Assert(compress_index == pArray->numKnownAssignedXids); pArray->tailKnownAssignedXids = 0; pArray->headKnownAssignedXids = compress_index; + + if (!haveLock) + LWLockRelease(ProcArrayLock); + + /* Update timestamp for maintenance. No need to hold lock for this. */ + lastCompressTs = GetCurrentTimestamp(); } /* @@ -5791,18 +5871,11 @@ KnownAssignedXidsAdd(TransactionId from_xid, TransactionId to_xid, */ if (head + nxids > pArray->maxKnownAssignedXids) { - /* must hold lock to compress */ - if (!exclusive_lock) - LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE); - - KnownAssignedXidsCompress(true); + KnownAssignedXidsCompress(KAX_NO_SPACE, exclusive_lock); head = pArray->headKnownAssignedXids; /* note: we no longer care about the tail pointer */ - if (!exclusive_lock) - LWLockRelease(ProcArrayLock); - /* * If it still won't fit then we're out of memory */ @@ -5996,7 +6069,7 @@ KnownAssignedXidsRemoveTree(TransactionId xid, int nsubxids, KnownAssignedXidsRemove(subxids[i]); /* Opportunistically compress the array */ - KnownAssignedXidsCompress(false); + KnownAssignedXidsCompress(KAX_TRANSACTION_END, true); } /* @@ -6071,7 +6144,7 @@ KnownAssignedXidsRemovePreceding(TransactionId removeXid) } /* Opportunistically compress the array */ - KnownAssignedXidsCompress(false); + KnownAssignedXidsCompress(KAX_PRUNE, true); } /* diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c index 37d917a1f3e..cb9d3e6d580 100644 --- a/src/backend/storage/lmgr/proc.c +++ b/src/backend/storage/lmgr/proc.c @@ -1320,13 +1320,13 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable) uint32 hashcode = locallock->hashcode; LWLock *partitionLock = LockHashPartitionLock(hashcode); PROC_QUEUE *waitQueue = &(lock->waitProcs); + SHM_QUEUE *waitQueuePos; LOCKMASK myHeldLocks = MyProc->heldLocks; TimestampTz standbyWaitStart = 0; bool early_deadlock = false; bool allow_autovacuum_cancel = true; bool logged_recovery_conflict = false; ProcWaitStatus myWaitStatus; - PGPROC *proc; PGPROC *leader = MyProc->lockGroupLeader; int i; @@ -1374,13 +1374,16 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable) * we are only considering the part of the wait queue before my insertion * point. */ - if (myHeldLocks != 0) + if (myHeldLocks != 0 && waitQueue->size > 0) { LOCKMASK aheadRequests = 0; + SHM_QUEUE *proc_node; - proc = (PGPROC *) waitQueue->links.next; + proc_node = waitQueue->links.next; for (i = 0; i < waitQueue->size; i++) { + PGPROC *proc = (PGPROC *) proc_node; + /* * If we're part of the same locking group as this waiter, its * locks neither conflict with ours nor contribute to @@ -1388,7 +1391,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable) */ if (leader != NULL && leader == proc->lockGroupLeader) { - proc = (PGPROC *) proc->links.next; + proc_node = proc->links.next; continue; } /* Must he wait for me? 
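The queue manipulation around this loop relies on an intrusive circular list in which the header doubles as a node, so "insert before the header" means "append at the tail". A freestanding sketch with invented types (Node, insert_before) standing in for SHM_QUEUE and SHMQueueInsertBefore:

#include <stdio.h>

typedef struct Node
{
    struct Node *prev;
    struct Node *next;
    int         pid;
} Node;

static void
queue_init(Node *head)
{
    head->prev = head->next = head;
}

static void
insert_before(Node *pos, Node *elem)
{
    elem->prev = pos->prev;
    elem->next = pos;
    pos->prev->next = elem;
    pos->prev = elem;
}

int
main(void)
{
    Node        head,
                a = {0},
                b = {0};

    a.pid = 101;
    b.pid = 202;
    queue_init(&head);
    insert_before(&head, &a);   /* "before the header" = tail: [101] */
    insert_before(&a, &b);      /* push in ahead of a: [202, 101] */

    for (Node *n = head.next; n != &head; n = n->next)
        printf("%d\n", n->pid);
    return 0;
}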
*/ @@ -1423,24 +1426,25 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable) } /* Nope, so advance to next waiter */ aheadRequests |= LOCKBIT_ON(proc->waitLockMode); - proc = (PGPROC *) proc->links.next; + proc_node = proc->links.next; } /* - * If we fall out of loop normally, proc points to waitQueue head, so - * we will insert at tail of queue as desired. + * If we iterated through the whole queue, cur points to the waitQueue + * head, so we will insert at tail of queue as desired. */ + waitQueuePos = proc_node; } else { /* I hold no locks, so I can't push in front of anyone. */ - proc = (PGPROC *) &(waitQueue->links); + waitQueuePos = &waitQueue->links; } /* - * Insert self into queue, ahead of the given proc (or at tail of queue). + * Insert self into queue, at the position determined above. */ - SHMQueueInsertBefore(&(proc->links), &(MyProc->links)); + SHMQueueInsertBefore(waitQueuePos, &MyProc->links); waitQueue->size++; lock->waitMask |= LOCKBIT_ON(lockmode); diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c index 62ded58aafb..37cdcfba46a 100644 --- a/src/backend/tcop/postgres.c +++ b/src/backend/tcop/postgres.c @@ -3092,6 +3092,12 @@ exec_execute_message(const char *portal_name, int64 max_rows) */ CommandCounterIncrement(); + /* + * Set XACT_FLAGS_PIPELINING whenever we complete an Execute + * message without immediately committing the transaction. + */ + MyXactFlags |= XACT_FLAGS_PIPELINING; + /* * Disable statement timeout whenever we complete an Execute * message. The next protocol message will start a fresh timeout. @@ -3107,6 +3113,12 @@ exec_execute_message(const char *portal_name, int64 max_rows) /* Portal run not complete, so send PortalSuspended */ if (whereToSendOutput == DestRemote) pq_putemptymessage('s'); + + /* + * Set XACT_FLAGS_PIPELINING whenever we suspend an Execute message, + * too. + */ + MyXactFlags |= XACT_FLAGS_PIPELINING; } /* diff --git a/src/backend/tsearch/ts_parse.c b/src/backend/tsearch/ts_parse.c index 92d95b4bd49..dbd5b176e1a 100644 --- a/src/backend/tsearch/ts_parse.c +++ b/src/backend/tsearch/ts_parse.c @@ -433,6 +433,8 @@ parsetext(Oid cfgId, ParsedText *prs, char *buf, int buflen) /* * Headline framework */ + +/* Add a word to prs->words[] */ static void hladdword(HeadlineParsedText *prs, char *buf, int buflen, int type) { @@ -449,6 +451,14 @@ hladdword(HeadlineParsedText *prs, char *buf, int buflen, int type) prs->curwords++; } +/* + * Add pos and matching-query-item data to the just-added word. + * Here, buf/buflen represent a processed lexeme, not raw token text. + * + * If the query contains more than one matching item, we replicate + * the last-added word so that each item can be pointed to. The + * duplicate entries are marked with repeated = 1. + */ static void hlfinditem(HeadlineParsedText *prs, TSQuery query, int32 pos, char *buf, int buflen) { @@ -590,6 +600,9 @@ hlparsetext(Oid cfgId, HeadlineParsedText *prs, TSQuery query, char *buf, int bu FunctionCall1(&(prsobj->prsend), PointerGetDatum(prsdata)); } +/* + * Generate the headline, as a text object, from HeadlineParsedText. 
+ */ text * generateHeadline(HeadlineParsedText *prs) { diff --git a/src/backend/tsearch/wparser_def.c b/src/backend/tsearch/wparser_def.c index 559dff63558..a3e5baf9782 100644 --- a/src/backend/tsearch/wparser_def.c +++ b/src/backend/tsearch/wparser_def.c @@ -1914,10 +1914,6 @@ prsd_end(PG_FUNCTION_ARGS) */ /* token type classification macros */ -#define LEAVETOKEN(x) ( (x)==SPACE ) -#define COMPLEXTOKEN(x) ( (x)==URL_T || (x)==NUMHWORD || (x)==ASCIIHWORD || (x)==HWORD ) -#define ENDPUNCTOKEN(x) ( (x)==SPACE ) - #define TS_IDIGNORE(x) ( (x)==TAG_T || (x)==PROTOCOL || (x)==SPACE || (x)==XMLENTITY ) #define HLIDREPLACE(x) ( (x)==TAG_T ) #define HLIDSKIP(x) ( (x)==URL_T || (x)==NUMHWORD || (x)==ASCIIHWORD || (x)==HWORD ) diff --git a/src/backend/utils/adt/jsonb_gin.c b/src/backend/utils/adt/jsonb_gin.c index 37499bc5622..a4d7d24509a 100644 --- a/src/backend/utils/adt/jsonb_gin.c +++ b/src/backend/utils/adt/jsonb_gin.c @@ -896,9 +896,10 @@ gin_extract_jsonb_query(PG_FUNCTION_ARGS) /* Nulls in the array are ignored */ if (key_nulls[i]) continue; + /* We rely on the array elements not being toasted */ entries[j++] = make_text_key(JGINFLAG_KEY, - VARDATA(key_datums[i]), - VARSIZE(key_datums[i]) - VARHDRSZ); + VARDATA_ANY(key_datums[i]), + VARSIZE_ANY_EXHDR(key_datums[i])); } *nentries = j; diff --git a/src/backend/utils/adt/jsonb_op.c b/src/backend/utils/adt/jsonb_op.c index 6e85e5c36b3..758fd7beaae 100644 --- a/src/backend/utils/adt/jsonb_op.c +++ b/src/backend/utils/adt/jsonb_op.c @@ -64,8 +64,9 @@ jsonb_exists_any(PG_FUNCTION_ARGS) continue; strVal.type = jbvString; - strVal.val.string.val = VARDATA(key_datums[i]); - strVal.val.string.len = VARSIZE(key_datums[i]) - VARHDRSZ; + /* We rely on the array elements not being toasted */ + strVal.val.string.val = VARDATA_ANY(key_datums[i]); + strVal.val.string.len = VARSIZE_ANY_EXHDR(key_datums[i]); if (findJsonbValueFromContainer(&jb->root, JB_FOBJECT | JB_FARRAY, @@ -97,8 +98,9 @@ jsonb_exists_all(PG_FUNCTION_ARGS) continue; strVal.type = jbvString; - strVal.val.string.val = VARDATA(key_datums[i]); - strVal.val.string.len = VARSIZE(key_datums[i]) - VARHDRSZ; + /* We rely on the array elements not being toasted */ + strVal.val.string.val = VARDATA_ANY(key_datums[i]); + strVal.val.string.len = VARSIZE_ANY_EXHDR(key_datums[i]); if (findJsonbValueFromContainer(&jb->root, JB_FOBJECT | JB_FARRAY, diff --git a/src/backend/utils/adt/jsonfuncs.c b/src/backend/utils/adt/jsonfuncs.c index 0364765dc0e..b342c81f27b 100644 --- a/src/backend/utils/adt/jsonfuncs.c +++ b/src/backend/utils/adt/jsonfuncs.c @@ -516,6 +516,12 @@ pg_parse_json_or_ereport(JsonLexContext *lex, JsonSemAction *sem) JsonLexContext * makeJsonLexContext(text *json, bool need_escapes) { + /* + * Most callers pass a detoasted datum, but it's not clear that they all + * do. pg_detoast_datum_packed() is cheap insurance. 
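Several hunks in this patch converge on the same defensive pattern: unpack the datum first, then read it only through the _ANY macros. A sketch of that pattern, assuming the usual backend environment (postgres.h); demo_text_access is an invented helper, not part of the patch:

#include "postgres.h"
#include "fmgr.h"

static void
demo_text_access(Datum d)
{
    /* DatumGetTextPP detoasts if needed, but may leave a short
     * (1-byte) varlena header in place. */
    text       *t = DatumGetTextPP(d);

    /* The _ANY macros cope with 1-byte and 4-byte headers alike;
     * plain VARDATA()/VARSIZE() would misread a packed value. */
    char       *data = VARDATA_ANY(t);
    int         len = VARSIZE_ANY_EXHDR(t);

    elog(DEBUG1, "text value: %d bytes at %p", len, (void *) data);
}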
+ */ + json = pg_detoast_datum_packed(json); + return makeJsonLexContextCstringLen(VARDATA_ANY(json), VARSIZE_ANY_EXHDR(json), GetDatabaseEncoding(), @@ -1518,9 +1524,11 @@ jsonb_get_element(Jsonb *jb, Datum *path, int npath, bool *isnull, bool as_text) { if (have_object) { + text *subscr = DatumGetTextPP(path[i]); + jbvp = getKeyJsonValueFromContainer(container, - VARDATA(path[i]), - VARSIZE(path[i]) - VARHDRSZ, + VARDATA_ANY(subscr), + VARSIZE_ANY_EXHDR(subscr), NULL); } else if (have_array) @@ -1693,8 +1701,8 @@ push_path(JsonbParseState **st, int level, Datum *path_elems, { /* text, an object is expected */ newkey.type = jbvString; - newkey.val.string.len = VARSIZE_ANY_EXHDR(path_elems[i]); - newkey.val.string.val = VARDATA_ANY(path_elems[i]); + newkey.val.string.val = c; + newkey.val.string.len = strlen(c); (void) pushJsonbValue(st, WJB_BEGIN_OBJECT, NULL); (void) pushJsonbValue(st, WJB_KEY, &newkey); @@ -4461,6 +4469,7 @@ jsonb_delete_array(PG_FUNCTION_ARGS) if (keys_nulls[i]) continue; + /* We rely on the array elements not being toasted */ keyptr = VARDATA_ANY(keys_elems[i]); keylen = VARSIZE_ANY_EXHDR(keys_elems[i]); if (keylen == v.val.string.len && @@ -4985,6 +4994,7 @@ setPathObject(JsonbIterator **it, Datum *path_elems, bool *path_nulls, int path_len, JsonbParseState **st, int level, JsonbValue *newval, uint32 npairs, int op_type) { + text *pathelem = NULL; int i; JsonbValue k, v; @@ -4992,6 +5002,11 @@ setPathObject(JsonbIterator **it, Datum *path_elems, bool *path_nulls, if (level >= path_len || path_nulls[level]) done = true; + else + { + /* The path Datum could be toasted, in which case we must detoast it */ + pathelem = DatumGetTextPP(path_elems[level]); + } /* empty object is a special case for create */ if ((npairs == 0) && (op_type & JB_PATH_CREATE_OR_INSERT) && @@ -5000,8 +5015,8 @@ setPathObject(JsonbIterator **it, Datum *path_elems, bool *path_nulls, JsonbValue newkey; newkey.type = jbvString; - newkey.val.string.len = VARSIZE_ANY_EXHDR(path_elems[level]); - newkey.val.string.val = VARDATA_ANY(path_elems[level]); + newkey.val.string.val = VARDATA_ANY(pathelem); + newkey.val.string.len = VARSIZE_ANY_EXHDR(pathelem); (void) pushJsonbValue(st, WJB_KEY, &newkey); (void) pushJsonbValue(st, WJB_VALUE, newval); @@ -5014,8 +5029,8 @@ setPathObject(JsonbIterator **it, Datum *path_elems, bool *path_nulls, Assert(r == WJB_KEY); if (!done && - k.val.string.len == VARSIZE_ANY_EXHDR(path_elems[level]) && - memcmp(k.val.string.val, VARDATA_ANY(path_elems[level]), + k.val.string.len == VARSIZE_ANY_EXHDR(pathelem) && + memcmp(k.val.string.val, VARDATA_ANY(pathelem), k.val.string.len) == 0) { done = true; @@ -5055,8 +5070,8 @@ setPathObject(JsonbIterator **it, Datum *path_elems, bool *path_nulls, JsonbValue newkey; newkey.type = jbvString; - newkey.val.string.len = VARSIZE_ANY_EXHDR(path_elems[level]); - newkey.val.string.val = VARDATA_ANY(path_elems[level]); + newkey.val.string.val = VARDATA_ANY(pathelem); + newkey.val.string.len = VARSIZE_ANY_EXHDR(pathelem); (void) pushJsonbValue(st, WJB_KEY, &newkey); (void) pushJsonbValue(st, WJB_VALUE, newval); @@ -5099,8 +5114,8 @@ setPathObject(JsonbIterator **it, Datum *path_elems, bool *path_nulls, JsonbValue newkey; newkey.type = jbvString; - newkey.val.string.len = VARSIZE_ANY_EXHDR(path_elems[level]); - newkey.val.string.val = VARDATA_ANY(path_elems[level]); + newkey.val.string.val = VARDATA_ANY(pathelem); + newkey.val.string.len = VARSIZE_ANY_EXHDR(pathelem); (void) pushJsonbValue(st, WJB_KEY, &newkey); (void) push_path(st, level, 
path_elems, path_nulls, @@ -5509,6 +5524,8 @@ transform_jsonb_string_values(Jsonb *jsonb, void *action_state, if ((type == WJB_VALUE || type == WJB_ELEM) && v.type == jbvString) { out = transform_action(action_state, v.val.string.val, v.val.string.len); + /* out is probably not toasted, but let's be sure */ + out = pg_detoast_datum_packed(out); v.val.string.val = VARDATA_ANY(out); v.val.string.len = VARSIZE_ANY_EXHDR(out); res = pushJsonbValue(&st, type, type < WJB_BEGIN_ARRAY ? &v : NULL); diff --git a/src/backend/utils/adt/jsonpath_exec.c b/src/backend/utils/adt/jsonpath_exec.c index 078aaef5392..cd2ac04d89e 100644 --- a/src/backend/utils/adt/jsonpath_exec.c +++ b/src/backend/utils/adt/jsonpath_exec.c @@ -958,9 +958,13 @@ executeItemOptUnwrapTarget(JsonPathExecContext *cxt, JsonPathItem *jsp, JsonbValue *v; bool hasNext = jspGetNext(jsp, &elem); - if (!hasNext && !found) + if (!hasNext && !found && jsp->type != jpiVariable) { - res = jperOk; /* skip evaluation */ + /* + * Skip evaluation, but not for variables. We must + * trigger an error for the missing variable. + */ + res = jperOk; break; } diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c index ea8156bebad..bf663afb3c5 100644 --- a/src/backend/utils/adt/ruleutils.c +++ b/src/backend/utils/adt/ruleutils.c @@ -2796,8 +2796,8 @@ pg_get_serial_sequence(PG_FUNCTION_ARGS) * * Note: if you change the output format of this function, be careful not * to break psql's rules (in \ef and \sf) for identifying the start of the - * function body. To wit: the function body starts on a line that begins - * with "AS ", and no preceding line will look like that. + * function body. To wit: the function body starts on a line that begins with + * "AS ", "BEGIN ", or "RETURN ", and no preceding line will look like that. */ Datum pg_get_functiondef(PG_FUNCTION_ARGS) @@ -8304,11 +8304,12 @@ isSimpleNode(Node *node, Node *parentNode, int prettyFlags) { case T_FuncExpr: { - /* special handling for casts */ + /* special handling for casts and COERCE_SQL_SYNTAX */ CoercionForm type = ((FuncExpr *) parentNode)->funcformat; if (type == COERCE_EXPLICIT_CAST || - type == COERCE_IMPLICIT_CAST) + type == COERCE_IMPLICIT_CAST || + type == COERCE_SQL_SYNTAX) return false; return true; /* own parentheses */ } @@ -8356,11 +8357,12 @@ isSimpleNode(Node *node, Node *parentNode, int prettyFlags) return false; case T_FuncExpr: { - /* special handling for casts */ + /* special handling for casts and COERCE_SQL_SYNTAX */ CoercionForm type = ((FuncExpr *) parentNode)->funcformat; if (type == COERCE_EXPLICIT_CAST || - type == COERCE_IMPLICIT_CAST) + type == COERCE_IMPLICIT_CAST || + type == COERCE_SQL_SYNTAX) return false; return true; /* own parentheses */ } @@ -10432,9 +10434,11 @@ get_func_sql_syntax(FuncExpr *expr, deparse_context *context) case F_TIMEZONE_TEXT_TIMETZ: /* AT TIME ZONE ... 
note reversed argument order */ appendStringInfoChar(buf, '('); - get_rule_expr((Node *) lsecond(expr->args), context, false); + get_rule_expr_paren((Node *) lsecond(expr->args), context, false, + (Node *) expr); appendStringInfoString(buf, " AT TIME ZONE "); - get_rule_expr((Node *) linitial(expr->args), context, false); + get_rule_expr_paren((Node *) linitial(expr->args), context, false, + (Node *) expr); appendStringInfoChar(buf, ')'); return true; @@ -10486,9 +10490,10 @@ get_func_sql_syntax(FuncExpr *expr, deparse_context *context) case F_IS_NORMALIZED: /* IS xxx NORMALIZED */ - appendStringInfoString(buf, "(("); - get_rule_expr((Node *) linitial(expr->args), context, false); - appendStringInfoString(buf, ") IS"); + appendStringInfoString(buf, "("); + get_rule_expr_paren((Node *) linitial(expr->args), context, false, + (Node *) expr); + appendStringInfoString(buf, " IS"); if (list_length(expr->args) == 2) { Const *con = (Const *) lsecond(expr->args); @@ -10509,11 +10514,6 @@ get_func_sql_syntax(FuncExpr *expr, deparse_context *context) appendStringInfoChar(buf, ')'); return true; - /* - * XXX EXTRACT, a/k/a date_part(), is intentionally not covered - * yet. Add it after we change the return type to numeric. - */ - case F_NORMALIZE: /* NORMALIZE() */ appendStringInfoString(buf, "NORMALIZE("); @@ -11787,7 +11787,7 @@ get_opclass_name_for_distribution_key(Oid opclass, Oid actual_datatype, /* * generate_opclass_name - * Compute the name to display for a opclass specified by OID + * Compute the name to display for an opclass specified by OID * * The result includes all necessary quoting and schema-prefixing. */ diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c index 10017cb583a..1f042e7c631 100644 --- a/src/backend/utils/adt/selfuncs.c +++ b/src/backend/utils/adt/selfuncs.c @@ -6202,7 +6202,7 @@ get_stats_slot_range(AttStatsSlot *sslot, Oid opfuncoid, FmgrInfo *opproc, * and fetching its low and/or high values. * If successful, store values in *min and *max, and return true. * (Either pointer can be NULL if that endpoint isn't needed.) - * If no data available, return false. + * If unsuccessful, return false. * * sortop is the "<" comparison operator to use. * collation is the required collation. @@ -6331,11 +6331,11 @@ get_actual_variable_range(PlannerInfo *root, VariableStatData *vardata, } else { - /* If min not requested, assume index is nonempty */ + /* If min not requested, still want to fetch max */ have_data = true; } - /* If max is requested, and we didn't find the index is empty */ + /* If max is requested, and we didn't already fail ... */ if (max && have_data) { /* scan in the opposite direction; all else is the same */ @@ -6369,7 +6369,7 @@ get_actual_variable_range(PlannerInfo *root, VariableStatData *vardata, /* * Get one endpoint datum (min or max depending on indexscandir) from the - * specified index. Return true if successful, false if index is empty. + * specified index. Return true if successful, false if not. * On success, endpoint value is stored to *endpointDatum (and copied into * outercontext). * @@ -6379,6 +6379,9 @@ get_actual_variable_range(PlannerInfo *root, VariableStatData *vardata, * to probe the heap. * (We could compute these values locally, but that would mean computing them * twice when get_actual_variable_range needs both the min and the max.) + * + * Failure occurs either when the index is empty, or we decide that it's + * taking too long to find a suitable tuple. 
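A self-contained model of the give-up heuristic described above, using canned block numbers and a deliberately tiny limit (the real VISITED_PAGES_LIMIT is 100): one "page fetch" is charged only when the heap block changes, and the probe is abandoned once the budget is exceeded:

#include <stdio.h>

#define DEMO_PAGES_LIMIT 3      /* tiny, for demonstration only */

int
main(void)
{
    /* heap block numbers of successive dead index entries */
    int         blocks[] = {7, 7, 7, 8, 9, 9, 10, 11};
    int         nblocks = sizeof(blocks) / sizeof(blocks[0]);
    int         last_block = -1;
    int         n_visited = 0;

    for (int i = 0; i < nblocks; i++)
    {
        if (blocks[i] != last_block)
        {
            last_block = blocks[i];
            if (++n_visited > DEMO_PAGES_LIMIT)
            {
                printf("giving up after %d pages, at entry %d\n",
                       n_visited - 1, i);
                return 0;
            }
        }
        /* else: same page as last time, don't charge a fetch */
    }
    printf("scanned all entries\n");
    return 0;
}

With this input the scan gives up on the fourth distinct block, mirroring how the planner would then fall back to whatever extremal value pg_statistic records.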
*/ static bool get_actual_variable_endpoint(Relation heapRel, @@ -6395,6 +6398,8 @@ get_actual_variable_endpoint(Relation heapRel, SnapshotData SnapshotNonVacuumable; IndexScanDesc index_scan; Buffer vmbuffer = InvalidBuffer; + BlockNumber last_heap_block = InvalidBlockNumber; + int n_visited_heap_pages = 0; ItemPointer tid; Datum values[INDEX_MAX_KEYS]; bool isnull[INDEX_MAX_KEYS]; @@ -6437,6 +6442,12 @@ get_actual_variable_endpoint(Relation heapRel, * might get a bogus answer that's not close to the index extremal value, * or could even be NULL. We avoid this hazard because we take the data * from the index entry not the heap. + * + * Despite all this care, there are situations where we might find many + * non-visible tuples near the end of the index. We don't want to expend + * a huge amount of time here, so we give up once we've read too many heap + * pages. When we fail for that reason, the caller will end up using + * whatever extremal value is recorded in pg_statistic. */ InitNonVacuumableSnapshot(SnapshotNonVacuumable, GlobalVisTestFor(heapRel)); @@ -6451,13 +6462,37 @@ get_actual_variable_endpoint(Relation heapRel, /* Fetch first/next tuple in specified direction */ while ((tid = index_getnext_tid(index_scan, indexscandir)) != NULL) { + BlockNumber block = ItemPointerGetBlockNumber(tid); + if (!VM_ALL_VISIBLE(heapRel, - ItemPointerGetBlockNumber(tid), + block, &vmbuffer)) { /* Rats, we have to visit the heap to check visibility */ if (!index_fetch_heap(index_scan, tableslot)) + { + /* + * No visible tuple for this index entry, so we need to + * advance to the next entry. Before doing so, count heap + * page fetches and give up if we've done too many. + * + * We don't charge a page fetch if this is the same heap page + * as the previous tuple. This is on the conservative side, + * since other recently-accessed pages are probably still in + * buffers too; but it's good enough for this heuristic. + */ +#define VISITED_PAGES_LIMIT 100 + + if (block != last_heap_block) + { + last_heap_block = block; + n_visited_heap_pages++; + if (n_visited_heap_pages > VISITED_PAGES_LIMIT) + break; + } + continue; /* no visible tuple, try next index entry */ + } /* We don't actually need the heap tuple for anything */ ExecClearTuple(tableslot); diff --git a/src/backend/utils/adt/tsvector_op.c b/src/backend/utils/adt/tsvector_op.c index 9a4f0a64427..ca23d32d7b3 100644 --- a/src/backend/utils/adt/tsvector_op.c +++ b/src/backend/utils/adt/tsvector_op.c @@ -1625,6 +1625,9 @@ TS_phrase_execute(QueryItem *curitem, void *arg, uint32 flags, /* since this function recurses, it could be driven to stack overflow */ check_stack_depth(); + /* ... 
and let's check for query cancel while we're at it */ + CHECK_FOR_INTERRUPTS(); + if (curitem->type == QI_VAL) return chkcond(arg, (QueryOperand *) curitem, data); diff --git a/src/backend/utils/adt/varchar.c b/src/backend/utils/adt/varchar.c index f96c6508eb3..a5c651d629c 100644 --- a/src/backend/utils/adt/varchar.c +++ b/src/backend/utils/adt/varchar.c @@ -1020,6 +1020,7 @@ hashbpchar(PG_FUNCTION_ARGS) buf = palloc(bsize); ucol_getSortKey(mylocale->info.icu.ucol, uchar, ulen, buf, bsize); + pfree(uchar); result = hash_any(buf, bsize); @@ -1081,6 +1082,7 @@ hashbpcharextended(PG_FUNCTION_ARGS) buf = palloc(bsize); ucol_getSortKey(mylocale->info.icu.ucol, uchar, ulen, buf, bsize); + pfree(uchar); result = hash_any_extended(buf, bsize, PG_GETARG_INT64(1)); diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c index cb3b1a1cbdd..168f6113ce2 100644 --- a/src/backend/utils/misc/guc.c +++ b/src/backend/utils/misc/guc.c @@ -3935,7 +3935,7 @@ static struct config_real ConfigureNamesReal[] = }, &CheckPointCompletionTarget, 0.9, 0.0, 1.0, - NULL, NULL, NULL + NULL, assign_checkpoint_completion_target, NULL }, { diff --git a/src/backend/utils/sort/sharedtuplestore.c b/src/backend/utils/sort/sharedtuplestore.c index 65e18eff8f2..f519ef838fe 100644 --- a/src/backend/utils/sort/sharedtuplestore.c +++ b/src/backend/utils/sort/sharedtuplestore.c @@ -158,6 +158,7 @@ sts_initialize(SharedTuplestore *sts, int participants, LWLockInitialize(&sts->participants[i].lock, LWTRANCHE_SHARED_TUPLESTORE); sts->participants[i].read_page = 0; + sts->participants[i].npages = 0; sts->participants[i].writing = false; } @@ -321,7 +322,7 @@ sts_puttuple(SharedTuplestoreAccessor *accessor, void *meta_data, /* Do we have space? */ size = accessor->sts->meta_data_size + tuple->t_len; - if (accessor->write_pointer + size >= accessor->write_end) + if (accessor->write_pointer + size > accessor->write_end) { if (accessor->write_chunk == NULL) { @@ -341,7 +342,7 @@ sts_puttuple(SharedTuplestoreAccessor *accessor, void *meta_data, } /* It may still not be enough in the case of a gigantic tuple. */ - if (accessor->write_pointer + size >= accessor->write_end) + if (accessor->write_pointer + size > accessor->write_end) { size_t written; diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c index 7ea694ce07d..4a8f4937605 100644 --- a/src/bin/pg_dump/pg_dump.c +++ b/src/bin/pg_dump/pg_dump.c @@ -4169,6 +4169,7 @@ void getPolicies(Archive *fout, TableInfo tblinfo[], int numTables) { PQExpBuffer query; + PQExpBuffer tbloids; PGresult *res; PolicyInfo *polinfo; int i_oid; @@ -4184,15 +4185,17 @@ getPolicies(Archive *fout, TableInfo tblinfo[], int numTables) j, ntups; + /* No policies before 9.5 */ if (fout->remoteVersion < 90500) return; query = createPQExpBuffer(); + tbloids = createPQExpBuffer(); /* - * First, check which tables have RLS enabled. We represent RLS being - * enabled on a table by creating a PolicyInfo object with null polname. + * Identify tables of interest, and check which ones have RLS enabled. 
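The tbloids buffer built just below follows a simple convention: a PostgreSQL array literal, '{oid,oid,...}', assembled client-side so the query can unnest() it and filtering happens server-side. A freestanding sketch of that assembly in plain C, with a fixed buffer in place of pg_dump's PQExpBuffer:

#include <stdio.h>
#include <string.h>

int
main(void)
{
    unsigned int oids[] = {16384, 16401, 16422};
    char        tbloids[256] = "{";

    for (size_t i = 0; i < sizeof(oids) / sizeof(oids[0]); i++)
    {
        char        item[16];

        if (strlen(tbloids) > 1)    /* do we have more than the '{'? */
            strcat(tbloids, ",");
        snprintf(item, sizeof(item), "%u", oids[i]);
        strcat(tbloids, item);
    }
    strcat(tbloids, "}");
    printf("... FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid) ...\n",
           tbloids);
    return 0;
}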
*/ + appendPQExpBufferChar(tbloids, '{'); for (i = 0; i < numTables; i++) { TableInfo *tbinfo = &tblinfo[i]; @@ -4201,9 +4204,23 @@ getPolicies(Archive *fout, TableInfo tblinfo[], int numTables) if (!(tbinfo->dobj.dump & DUMP_COMPONENT_POLICY)) continue; + /* It can't have RLS or policies if it's not a table */ + if (tbinfo->relkind != RELKIND_RELATION && + tbinfo->relkind != RELKIND_PARTITIONED_TABLE) + continue; + + /* Add it to the list of table OIDs to be probed below */ + if (tbloids->len > 1) /* do we have more than the '{'? */ + appendPQExpBufferChar(tbloids, ','); + appendPQExpBuffer(tbloids, "%u", tbinfo->dobj.catId.oid); + + /* Is RLS enabled? (That's separate from whether it has policies) */ if (tbinfo->rowsec) { /* + * We represent RLS being enabled on a table by creating a + * PolicyInfo object with null polname. + * * Note: use tableoid 0 so that this object won't be mistaken for * something that pg_depend entries apply to. */ @@ -4223,15 +4240,18 @@ getPolicies(Archive *fout, TableInfo tblinfo[], int numTables) polinfo->polwithcheck = NULL; } } + appendPQExpBufferChar(tbloids, '}'); /* - * Now, read all RLS policies, and create PolicyInfo objects for all those - * that are of interest. + * Now, read all RLS policies belonging to the tables of interest, and + * create PolicyInfo objects for them. (Note that we must filter the + * results server-side not locally, because we dare not apply pg_get_expr + * to tables we don't have lock on.) */ pg_log_info("reading row-level security policies"); printfPQExpBuffer(query, - "SELECT oid, tableoid, pol.polrelid, pol.polname, pol.polcmd, "); + "SELECT pol.oid, pol.tableoid, pol.polrelid, pol.polname, pol.polcmd, "); if (fout->remoteVersion >= 100000) appendPQExpBuffer(query, "pol.polpermissive, "); else @@ -4241,7 +4261,9 @@ getPolicies(Archive *fout, TableInfo tblinfo[], int numTables) " pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.quote_ident(rolname) from pg_catalog.pg_roles WHERE oid = ANY(pol.polroles)), ', ') END AS polroles, " "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, " "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS polwithcheck " - "FROM pg_catalog.pg_policy pol"); + "FROM unnest('%s'::pg_catalog.oid[]) AS src(tbloid)\n" + "JOIN pg_catalog.pg_policy pol ON (src.tbloid = pol.polrelid)", + tbloids->data); res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK); @@ -4265,13 +4287,6 @@ getPolicies(Archive *fout, TableInfo tblinfo[], int numTables) Oid polrelid = atooid(PQgetvalue(res, j, i_polrelid)); TableInfo *tbinfo = findTableByOid(polrelid); - /* - * Ignore row security on tables not to be dumped. (This will - * result in some harmless wasted slots in polinfo[].) - */ - if (!(tbinfo->dobj.dump & DUMP_COMPONENT_POLICY)) - continue; - polinfo[j].dobj.objType = DO_POLICY; polinfo[j].dobj.catId.tableoid = atooid(PQgetvalue(res, j, i_tableoid)); @@ -4306,6 +4321,7 @@ getPolicies(Archive *fout, TableInfo tblinfo[], int numTables) PQclear(res); destroyPQExpBuffer(query); + destroyPQExpBuffer(tbloids); } /* diff --git a/src/bin/pg_upgrade/info.c b/src/bin/pg_upgrade/info.c index da2f3cb9e38..a1e96686793 100644 --- a/src/bin/pg_upgrade/info.c +++ b/src/bin/pg_upgrade/info.c @@ -462,11 +462,10 @@ get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo) query[0] = '\0'; /* initialize query string to empty */ /* - * Create a CTE that collects OIDs of regular user tables, including - * matviews and sequences, but excluding toast tables and indexes. 
We - * assume that relations with OIDs >= FirstNormalObjectId belong to the - * user. (That's probably redundant with the namespace-name exclusions, - * but let's be safe.) + * Create a CTE that collects OIDs of regular user tables and matviews, + * but excluding toast tables and indexes. We assume that relations with + * OIDs >= FirstNormalObjectId belong to the user. (That's probably + * redundant with the namespace-name exclusions, but let's be safe.) * * pg_largeobject contains user data that does not appear in pg_dump * output, so we have to copy that system table. It's easiest to do that diff --git a/src/bin/psql/command.c b/src/bin/psql/command.c index 4215504dc11..269925342be 100644 --- a/src/bin/psql/command.c +++ b/src/bin/psql/command.c @@ -164,8 +164,7 @@ static bool get_create_object_cmd(EditableObjectType obj_type, Oid oid, PQExpBuffer buf); static int strip_lineno_from_objdesc(char *obj); static int count_lines_in_buf(PQExpBuffer buf); -static void print_with_linenumbers(FILE *output, char *lines, - const char *header_keyword); +static void print_with_linenumbers(FILE *output, char *lines, bool is_func); static void minimal_error_message(PGresult *res); static void printSSLInfo(void); @@ -1166,17 +1165,19 @@ exec_command_ef_ev(PsqlScanState scan_state, bool active_branch, /* * lineno "1" should correspond to the first line of the * function body. We expect that pg_get_functiondef() will - * emit that on a line beginning with "AS ", and that there - * can be no such line before the real start of the function - * body. Increment lineno by the number of lines before that - * line, so that it becomes relative to the first line of the - * function definition. + * emit that on a line beginning with "AS ", "BEGIN ", or + * "RETURN ", and that there can be no such line before the + * real start of the function body. Increment lineno by the + * number of lines before that line, so that it becomes + * relative to the first line of the function definition. */ const char *lines = query_buf->data; while (*lines != '\0') { - if (strncmp(lines, "AS ", 3) == 0) + if (strncmp(lines, "AS ", 3) == 0 || + strncmp(lines, "BEGIN ", 6) == 0 || + strncmp(lines, "RETURN ", 7) == 0) break; lineno++; /* find start of next line */ @@ -2454,15 +2455,8 @@ exec_command_sf_sv(PsqlScanState scan_state, bool active_branch, if (show_linenumbers) { - /* - * For functions, lineno "1" should correspond to the first - * line of the function body. We expect that - * pg_get_functiondef() will emit that on a line beginning - * with "AS ", and that there can be no such line before the - * real start of the function body. - */ - print_with_linenumbers(output, buf->data, - is_func ? "AS " : NULL); + /* add line numbers */ + print_with_linenumbers(output, buf->data, is_func); } else { @@ -5357,24 +5351,28 @@ count_lines_in_buf(PQExpBuffer buf) /* * Write text at *lines to output with line numbers. * - * If header_keyword isn't NULL, then line 1 should be the first line beginning - * with header_keyword; lines before that are unnumbered. + * For functions, lineno "1" should correspond to the first line of the + * function body; lines before that are unnumbered. We expect that + * pg_get_functiondef() will emit that on a line beginning with "AS ", + * "BEGIN ", or "RETURN ", and that there can be no such line before + * the real start of the function body. * * Caution: this scribbles on *lines. 
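A self-contained sketch of the numbering rule restated above, with an invented helper is_body_start and a canned definition standing in for pg_get_functiondef output; header lines stay unnumbered and numbering starts at the body:

#include <stdio.h>
#include <string.h>

static int
is_body_start(const char *line)
{
    return strncmp(line, "AS ", 3) == 0 ||
        strncmp(line, "BEGIN ", 6) == 0 ||
        strncmp(line, "RETURN ", 7) == 0;
}

int
main(void)
{
    const char *lines[] = {
        "CREATE OR REPLACE FUNCTION f()",
        " RETURNS integer",
        " LANGUAGE sql",
        "RETURN 42",
    };
    int         in_header = 1;
    int         lineno = 0;

    for (size_t i = 0; i < sizeof(lines) / sizeof(lines[0]); i++)
    {
        if (in_header && is_body_start(lines[i]))
            in_header = 0;
        if (in_header)
            printf("        %s\n", lines[i]);
        else
            printf("%-7d %s\n", ++lineno, lines[i]);
    }
    return 0;
}

The new-style SQL-function body here begins with "RETURN ", one of the two keywords ("BEGIN ", "RETURN ") this patch teaches psql to recognize alongside "AS ".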
*/ static void -print_with_linenumbers(FILE *output, char *lines, - const char *header_keyword) +print_with_linenumbers(FILE *output, char *lines, bool is_func) { - bool in_header = (header_keyword != NULL); - size_t header_sz = in_header ? strlen(header_keyword) : 0; + bool in_header = is_func; int lineno = 0; while (*lines != '\0') { char *eol; - if (in_header && strncmp(lines, header_keyword, header_sz) == 0) + if (in_header && + (strncmp(lines, "AS ", 3) == 0 || + strncmp(lines, "BEGIN ", 6) == 0 || + strncmp(lines, "RETURN ", 7) == 0)) in_header = false; /* increment lineno only for body's lines */ diff --git a/src/bin/psql/create_help.pl b/src/bin/psql/create_help.pl index 83324239740..339884102ee 100644 --- a/src/bin/psql/create_help.pl +++ b/src/bin/psql/create_help.pl @@ -41,7 +41,7 @@ $define =~ tr/a-z/A-Z/; $define =~ s/\W/_/g; -opendir(DIR, $docdir) +opendir(my $dh, $docdir) or die "$0: could not open documentation source dir '$docdir': $!\n"; open(my $hfile_handle, '>', $hfile) or die "$0: could not open output file '$hfile': $!\n"; @@ -93,7 +93,7 @@ my %entries; -foreach my $file (sort readdir DIR) +foreach my $file (sort readdir $dh) { my ($cmdid, @cmdnames, $cmddesc, $cmdsynopsis); $file =~ /\.sgml$/ or next; @@ -216,4 +216,4 @@ close $cfile_handle; close $hfile_handle; -closedir DIR; +closedir $dh; diff --git a/src/bin/psql/tab-complete.c b/src/bin/psql/tab-complete.c index 3593b78577d..1bdb618da93 100644 --- a/src/bin/psql/tab-complete.c +++ b/src/bin/psql/tab-complete.c @@ -4023,11 +4023,12 @@ psql_completion(const char *text, int start, int end) COMPLETE_WITH("TO"); /* - * Complete ALTER DATABASE|FUNCTION||PROCEDURE|ROLE|ROUTINE|USER ... SET + * Complete ALTER DATABASE|FUNCTION|PROCEDURE|ROLE|ROUTINE|USER ... SET * */ else if (HeadMatches("ALTER", "DATABASE|FUNCTION|PROCEDURE|ROLE|ROUTINE|USER") && - TailMatches("SET", MatchAny)) + TailMatches("SET", MatchAny) && + !TailMatches("SCHEMA")) COMPLETE_WITH("FROM CURRENT", "TO"); /* diff --git a/src/include/access/xact.h b/src/include/access/xact.h index 25f224b2c2b..dd7687a1246 100644 --- a/src/include/access/xact.h +++ b/src/include/access/xact.h @@ -121,6 +121,13 @@ extern int MyXactFlags; */ #define XACT_FLAGS_NEEDIMMEDIATECOMMIT (1U << 2) +/* + * XACT_FLAGS_PIPELINING - set when we complete an extended-query-protocol + * Execute message. This is useful for detecting that an implicit transaction + * block has been created via pipelining. + */ +#define XACT_FLAGS_PIPELINING (1U << 3) + /* * start- and end-of-transaction callbacks for dynamically loaded modules */ diff --git a/src/include/nodes/pathnodes.h b/src/include/nodes/pathnodes.h index b02a2ccf0c1..41220887050 100644 --- a/src/include/nodes/pathnodes.h +++ b/src/include/nodes/pathnodes.h @@ -1933,8 +1933,8 @@ typedef struct MemoizePath { Path path; Path *subpath; /* outerpath to cache tuples from */ - List *hash_operators; /* hash operators for each key */ - List *param_exprs; /* cache keys */ + List *hash_operators; /* OIDs of hash equality ops for cache keys */ + List *param_exprs; /* expressions that are cache keys */ bool singlerow; /* true if the cache entry is to be marked as * complete after caching the first record. 
*/ bool binary_mode; /* true when cache key should be compared bit diff --git a/src/include/storage/procarray.h b/src/include/storage/procarray.h index 607834e46c4..dba7a6cf8a4 100644 --- a/src/include/storage/procarray.h +++ b/src/include/storage/procarray.h @@ -43,6 +43,7 @@ extern void ExpireTreeKnownAssignedTransactionIds(TransactionId xid, TransactionId max_xid); extern void ExpireAllKnownAssignedTransactionIds(void); extern void ExpireOldKnownAssignedTransactionIds(TransactionId xid); +extern void KnownAssignedTransactionIdsIdleMaintenance(void); extern int GetMaxSnapshotXidCount(void); extern int GetMaxSnapshotSubxidCount(void); diff --git a/src/include/tsearch/ts_public.h b/src/include/tsearch/ts_public.h index adb9ae5fb9c..c9e5888bdd4 100644 --- a/src/include/tsearch/ts_public.h +++ b/src/include/tsearch/ts_public.h @@ -30,33 +30,60 @@ typedef struct } LexDescr; /* - * Interface to headline generator + * Interface to headline generator (tsparser's prsheadline function) + * + * HeadlineParsedText describes the text that is to be highlighted. + * Some fields are passed from the core code to the prsheadline function, + * while others are output from the prsheadline function. + * + * The principal data is words[], an array of HeadlineWordEntry, + * one entry per token, of length curwords. + * The fields of HeadlineWordEntry are: + * + * in, selected, replace, skip: these flags are initially zero + * and may be set by the prsheadline function. A consecutive group + * of tokens marked "in" form a "fragment" to be output. + * Such tokens may additionally be marked selected, replace, or skip + * to modify how they are shown. (If you set more than one of those + * bits, you get an unspecified one of those behaviors.) + * + * type, len, pos, word: filled by core code to describe the token. + * + * item: if the token matches any operand of the tsquery of interest, + * a pointer to such an operand. (If there are multiple matching + * operands, we generate extra copies of the HeadlineWordEntry to hold + * all the pointers. The extras are marked with repeated = 1 and should + * be ignored except for checking the item pointer.) 
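+ * For example (purely illustrative): when highlighting the query
+ * 'fat & rat' in the text 'a fat cat ate a fat rat', a prsheadline
+ * function could set in = 1 on the consecutive tokens of 'fat rat' to
+ * form one fragment, and additionally set selected = 1 on 'fat' and
+ * 'rat' so those words are wrapped in startsel/stopsel markers.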
*/ typedef struct { - uint32 selected:1, - in:1, - replace:1, - repeated:1, - skip:1, - unused:3, - type:8, - len:16; - WordEntryPos pos; - char *word; - QueryOperand *item; + uint32 selected:1, /* token is to be highlighted */ + in:1, /* token is part of headline */ + replace:1, /* token is to be replaced with a space */ + repeated:1, /* duplicate entry to hold item pointer */ + skip:1, /* token is to be skipped (not output) */ + unused:3, /* available bits */ + type:8, /* parser's token category */ + len:16; /* length of token */ + WordEntryPos pos; /* position of token */ + char *word; /* text of token (not null-terminated) */ + QueryOperand *item; /* a matching query operand, or NULL if none */ } HeadlineWordEntry; typedef struct { + /* Fields filled by core code before calling prsheadline function: */ HeadlineWordEntry *words; - int32 lenwords; - int32 curwords; - int32 vectorpos; /* positions a-la tsvector */ - char *startsel; + int32 lenwords; /* allocated length of words[] */ + int32 curwords; /* current number of valid entries */ + int32 vectorpos; /* used by ts_parse.c in filling pos fields */ + + /* The prsheadline function must fill these fields: */ + /* Strings for marking selected tokens and separating fragments: */ + char *startsel; /* palloc'd strings */ char *stopsel; char *fragdelim; - int16 startsellen; + int16 startsellen; /* lengths of strings */ int16 stopsellen; int16 fragdelimlen; } HeadlineParsedText; diff --git a/src/interfaces/libpq/fe-auth-scram.c b/src/interfaces/libpq/fe-auth-scram.c index 345f046f807..0c1c4cd7e53 100644 --- a/src/interfaces/libpq/fe-auth-scram.c +++ b/src/interfaces/libpq/fe-auth-scram.c @@ -886,7 +886,8 @@ pg_fe_scram_build_secret(const char *password) /* * Normalize the password with SASLprep. If that doesn't work, because * the password isn't valid UTF-8 or contains prohibited characters, just - * proceed with the original password. (See comments at top of file.) + * proceed with the original password. (See comments at the top of + * auth-scram.c.) */ rc = pg_saslprep(password, &prep_password); if (rc == SASLPREP_OOM) diff --git a/src/port/snprintf.c b/src/port/snprintf.c index 87525663907..8306ab4f2b8 100644 --- a/src/port/snprintf.c +++ b/src/port/snprintf.c @@ -109,6 +109,16 @@ #undef vprintf #undef printf +/* + * We use the platform's native snprintf() for some machine-dependent cases. + * While that's required by C99, Microsoft Visual Studio lacks it before + * VS2015. Fortunately, we don't really need the length check in practice, + * so just fall back to native sprintf() on that platform. + */ +#if defined(_MSC_VER) && _MSC_VER < 1900 /* pre-VS2015 */ +#define snprintf(str,size,...) sprintf(str,__VA_ARGS__) +#endif + /* * Info about where the formatted output is going. 
* diff --git a/src/test/isolation/expected/drop-index-concurrently-1.out b/src/test/isolation/expected/drop-index-concurrently-1.out index e7b9272f455..c7f24252e34 100644 --- a/src/test/isolation/expected/drop-index-concurrently-1.out +++ b/src/test/isolation/expected/drop-index-concurrently-1.out @@ -1,17 +1,17 @@ Parsed test spec with 3 sessions -starting permutation: noseq chkiso prepi preps begin explaini explains select2 drop insert2 end2 selecti selects end -step noseq: SET enable_seqscan = false; +starting permutation: chkiso prepi preps begin disableseq explaini enableseq explains select2 drop insert2 end2 selecti selects end step chkiso: SELECT (setting in ('read committed','read uncommitted')) AS is_read_committed FROM pg_settings WHERE name = 'default_transaction_isolation'; is_read_committed ----------------- t (1 row) -step prepi: PREPARE getrow_idx AS SELECT * FROM test_dc WHERE data=34 ORDER BY id,data; -step preps: PREPARE getrow_seq AS SELECT * FROM test_dc WHERE data::text=34::text ORDER BY id,data; +step prepi: PREPARE getrow_idxscan AS SELECT * FROM test_dc WHERE data = 34 ORDER BY id,data; +step preps: PREPARE getrow_seqscan AS SELECT * FROM test_dc WHERE data = 34 ORDER BY id,data; step begin: BEGIN; -step explaini: EXPLAIN (COSTS OFF) EXECUTE getrow_idx; +step disableseq: SET enable_seqscan = false; +step explaini: EXPLAIN (COSTS OFF) EXECUTE getrow_idxscan; QUERY PLAN ---------------------------------------------- Sort @@ -21,17 +21,18 @@ Sort Optimizer: Postgres query optimizer (5 rows) -step explains: EXPLAIN (COSTS OFF) EXECUTE getrow_seq; -QUERY PLAN ----------------------------------------------- -Sort - Sort Key: id, data - -> Index Scan using test_dc_pkey on test_dc - Filter: ((data)::text = '34'::text) +step enableseq: SET enable_seqscan = true; +step explains: EXPLAIN (COSTS OFF) EXECUTE getrow_seqscan; +QUERY PLAN +--------------------------- +Sort + Sort Key: id + -> Seq Scan on test_dc + Filter: (data = 34) Optimizer: Postgres query optimizer (5 rows) -step select2: SELECT * FROM test_dc WHERE data=34 ORDER BY id,data; +step select2: SELECT * FROM test_dc WHERE data = 34 ORDER BY id,data; id|data --+---- 34| 34 @@ -40,14 +41,14 @@ id|data step drop: DROP INDEX CONCURRENTLY test_dc_data; step insert2: INSERT INTO test_dc(data) SELECT * FROM generate_series(1, 100); step end2: COMMIT; -step selecti: EXECUTE getrow_idx; +step selecti: EXECUTE getrow_idxscan; id|data ---+---- 34| 34 134| 34 (2 rows) -step selects: EXECUTE getrow_seq; +step selects: EXECUTE getrow_seqscan; id|data ---+---- 34| 34 diff --git a/src/test/isolation/expected/drop-index-concurrently-1_2.out b/src/test/isolation/expected/drop-index-concurrently-1_2.out index 04612d3cacc..266b0e4adac 100644 --- a/src/test/isolation/expected/drop-index-concurrently-1_2.out +++ b/src/test/isolation/expected/drop-index-concurrently-1_2.out @@ -1,17 +1,17 @@ Parsed test spec with 3 sessions -starting permutation: noseq chkiso prepi preps begin explaini explains select2 drop insert2 end2 selecti selects end -step noseq: SET enable_seqscan = false; +starting permutation: chkiso prepi preps begin disableseq explaini enableseq explains select2 drop insert2 end2 selecti selects end step chkiso: SELECT (setting in ('read committed','read uncommitted')) AS is_read_committed FROM pg_settings WHERE name = 'default_transaction_isolation'; is_read_committed ----------------- f (1 row) -step prepi: PREPARE getrow_idx AS SELECT * FROM test_dc WHERE data=34 ORDER BY id,data; -step preps: PREPARE getrow_seq AS SELECT * 
FROM test_dc WHERE data::text=34::text ORDER BY id,data; +step prepi: PREPARE getrow_idxscan AS SELECT * FROM test_dc WHERE data = 34 ORDER BY id,data; +step preps: PREPARE getrow_seqscan AS SELECT * FROM test_dc WHERE data = 34 ORDER BY id,data; step begin: BEGIN; -step explaini: EXPLAIN (COSTS OFF) EXECUTE getrow_idx; +step disableseq: SET enable_seqscan = false; +step explaini: EXPLAIN (COSTS OFF) EXECUTE getrow_idxscan; QUERY PLAN ---------------------------------------------- Sort @@ -20,16 +20,17 @@ Sort Index Cond: (data = 34) (4 rows) -step explains: EXPLAIN (COSTS OFF) EXECUTE getrow_seq; -QUERY PLAN ----------------------------------------------- -Sort - Sort Key: id, data - -> Index Scan using test_dc_pkey on test_dc - Filter: ((data)::text = '34'::text) +step enableseq: SET enable_seqscan = true; +step explains: EXPLAIN (COSTS OFF) EXECUTE getrow_seqscan; +QUERY PLAN +--------------------------- +Sort + Sort Key: id + -> Seq Scan on test_dc + Filter: (data = 34) (4 rows) -step select2: SELECT * FROM test_dc WHERE data=34 ORDER BY id,data; +step select2: SELECT * FROM test_dc WHERE data = 34 ORDER BY id,data; id|data --+---- 34| 34 @@ -38,13 +39,13 @@ id|data step drop: DROP INDEX CONCURRENTLY test_dc_data; step insert2: INSERT INTO test_dc(data) SELECT * FROM generate_series(1, 100); step end2: COMMIT; -step selecti: EXECUTE getrow_idx; +step selecti: EXECUTE getrow_idxscan; id|data --+---- 34| 34 (1 row) -step selects: EXECUTE getrow_seq; +step selects: EXECUTE getrow_seqscan; id|data --+---- 34| 34 diff --git a/src/test/isolation/specs/drop-index-concurrently-1.spec b/src/test/isolation/specs/drop-index-concurrently-1.spec index 812b5de2261..a57a02469d8 100644 --- a/src/test/isolation/specs/drop-index-concurrently-1.spec +++ b/src/test/isolation/specs/drop-index-concurrently-1.spec @@ -3,7 +3,8 @@ # This test shows that the concurrent write behaviour works correctly # with the expected output being 2 rows at the READ COMMITTED and READ # UNCOMMITTED transaction isolation levels, and 1 row at the other -# transaction isolation levels. +# transaction isolation levels. We ensure this is the case by checking +# the returned rows in an index scan plan and a seq scan plan. 
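+# (Illustrative detail: getrow_idxscan is planned while enable_seqscan is
+# off, which pins it to an index scan on test_dc_data, while enable_seqscan
+# is switched back on before getrow_seqscan is planned, so that one settles
+# on a seq scan of the same data.)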
# setup { @@ -18,24 +19,25 @@ teardown } session s1 -step noseq { SET enable_seqscan = false; } step chkiso { SELECT (setting in ('read committed','read uncommitted')) AS is_read_committed FROM pg_settings WHERE name = 'default_transaction_isolation'; } -step prepi { PREPARE getrow_idx AS SELECT * FROM test_dc WHERE data=34 ORDER BY id,data; } -step preps { PREPARE getrow_seq AS SELECT * FROM test_dc WHERE data::text=34::text ORDER BY id,data; } +step prepi { PREPARE getrow_idxscan AS SELECT * FROM test_dc WHERE data = 34 ORDER BY id,data; } +step preps { PREPARE getrow_seqscan AS SELECT * FROM test_dc WHERE data = 34 ORDER BY id,data; } step begin { BEGIN; } -step explaini { EXPLAIN (COSTS OFF) EXECUTE getrow_idx; } -step explains { EXPLAIN (COSTS OFF) EXECUTE getrow_seq; } -step selecti { EXECUTE getrow_idx; } -step selects { EXECUTE getrow_seq; } +step disableseq { SET enable_seqscan = false; } +step explaini { EXPLAIN (COSTS OFF) EXECUTE getrow_idxscan; } +step enableseq { SET enable_seqscan = true; } +step explains { EXPLAIN (COSTS OFF) EXECUTE getrow_seqscan; } +step selecti { EXECUTE getrow_idxscan; } +step selects { EXECUTE getrow_seqscan; } step end { COMMIT; } session s2 setup { BEGIN; } -step select2 { SELECT * FROM test_dc WHERE data=34 ORDER BY id,data; } +step select2 { SELECT * FROM test_dc WHERE data = 34 ORDER BY id,data; } step insert2 { INSERT INTO test_dc(data) SELECT * FROM generate_series(1, 100); } step end2 { COMMIT; } session s3 step drop { DROP INDEX CONCURRENTLY test_dc_data; } -permutation noseq chkiso prepi preps begin explaini explains select2 drop insert2 end2 selecti selects end +permutation chkiso prepi preps begin disableseq explaini enableseq explains select2 drop insert2 end2 selecti selects end diff --git a/src/test/perl/PostgresVersion.pm b/src/test/perl/PostgresVersion.pm index 4e764c36a55..884d0e949b9 100644 --- a/src/test/perl/PostgresVersion.pm +++ b/src/test/perl/PostgresVersion.pm @@ -120,9 +120,12 @@ sub _version_cmp for (my $idx = 0;; $idx++) { - return 0 unless (defined $an->[$idx] && defined $bn->[$idx]); - return $an->[$idx] <=> $bn->[$idx] - if ($an->[$idx] <=> $bn->[$idx]); + return 0 + if ($idx >= @$an && $idx >= @$bn); + # treat a missing number as 0 + my ($anum, $bnum) = ($an->[$idx] || 0, $bn->[$idx] || 0); + return $anum <=> $bnum + if ($anum <=> $bnum); } } diff --git a/src/test/perl/TestLib.pm b/src/test/perl/TestLib.pm index 610050e1c4b..2a354ea4ef9 100644 --- a/src/test/perl/TestLib.pm +++ b/src/test/perl/TestLib.pm @@ -758,15 +758,11 @@ sub command_exit_is my $h = IPC::Run::start $cmd; $h->finish(); - # On Windows, the exit status of the process is returned directly as the - # process's exit code, while on Unix, it's returned in the high bits - # of the exit code (see WEXITSTATUS macro in the standard - # header file). IPC::Run's result function always returns exit code >> 8, - # assuming the Unix convention, which will always return 0 on Windows as - # long as the process was not terminated by an exception. To work around - # that, use $h->full_results on Windows instead. + # Normally, if the child called exit(N), IPC::Run::result() returns N. On + # Windows, with IPC::Run v20220807.0 and earlier, full_results() is the + # method that returns N (https://github.com/toddr/IPC-Run/issues/161). my $result = - ($Config{osname} eq "MSWin32") + ($Config{osname} eq "MSWin32" && $IPC::Run::VERSION <= 20220807.0) ? 
($h->full_results)[0] : $h->result(0); is($result, $expected, $test_name); diff --git a/src/test/recovery/t/011_crash_recovery.pl b/src/test/recovery/t/011_crash_recovery.pl deleted file mode 100644 index a26e99500b2..00000000000 --- a/src/test/recovery/t/011_crash_recovery.pl +++ /dev/null @@ -1,64 +0,0 @@ - -# Copyright (c) 2021, PostgreSQL Global Development Group - -# -# Tests relating to PostgreSQL crash recovery and redo -# -use strict; -use warnings; -use PostgresNode; -use TestLib; -use Test::More; -use Config; - -plan tests => 3; - -my $node = get_new_node('primary'); -$node->init(allows_streaming => 1); -$node->start; - -my ($stdin, $stdout, $stderr) = ('', '', ''); - -# Ensure that pg_xact_status reports 'aborted' for xacts -# that were in-progress during crash. To do that, we need -# an xact to be in-progress when we crash and we need to know -# its xid. -my $tx = IPC::Run::start( - [ - 'psql', '-X', '-qAt', '-v', 'ON_ERROR_STOP=1', '-f', '-', '-d', - $node->connstr('postgres') - ], - '<', - \$stdin, - '>', - \$stdout, - '2>', - \$stderr); -$stdin .= q[ -BEGIN; -CREATE TABLE mine(x integer); -SELECT pg_current_xact_id(); -]; -$tx->pump until $stdout =~ /[[:digit:]]+[\r\n]$/; - -# Status should be in-progress -my $xid = $stdout; -chomp($xid); - -is($node->safe_psql('postgres', qq[SELECT pg_xact_status('$xid');]), - 'in progress', 'own xid is in-progress'); - -# Crash and restart the postmaster -$node->stop('immediate'); -$node->start; - -# Make sure we really got a new xid -cmp_ok($node->safe_psql('postgres', 'SELECT pg_current_xact_id()'), - '>', $xid, 'new xid after restart is greater'); - -# and make sure we show the in-progress xact as aborted -is($node->safe_psql('postgres', qq[SELECT pg_xact_status('$xid');]), - 'aborted', 'xid is aborted after crash'); - -$stdin .= "\\q\n"; -$tx->finish; # wait for psql to quit gracefully diff --git a/src/test/regress/expected/alter_table.out b/src/test/regress/expected/alter_table.out index 0cfa1b7464a..d29bcc0da6d 100644 --- a/src/test/regress/expected/alter_table.out +++ b/src/test/regress/expected/alter_table.out @@ -2329,12 +2329,26 @@ alter table recur1 alter column f2 type recur2; -- fails ERROR: composite type recur1 cannot be made a member of itself -- SET STORAGE may need to add a TOAST table create table test_storage (a text); +select reltoastrelid <> 0 as has_toast_table + from pg_class where oid = 'test_storage'::regclass; + has_toast_table +----------------- + t +(1 row) + alter table test_storage alter a set storage plain; -alter table test_storage add b int default 0; -- rewrite table to remove its TOAST table +-- rewrite table to remove its TOAST table; need a non-constant column default +alter table test_storage add b int default random()::int; +select reltoastrelid <> 0 as has_toast_table + from pg_class where oid = 'test_storage'::regclass; + has_toast_table +----------------- + f +(1 row) + alter table test_storage alter a set storage extended; -- re-add TOAST table select reltoastrelid <> 0 as has_toast_table -from pg_class -where oid = 'test_storage'::regclass; + from pg_class where oid = 'test_storage'::regclass; has_toast_table ----------------- t @@ -2344,11 +2358,11 @@ where oid = 'test_storage'::regclass; create index test_storage_idx on test_storage (b, a); alter table test_storage alter column a set storage external; \d+ test_storage - Table "public.test_storage" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description 
---------+---------+-----------+----------+---------+----------+--------------+------------- - a | text | | | | external | | - b | integer | | | 0 | plain | | + Table "public.test_storage" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+-------------------+----------+--------------+------------- + a | text | | | | external | | + b | integer | | | random()::integer | plain | | Indexes: "test_storage_idx" btree (b, a) diff --git a/src/test/regress/expected/create_view.out b/src/test/regress/expected/create_view.out index fdb0657bb72..ac88c92f398 100644 --- a/src/test/regress/expected/create_view.out +++ b/src/test/regress/expected/create_view.out @@ -1797,6 +1797,7 @@ select pg_get_viewdef('tt20v', true); -- reverse-listing of various special function syntaxes required by SQL create view tt201v as select + ('2022-12-01'::date + '1 day'::interval) at time zone 'UTC' as atz, extract(day from now()) as extr, (now(), '1 day'::interval) overlaps (current_timestamp(2), '1 day'::interval) as o, @@ -1819,10 +1820,11 @@ select select pg_get_viewdef('tt201v', true); pg_get_viewdef ----------------------------------------------------------------------------------------------- - SELECT EXTRACT(day FROM now()) AS extr, + + SELECT (('12-01-2022'::date + '@ 1 day'::interval) AT TIME ZONE 'UTC'::text) AS atz, + + EXTRACT(day FROM now()) AS extr, + ((now(), '@ 1 day'::interval) OVERLAPS (CURRENT_TIMESTAMP(2), '@ 1 day'::interval)) AS o,+ - (('foo'::text) IS NORMALIZED) AS isn, + - (('foo'::text) IS NFKC NORMALIZED) AS isnn, + + ('foo'::text IS NORMALIZED) AS isn, + + ('foo'::text IS NFKC NORMALIZED) AS isnn, + NORMALIZE('foo'::text) AS n, + NORMALIZE('foo'::text, NFKD) AS nfkd, + OVERLAY('foo'::text PLACING 'bar'::text FROM 2) AS ovl, + diff --git a/src/test/regress/expected/expressions.out b/src/test/regress/expected/expressions.out index 4168cd55af6..56b8c9c6189 100644 --- a/src/test/regress/expected/expressions.out +++ b/src/test/regress/expected/expressions.out @@ -57,7 +57,7 @@ SELECT now()::timestamp::text = localtimestamp::text; t (1 row) --- current_role/user/user is tested in rolnames.sql +-- current_role/user/user is tested in rolenames.sql -- current database / catalog SELECT current_catalog = current_database(); ?column? diff --git a/src/test/regress/expected/join.out b/src/test/regress/expected/join.out index 73d95559569..1e82018e357 100644 --- a/src/test/regress/expected/join.out +++ b/src/test/regress/expected/join.out @@ -3570,6 +3570,26 @@ where b; 0 | t | t (2 rows) +-- Test PHV in a semijoin qual, which confused useless-RTE removal (bug #17700) +explain (verbose, costs off) +with ctetable as not materialized ( select 1 as f1 ) +select * from ctetable c1 +where f1 in ( select c3.f1 from ctetable c2 full join ctetable c3 on true ); + QUERY PLAN +---------------------------- + Result + Output: 1 + One-Time Filter: (1 = 1) +(3 rows) + +with ctetable as not materialized ( select 1 as f1 ) +select * from ctetable c1 +where f1 in ( select c3.f1 from ctetable c2 full join ctetable c3 on true ); + f1 +---- + 1 +(1 row) + -- -- test inlining of immutable functions -- diff --git a/src/test/regress/expected/jsonb.out b/src/test/regress/expected/jsonb.out index 635016d4aad..aeb92caa06d 100644 --- a/src/test/regress/expected/jsonb.out +++ b/src/test/regress/expected/jsonb.out @@ -5207,6 +5207,40 @@ DETAIL: The path assumes key is a composite object, but it is a scalar value. 
update test_jsonb_subscript set test_json[0][0] = '1'; ERROR: cannot replace existing key DETAIL: The path assumes key is a composite object, but it is a scalar value. +-- try some things with short-header and toasted subscript values +drop table test_jsonb_subscript; +create temp table test_jsonb_subscript ( + id text, + test_json jsonb +); +insert into test_jsonb_subscript values('foo', '{"foo": "bar"}'); +insert into test_jsonb_subscript + select s, ('{"' || s || '": "bar"}')::jsonb from repeat('xyzzy', 500) s; +select length(id), test_json[id] from test_jsonb_subscript; + length | test_json +--------+----------- + 3 | "bar" + 2500 | "bar" +(2 rows) + +update test_jsonb_subscript set test_json[id] = '"baz"'; +select length(id), test_json[id] from test_jsonb_subscript; + length | test_json +--------+----------- + 3 | "baz" + 2500 | "baz" +(2 rows) + +\x +table test_jsonb_subscript; +-[ RECORD 1 ]-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +id | foo +test_json | {"foo": "baz"} +-[ RECORD 2 
]-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +id | 
xyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzy +test_json | 
{"xyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzy": "baz"} + +\x -- jsonb to tsvector select to_tsvector('{"a": "aaa bbb ddd ccc", "b": ["eee fff ggg"], "c": {"d": "hhh iii"}}'::jsonb); to_tsvector diff --git a/src/test/regress/expected/jsonb_jsonpath.out b/src/test/regress/expected/jsonb_jsonpath.out index 508ddd797ed..328a6b39199 100644 --- a/src/test/regress/expected/jsonb_jsonpath.out +++ b/src/test/regress/expected/jsonb_jsonpath.out @@ -2212,6 +2212,14 @@ SELECT jsonb_path_query('[{"a": 1}, {"a": 2}]', '$[*] ? (@.a > 10)'); ------------------ (0 rows) +SELECT jsonb_path_query('[{"a": 1}]', '$undefined_var'); +ERROR: could not find jsonpath variable "undefined_var" +SELECT jsonb_path_query('[{"a": 1}]', 'false'); + jsonb_path_query +------------------ + false +(1 row) + SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}, {}]', 'strict $[*].a'); ERROR: JSON object does not contain key "a" SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}]', '$[*].a'); @@ -2282,6 +2290,14 @@ SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*]. 
(1 row) +SELECT jsonb_path_query_first('[{"a": 1}]', '$undefined_var'); +ERROR: could not find jsonpath variable "undefined_var" +SELECT jsonb_path_query_first('[{"a": 1}]', 'false'); + jsonb_path_query_first +------------------------ + false +(1 row) + SELECT jsonb '[{"a": 1}, {"a": 2}]' @? '$[*].a ? (@ > 1)'; ?column? ---------- @@ -2312,6 +2328,14 @@ SELECT jsonb_path_exists('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*] ? (@. f (1 row) +SELECT jsonb_path_exists('[{"a": 1}]', '$undefined_var'); +ERROR: could not find jsonpath variable "undefined_var" +SELECT jsonb_path_exists('[{"a": 1}]', 'false'); + jsonb_path_exists +------------------- + t +(1 row) + SELECT jsonb_path_match('true', '$', silent => false); jsonb_path_match ------------------ @@ -2374,6 +2398,14 @@ SELECT jsonb_path_match('[{"a": 1}, {"a": 2}]', '$[*].a > 1'); t (1 row) +SELECT jsonb_path_match('[{"a": 1}]', '$undefined_var'); +ERROR: could not find jsonpath variable "undefined_var" +SELECT jsonb_path_match('[{"a": 1}]', 'false'); + jsonb_path_match +------------------ + f +(1 row) + -- test string comparison (Unicode codepoint collation) WITH str(j, num) AS ( diff --git a/src/test/regress/expected/memoize.out b/src/test/regress/expected/memoize.out index 3dfc46118da..09b8b5fa719 100644 --- a/src/test/regress/expected/memoize.out +++ b/src/test/regress/expected/memoize.out @@ -209,6 +209,45 @@ SELECT * FROM strtest s1 INNER JOIN strtest s2 ON s1.t >= s2.t;', false); (7 rows) DROP TABLE strtest; +-- Ensure memoize works with partitionwise join +SET enable_partitionwise_join TO on; +CREATE TABLE prt (a int) PARTITION BY RANGE(a); +CREATE TABLE prt_p1 PARTITION OF prt FOR VALUES FROM (0) TO (10); +CREATE TABLE prt_p2 PARTITION OF prt FOR VALUES FROM (10) TO (20); +INSERT INTO prt VALUES (0), (0), (0), (0); +INSERT INTO prt VALUES (10), (10), (10), (10); +CREATE INDEX iprt_p1_a ON prt_p1 (a); +CREATE INDEX iprt_p2_a ON prt_p2 (a); +ANALYZE prt; +SELECT explain_memoize(' +SELECT * FROM prt t1 INNER JOIN prt t2 ON t1.a = t2.a;', false); + explain_memoize +------------------------------------------------------------------------------------------ + Append (actual rows=32 loops=N) + -> Nested Loop (actual rows=16 loops=N) + -> Index Only Scan using iprt_p1_a on prt_p1 t1_1 (actual rows=4 loops=N) + Heap Fetches: N + -> Memoize (actual rows=4 loops=N) + Cache Key: t1_1.a + Cache Mode: logical + Hits: 3 Misses: 1 Evictions: Zero Overflows: 0 Memory Usage: NkB + -> Index Only Scan using iprt_p1_a on prt_p1 t2_1 (actual rows=4 loops=N) + Index Cond: (a = t1_1.a) + Heap Fetches: N + -> Nested Loop (actual rows=16 loops=N) + -> Index Only Scan using iprt_p2_a on prt_p2 t1_2 (actual rows=4 loops=N) + Heap Fetches: N + -> Memoize (actual rows=4 loops=N) + Cache Key: t1_2.a + Cache Mode: logical + Hits: 3 Misses: 1 Evictions: Zero Overflows: 0 Memory Usage: NkB + -> Index Only Scan using iprt_p2_a on prt_p2 t2_2 (actual rows=4 loops=N) + Index Cond: (a = t1_2.a) + Heap Fetches: N +(21 rows) + +DROP TABLE prt; +RESET enable_partitionwise_join; -- Exercise Memoize code that flushes the cache when a parameter changes which -- is not part of the cache key. 
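+-- (Illustrative note: with enable_partitionwise_join on, each child join
+-- above gets its own Memoize node keyed on that partition's join column,
+-- rather than a single Memoize over the joined appends.)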
-- Ensure we get a Memoize plan diff --git a/src/test/regress/expected/psql.out b/src/test/regress/expected/psql.out index a5951c0c6d1..2f06a903b57 100644 --- a/src/test/regress/expected/psql.out +++ b/src/test/regress/expected/psql.out @@ -5328,6 +5328,13 @@ List of access methods pg_catalog | bit_xor | smallint | smallint | agg (3 rows) +\df *._pg_expandarray + List of functions + Schema | Name | Result data type | Argument data types | Type +--------------------+-----------------+------------------+-------------------------------------------+------ + information_schema | _pg_expandarray | SETOF record | anyarray, OUT x anyelement, OUT n integer | func +(1 row) + \do - pg_catalog.int4 List of operators Schema | Name | Left arg type | Right arg type | Result type | Description @@ -5342,6 +5349,61 @@ List of access methods pg_catalog | && | anyarray | anyarray | boolean | overlaps (1 row) +-- check \sf +\sf information_schema._pg_expandarray +CREATE OR REPLACE FUNCTION information_schema._pg_expandarray(anyarray, OUT x anyelement, OUT n integer) + RETURNS SETOF record + LANGUAGE sql + IMMUTABLE PARALLEL SAFE STRICT +AS $function$select $1[s], + s operator(pg_catalog.-) pg_catalog.array_lower($1,1) operator(pg_catalog.+) 1 + from pg_catalog.generate_series(pg_catalog.array_lower($1,1), + pg_catalog.array_upper($1,1), + 1) as g(s)$function$ +\sf+ information_schema._pg_expandarray + CREATE OR REPLACE FUNCTION information_schema._pg_expandarray(anyarray, OUT x anyelement, OUT n integer) + RETURNS SETOF record + LANGUAGE sql + IMMUTABLE PARALLEL SAFE STRICT +1 AS $function$select $1[s], +2 s operator(pg_catalog.-) pg_catalog.array_lower($1,1) operator(pg_catalog.+) 1 +3 from pg_catalog.generate_series(pg_catalog.array_lower($1,1), +4 pg_catalog.array_upper($1,1), +5 1) as g(s)$function$ +\sf+ interval_pl_time + CREATE OR REPLACE FUNCTION pg_catalog.interval_pl_time(interval, time without time zone) + RETURNS time without time zone + LANGUAGE sql + IMMUTABLE PARALLEL SAFE STRICT COST 1 +1 RETURN ($2 + $1) +\sf ts_debug(text) +CREATE OR REPLACE FUNCTION pg_catalog.ts_debug(document text, OUT alias text, OUT description text, OUT token text, OUT dictionaries regdictionary[], OUT dictionary regdictionary, OUT lexemes text[]) + RETURNS SETOF record + LANGUAGE sql + STABLE PARALLEL SAFE STRICT +BEGIN ATOMIC + SELECT ts_debug.alias, + ts_debug.description, + ts_debug.token, + ts_debug.dictionaries, + ts_debug.dictionary, + ts_debug.lexemes + FROM ts_debug(get_current_ts_config(), ts_debug.document) ts_debug(alias, description, token, dictionaries, dictionary, lexemes); +END +\sf+ ts_debug(text) + CREATE OR REPLACE FUNCTION pg_catalog.ts_debug(document text, OUT alias text, OUT description text, OUT token text, OUT dictionaries regdictionary[], OUT dictionary regdictionary, OUT lexemes text[]) + RETURNS SETOF record + LANGUAGE sql + STABLE PARALLEL SAFE STRICT +1 BEGIN ATOMIC +2 SELECT ts_debug.alias, +3 ts_debug.description, +4 ts_debug.token, +5 ts_debug.dictionaries, +6 ts_debug.dictionary, +7 ts_debug.lexemes +8 FROM ts_debug(get_current_ts_config(), ts_debug.document) ts_debug(alias, description, token, dictionaries, dictionary, lexemes); +9 END -- check describing invalid multipart names \dA regression.heap improper qualified name (too many dotted names): regression.heap diff --git a/src/test/regress/expected/replica_identity.out b/src/test/regress/expected/replica_identity.out index c2fe3e476e3..58b4fca9926 100644 --- a/src/test/regress/expected/replica_identity.out +++ 
b/src/test/regress/expected/replica_identity.out @@ -239,7 +239,44 @@ Indexes: -- used as replica identity. ALTER TABLE test_replica_identity3 ALTER COLUMN id DROP NOT NULL; ERROR: column "id" is in index used as replica identity +-- +-- Test that replica identity can be set on an index that's not yet valid. +-- (This matches the way pg_dump will try to dump a partitioned table.) +-- +CREATE TABLE test_replica_identity4(id integer NOT NULL) PARTITION BY LIST (id); +CREATE TABLE test_replica_identity4_1(id integer NOT NULL); +ALTER TABLE ONLY test_replica_identity4 + ATTACH PARTITION test_replica_identity4_1 FOR VALUES IN (1); +ALTER TABLE ONLY test_replica_identity4 + ADD CONSTRAINT test_replica_identity4_pkey PRIMARY KEY (id); +ALTER TABLE ONLY test_replica_identity4 + REPLICA IDENTITY USING INDEX test_replica_identity4_pkey; +ALTER TABLE ONLY test_replica_identity4_1 + ADD CONSTRAINT test_replica_identity4_1_pkey PRIMARY KEY (id); +\d+ test_replica_identity4 + Partitioned table "public.test_replica_identity4" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + id | integer | | not null | | plain | | +Partition key: LIST (id) +Indexes: + "test_replica_identity4_pkey" PRIMARY KEY, btree (id) INVALID REPLICA IDENTITY +Partitions: test_replica_identity4_1 FOR VALUES IN (1) + +ALTER INDEX test_replica_identity4_pkey + ATTACH PARTITION test_replica_identity4_1_pkey; +\d+ test_replica_identity4 + Partitioned table "public.test_replica_identity4" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + id | integer | | not null | | plain | | +Partition key: LIST (id) +Indexes: + "test_replica_identity4_pkey" PRIMARY KEY, btree (id) REPLICA IDENTITY +Partitions: test_replica_identity4_1 FOR VALUES IN (1) + DROP TABLE test_replica_identity; DROP TABLE test_replica_identity2; DROP TABLE test_replica_identity3; +DROP TABLE test_replica_identity4; DROP TABLE test_replica_identity_othertable; diff --git a/src/test/regress/expected/rules.out b/src/test/regress/expected/rules.out index 16a471245a9..a2922a0a9ec 100644 --- a/src/test/regress/expected/rules.out +++ b/src/test/regress/expected/rules.out @@ -3099,11 +3099,11 @@ select pg_get_viewdef('shoe'::regclass,0) as prettier; -- -- check multi-row VALUES in rules -- -create table rules_src(f1 int, f2 int); -create table rules_log(f1 int, f2 int, tag text); +create table rules_src(f1 int, f2 int default 0); +create table rules_log(f1 int, f2 int, tag text, id serial); insert into rules_src values(1,2), (11,12); create rule r1 as on update to rules_src do also - insert into rules_log values(old.*, 'old'), (new.*, 'new'); + insert into rules_log values(old.*, 'old', default), (new.*, 'new', default); update rules_src set f2 = f2 + 1; update rules_src set f2 = f2 * 10; select * from rules_src; @@ -3114,16 +3114,16 @@ select * from rules_src; (2 rows) select * from rules_log; - f1 | f2 | tag -----+-----+----- - 1 | 2 | old - 1 | 3 | new - 11 | 12 | old - 11 | 13 | new - 1 | 3 | old - 1 | 30 | new - 11 | 13 | old - 11 | 130 | new + f1 | f2 | tag | id +----+-----+-----+---- + 1 | 2 | old | 1 + 1 | 3 | new | 2 + 11 | 12 | old | 3 + 11 | 13 | new | 4 + 1 | 3 | old | 5 + 1 | 30 | new | 6 + 11 | 13 | old | 7 + 11 | 130 | new | 8 (8 rows) create rule r2 as on update to rules_src do also @@ -3137,71 +3137,84 @@ 
update rules_src set f2 = f2 / 10; 11 | 13 | new (4 rows) +create rule r3 as on insert to rules_src do also + insert into rules_log values(null, null, '-', default), (new.*, 'new', default); +insert into rules_src values(22,23), (33,default); select * from rules_src; f1 | f2 ----+---- 1 | 3 11 | 13 -(2 rows) + 22 | 23 + 33 | 0 +(4 rows) select * from rules_log; - f1 | f2 | tag -----+-----+----- - 1 | 2 | old - 1 | 3 | new - 11 | 12 | old - 11 | 13 | new - 1 | 3 | old - 1 | 30 | new - 11 | 13 | old - 11 | 130 | new - 1 | 30 | old - 1 | 3 | new - 11 | 130 | old - 11 | 13 | new -(12 rows) - -create rule r3 as on delete to rules_src do notify rules_src_deletion; + f1 | f2 | tag | id +----+-----+-----+---- + 1 | 2 | old | 1 + 1 | 3 | new | 2 + 11 | 12 | old | 3 + 11 | 13 | new | 4 + 1 | 3 | old | 5 + 1 | 30 | new | 6 + 11 | 13 | old | 7 + 11 | 130 | new | 8 + 1 | 30 | old | 9 + 1 | 3 | new | 10 + 11 | 130 | old | 11 + 11 | 13 | new | 12 + | | - | 13 + 22 | 23 | new | 14 + | | - | 15 + 33 | 0 | new | 16 +(16 rows) + +create rule r4 as on delete to rules_src do notify rules_src_deletion; \d+ rules_src Table "public.rules_src" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description --------+---------+-----------+----------+---------+---------+--------------+------------- f1 | integer | | | | plain | | - f2 | integer | | | | plain | | + f2 | integer | | | 0 | plain | | Rules: r1 AS - ON UPDATE TO rules_src DO INSERT INTO rules_log (f1, f2, tag) VALUES (old.f1,old.f2,'old'::text), (new.f1,new.f2,'new'::text) + ON UPDATE TO rules_src DO INSERT INTO rules_log (f1, f2, tag, id) VALUES (old.f1,old.f2,'old'::text,DEFAULT), (new.f1,new.f2,'new'::text,DEFAULT) r2 AS ON UPDATE TO rules_src DO VALUES (old.f1,old.f2,'old'::text), (new.f1,new.f2,'new'::text) r3 AS + ON INSERT TO rules_src DO INSERT INTO rules_log (f1, f2, tag, id) VALUES (NULL::integer,NULL::integer,'-'::text,DEFAULT), (new.f1,new.f2,'new'::text,DEFAULT) + r4 AS ON DELETE TO rules_src DO NOTIFY rules_src_deletion -- -- Ensure an aliased target relation for insert is correctly deparsed. 
-- -create rule r4 as on insert to rules_src do instead insert into rules_log AS trgt SELECT NEW.* RETURNING trgt.f1, trgt.f2; -create rule r5 as on update to rules_src do instead UPDATE rules_log AS trgt SET tag = 'updated' WHERE trgt.f1 = new.f1; +create rule r5 as on insert to rules_src do instead insert into rules_log AS trgt SELECT NEW.* RETURNING trgt.f1, trgt.f2; +create rule r6 as on update to rules_src do instead UPDATE rules_log AS trgt SET tag = 'updated' WHERE trgt.f1 = new.f1; \d+ rules_src Table "public.rules_src" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description --------+---------+-----------+----------+---------+---------+--------------+------------- f1 | integer | | | | plain | | - f2 | integer | | | | plain | | + f2 | integer | | | 0 | plain | | Rules: r1 AS - ON UPDATE TO rules_src DO INSERT INTO rules_log (f1, f2, tag) VALUES (old.f1,old.f2,'old'::text), (new.f1,new.f2,'new'::text) + ON UPDATE TO rules_src DO INSERT INTO rules_log (f1, f2, tag, id) VALUES (old.f1,old.f2,'old'::text,DEFAULT), (new.f1,new.f2,'new'::text,DEFAULT) r2 AS ON UPDATE TO rules_src DO VALUES (old.f1,old.f2,'old'::text), (new.f1,new.f2,'new'::text) r3 AS + ON INSERT TO rules_src DO INSERT INTO rules_log (f1, f2, tag, id) VALUES (NULL::integer,NULL::integer,'-'::text,DEFAULT), (new.f1,new.f2,'new'::text,DEFAULT) + r4 AS ON DELETE TO rules_src DO NOTIFY rules_src_deletion - r4 AS + r5 AS ON INSERT TO rules_src DO INSTEAD INSERT INTO rules_log AS trgt (f1, f2) SELECT new.f1, new.f2 RETURNING trgt.f1, trgt.f2 - r5 AS + r6 AS ON UPDATE TO rules_src DO INSTEAD UPDATE rules_log trgt SET tag = 'updated'::text WHERE trgt.f1 = new.f1 diff --git a/src/test/regress/expected/subscription.out b/src/test/regress/expected/subscription.out index e1bac50dfe4..7075b45a4f7 100644 --- a/src/test/regress/expected/subscription.out +++ b/src/test/regress/expected/subscription.out @@ -70,7 +70,15 @@ ERROR: cannot enable subscription that does not have a slot name ALTER SUBSCRIPTION regress_testsub3 REFRESH PUBLICATION; ERROR: ALTER SUBSCRIPTION ... REFRESH is not allowed for disabled subscriptions DROP SUBSCRIPTION regress_testsub3; --- fail - invalid connection string +-- fail, connection string does not parse +CREATE SUBSCRIPTION regress_testsub5 CONNECTION 'i_dont_exist=param' PUBLICATION testpub; +ERROR: invalid connection string syntax: invalid connection option "i_dont_exist" + +-- fail, connection string parses, but doesn't work (and does so without +-- connecting, so this is reliable and safe) +CREATE SUBSCRIPTION regress_testsub5 CONNECTION 'port=-1' PUBLICATION testpub; +ERROR: could not connect to the publisher: invalid port number: "-1" +-- fail - invalid connection string during ALTER ALTER SUBSCRIPTION regress_testsub CONNECTION 'foobar'; ERROR: invalid connection string syntax: missing "=" after "foobar" in connection info string \dRs+ diff --git a/src/test/regress/pg_regress.c b/src/test/regress/pg_regress.c index 9320cf0aeec..c479142222e 100644 --- a/src/test/regress/pg_regress.c +++ b/src/test/regress/pg_regress.c @@ -3087,6 +3087,17 @@ regression_main(int argc, char *argv[], optind++; } + /* + * We must have a database to run the tests in; either a default name, or + * one supplied by the --dbname switch. 
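+ * (In particular, an explicitly empty --dbname= is rejected up front
+ * here instead of causing confusing failures later on.)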
+ */ + if (!(dblist && dblist->str && dblist->str[0])) + { + fprintf(stderr, _("%s: no database name was specified\n"), + progname); + exit(2); + } + if (config_auth_datadir) { #ifdef ENABLE_SSPI diff --git a/src/test/regress/sql/alter_table.sql b/src/test/regress/sql/alter_table.sql index ed8941a890a..7ddf9f898a8 100644 --- a/src/test/regress/sql/alter_table.sql +++ b/src/test/regress/sql/alter_table.sql @@ -1567,13 +1567,16 @@ alter table recur1 alter column f2 type recur2; -- fails -- SET STORAGE may need to add a TOAST table create table test_storage (a text); +select reltoastrelid <> 0 as has_toast_table + from pg_class where oid = 'test_storage'::regclass; alter table test_storage alter a set storage plain; -alter table test_storage add b int default 0; -- rewrite table to remove its TOAST table +-- rewrite table to remove its TOAST table; need a non-constant column default +alter table test_storage add b int default random()::int; +select reltoastrelid <> 0 as has_toast_table + from pg_class where oid = 'test_storage'::regclass; alter table test_storage alter a set storage extended; -- re-add TOAST table - select reltoastrelid <> 0 as has_toast_table -from pg_class -where oid = 'test_storage'::regclass; + from pg_class where oid = 'test_storage'::regclass; -- test that SET STORAGE propagates to index correctly create index test_storage_idx on test_storage (b, a); diff --git a/src/test/regress/sql/create_view.sql b/src/test/regress/sql/create_view.sql index 2e7452ac9ea..f35364b8a23 100644 --- a/src/test/regress/sql/create_view.sql +++ b/src/test/regress/sql/create_view.sql @@ -615,6 +615,7 @@ select pg_get_viewdef('tt20v', true); create view tt201v as select + ('2022-12-01'::date + '1 day'::interval) at time zone 'UTC' as atz, extract(day from now()) as extr, (now(), '1 day'::interval) overlaps (current_timestamp(2), '1 day'::interval) as o, diff --git a/src/test/regress/sql/expressions.sql b/src/test/regress/sql/expressions.sql index f9f9f97efa4..f87751f4ef5 100644 --- a/src/test/regress/sql/expressions.sql +++ b/src/test/regress/sql/expressions.sql @@ -24,7 +24,7 @@ SELECT length(current_timestamp::text) >= length(current_timestamp(0)::text); -- localtimestamp SELECT now()::timestamp::text = localtimestamp::text; --- current_role/user/user is tested in rolnames.sql +-- current_role/user/user is tested in rolenames.sql -- current database / catalog SELECT current_catalog = current_database(); diff --git a/src/test/regress/sql/join.sql b/src/test/regress/sql/join.sql index 1b994597d3c..9c6823a0889 100644 --- a/src/test/regress/sql/join.sql +++ b/src/test/regress/sql/join.sql @@ -1124,6 +1124,16 @@ select * from select a as b) as t3 where b; +-- Test PHV in a semijoin qual, which confused useless-RTE removal (bug #17700) +explain (verbose, costs off) +with ctetable as not materialized ( select 1 as f1 ) +select * from ctetable c1 +where f1 in ( select c3.f1 from ctetable c2 full join ctetable c3 on true ); + +with ctetable as not materialized ( select 1 as f1 ) +select * from ctetable c1 +where f1 in ( select c3.f1 from ctetable c2 full join ctetable c3 on true ); + -- -- test inlining of immutable functions -- diff --git a/src/test/regress/sql/jsonb.sql b/src/test/regress/sql/jsonb.sql index b1ce452a822..c48f9944070 100644 --- a/src/test/regress/sql/jsonb.sql +++ b/src/test/regress/sql/jsonb.sql @@ -1410,6 +1410,24 @@ insert into test_jsonb_subscript values (1, 'null'); update test_jsonb_subscript set test_json[0] = '1'; update test_jsonb_subscript set test_json[0][0] = '1'; +-- try 
some things with short-header and toasted subscript values + +drop table test_jsonb_subscript; +create temp table test_jsonb_subscript ( + id text, + test_json jsonb +); + +insert into test_jsonb_subscript values('foo', '{"foo": "bar"}'); +insert into test_jsonb_subscript + select s, ('{"' || s || '": "bar"}')::jsonb from repeat('xyzzy', 500) s; +select length(id), test_json[id] from test_jsonb_subscript; +update test_jsonb_subscript set test_json[id] = '"baz"'; +select length(id), test_json[id] from test_jsonb_subscript; +\x +table test_jsonb_subscript; +\x + -- jsonb to tsvector select to_tsvector('{"a": "aaa bbb ddd ccc", "b": ["eee fff ggg"], "c": {"d": "hhh iii"}}'::jsonb); diff --git a/src/test/regress/sql/jsonb_jsonpath.sql b/src/test/regress/sql/jsonb_jsonpath.sql index 60f73cb0590..bd025077d52 100644 --- a/src/test/regress/sql/jsonb_jsonpath.sql +++ b/src/test/regress/sql/jsonb_jsonpath.sql @@ -532,6 +532,8 @@ set time zone default; SELECT jsonb_path_query('[{"a": 1}, {"a": 2}]', '$[*]'); SELECT jsonb_path_query('[{"a": 1}, {"a": 2}]', '$[*] ? (@.a > 10)'); +SELECT jsonb_path_query('[{"a": 1}]', '$undefined_var'); +SELECT jsonb_path_query('[{"a": 1}]', 'false'); SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}, {}]', 'strict $[*].a'); SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}]', '$[*].a'); @@ -547,12 +549,16 @@ SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}]', '$[*].a ? (@ == 1)'); SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}]', '$[*].a ? (@ > 10)'); SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*].a ? (@ > $min && @ < $max)', vars => '{"min": 1, "max": 4}'); SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*].a ? (@ > $min && @ < $max)', vars => '{"min": 3, "max": 4}'); +SELECT jsonb_path_query_first('[{"a": 1}]', '$undefined_var'); +SELECT jsonb_path_query_first('[{"a": 1}]', 'false'); SELECT jsonb '[{"a": 1}, {"a": 2}]' @? '$[*].a ? (@ > 1)'; SELECT jsonb '[{"a": 1}, {"a": 2}]' @? '$[*] ? (@.a > 2)'; SELECT jsonb_path_exists('[{"a": 1}, {"a": 2}]', '$[*].a ? (@ > 1)'); SELECT jsonb_path_exists('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*] ? (@.a > $min && @.a < $max)', vars => '{"min": 1, "max": 4}'); SELECT jsonb_path_exists('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*] ? 
(@.a > $min && @.a < $max)', vars => '{"min": 3, "max": 4}'); +SELECT jsonb_path_exists('[{"a": 1}]', '$undefined_var'); +SELECT jsonb_path_exists('[{"a": 1}]', 'false'); SELECT jsonb_path_match('true', '$', silent => false); SELECT jsonb_path_match('false', '$', silent => false); @@ -569,6 +575,8 @@ SELECT jsonb_path_match('[true, true]', '$[*]', silent => false); SELECT jsonb '[{"a": 1}, {"a": 2}]' @@ '$[*].a > 1'; SELECT jsonb '[{"a": 1}, {"a": 2}]' @@ '$[*].a > 2'; SELECT jsonb_path_match('[{"a": 1}, {"a": 2}]', '$[*].a > 1'); +SELECT jsonb_path_match('[{"a": 1}]', '$undefined_var'); +SELECT jsonb_path_match('[{"a": 1}]', 'false'); -- test string comparison (Unicode codepoint collation) WITH str(j, num) AS diff --git a/src/test/regress/sql/memoize.sql b/src/test/regress/sql/memoize.sql index 68ce217df1b..78ae7bb782c 100644 --- a/src/test/regress/sql/memoize.sql +++ b/src/test/regress/sql/memoize.sql @@ -110,6 +110,25 @@ SELECT * FROM strtest s1 INNER JOIN strtest s2 ON s1.t >= s2.t;', false); DROP TABLE strtest; +-- Ensure memoize works with partitionwise join +SET enable_partitionwise_join TO on; + +CREATE TABLE prt (a int) PARTITION BY RANGE(a); +CREATE TABLE prt_p1 PARTITION OF prt FOR VALUES FROM (0) TO (10); +CREATE TABLE prt_p2 PARTITION OF prt FOR VALUES FROM (10) TO (20); +INSERT INTO prt VALUES (0), (0), (0), (0); +INSERT INTO prt VALUES (10), (10), (10), (10); +CREATE INDEX iprt_p1_a ON prt_p1 (a); +CREATE INDEX iprt_p2_a ON prt_p2 (a); +ANALYZE prt; + +SELECT explain_memoize(' +SELECT * FROM prt t1 INNER JOIN prt t2 ON t1.a = t2.a;', false); + +DROP TABLE prt; + +RESET enable_partitionwise_join; + -- Exercise Memoize code that flushes the cache when a parameter changes which -- is not part of the cache key. diff --git a/src/test/regress/sql/psql.sql b/src/test/regress/sql/psql.sql index 501579f22a8..467d0b42143 100644 --- a/src/test/regress/sql/psql.sql +++ b/src/test/regress/sql/psql.sql @@ -1252,9 +1252,17 @@ drop role regress_partitioning_role; \df has_database_privilege oid text \df has_database_privilege oid text - \dfa bit* small* +\df *._pg_expandarray \do - pg_catalog.int4 \do && anyarray * +-- check \sf +\sf information_schema._pg_expandarray +\sf+ information_schema._pg_expandarray +\sf+ interval_pl_time +\sf ts_debug(text) +\sf+ ts_debug(text) + -- check describing invalid multipart names \dA regression.heap \dA nonesuch.heap diff --git a/src/test/regress/sql/replica_identity.sql b/src/test/regress/sql/replica_identity.sql index 11974cf25af..26ee7423614 100644 --- a/src/test/regress/sql/replica_identity.sql +++ b/src/test/regress/sql/replica_identity.sql @@ -106,7 +106,27 @@ ALTER TABLE test_replica_identity3 ALTER COLUMN id TYPE bigint; -- used as replica identity. ALTER TABLE test_replica_identity3 ALTER COLUMN id DROP NOT NULL; +-- +-- Test that replica identity can be set on an index that's not yet valid. +-- (This matches the way pg_dump will try to dump a partitioned table.) 
diff --git a/src/test/regress/sql/rules.sql b/src/test/regress/sql/rules.sql
index 078eabcd4cb..7b0cd28720c 100644
--- a/src/test/regress/sql/rules.sql
+++ b/src/test/regress/sql/rules.sql
@@ -1016,11 +1016,11 @@ select pg_get_viewdef('shoe'::regclass,0) as prettier;
 --
 -- check multi-row VALUES in rules
 --
-create table rules_src(f1 int, f2 int);
-create table rules_log(f1 int, f2 int, tag text);
+create table rules_src(f1 int, f2 int default 0);
+create table rules_log(f1 int, f2 int, tag text, id serial);
 insert into rules_src values(1,2), (11,12);
 create rule r1 as on update to rules_src do also
-  insert into rules_log values(old.*, 'old'), (new.*, 'new');
+  insert into rules_log values(old.*, 'old', default), (new.*, 'new', default);
 update rules_src set f2 = f2 + 1;
 update rules_src set f2 = f2 * 10;
 select * from rules_src;
@@ -1028,16 +1028,19 @@ select * from rules_log;
 create rule r2 as on update to rules_src do also
   values(old.*, 'old'), (new.*, 'new');
 update rules_src set f2 = f2 / 10;
+create rule r3 as on insert to rules_src do also
+  insert into rules_log values(null, null, '-', default), (new.*, 'new', default);
+insert into rules_src values(22,23), (33,default);
 select * from rules_src;
 select * from rules_log;
-create rule r3 as on delete to rules_src do notify rules_src_deletion;
+create rule r4 as on delete to rules_src do notify rules_src_deletion;
 \d+ rules_src

 --
 -- Ensure an aliased target relation for insert is correctly deparsed.
 --
-create rule r4 as on insert to rules_src do instead insert into rules_log AS trgt SELECT NEW.* RETURNING trgt.f1, trgt.f2;
-create rule r5 as on update to rules_src do instead UPDATE rules_log AS trgt SET tag = 'updated' WHERE trgt.f1 = new.f1;
+create rule r5 as on insert to rules_src do instead insert into rules_log AS trgt SELECT NEW.* RETURNING trgt.f1, trgt.f2;
+create rule r6 as on update to rules_src do instead UPDATE rules_log AS trgt SET tag = 'updated' WHERE trgt.f1 = new.f1;
 \d+ rules_src

 --
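
[Editor's note: the rules_log changes hinge on DEFAULT inside a rule's VALUES list being resolved against the table the rule inserts into, not the table the triggering statement targeted. A minimal standalone illustration (hypothetical table and rule names, not part of the regression suite):

create table audit_t(v int, note text default 'none');
create table src_t(v int);
create rule src_t_ins as on insert to src_t do also
  insert into audit_t values (new.v, default);
insert into src_t values (1);
-- audit_t.note is expected to contain 'none': the DEFAULT was taken
-- from audit_t, the rule's insert target.]
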
diff --git a/src/test/regress/sql/subscription.sql b/src/test/regress/sql/subscription.sql
index 855a341a3d9..c8a2d08e721 100644
--- a/src/test/regress/sql/subscription.sql
+++ b/src/test/regress/sql/subscription.sql
@@ -56,7 +56,14 @@ ALTER SUBSCRIPTION regress_testsub3 REFRESH PUBLICATION;

 DROP SUBSCRIPTION regress_testsub3;

--- fail - invalid connection string
+-- fail, connection string does not parse
+CREATE SUBSCRIPTION regress_testsub5 CONNECTION 'i_dont_exist=param' PUBLICATION testpub;
+
+-- fail, connection string parses, but doesn't work (and does so without
+-- connecting, so this is reliable and safe)
+CREATE SUBSCRIPTION regress_testsub5 CONNECTION 'port=-1' PUBLICATION testpub;
+
+-- fail - invalid connection string during ALTER
 ALTER SUBSCRIPTION regress_testsub CONNECTION 'foobar';

 \dRs+
diff --git a/src/test/subscription/t/004_sync.pl b/src/test/subscription/t/004_sync.pl
index 959e47fad5e..545599b8f8d 100644
--- a/src/test/subscription/t/004_sync.pl
+++ b/src/test/subscription/t/004_sync.pl
@@ -163,9 +163,13 @@
 # subscriber is stuck on data copy for constraint violation.
 $node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub");

-$result = $node_publisher->safe_psql('postgres',
-  "SELECT count(*) FROM pg_replication_slots");
-is($result, qq(0),
+# When DROP SUBSCRIPTION tries to drop the tablesync slot, the slot may not
+# have been created, which causes the slot to be created after the DROP
+# SUBSCRIPTION finishes. Such slots eventually get dropped at walsender exit
+# time. So, to prevent being affected by such ephemeral tablesync slots, we
+# wait until all the slots have been cleaned.
+ok( $node_publisher->poll_query_until(
+    'postgres', 'SELECT count(*) = 0 FROM pg_replication_slots'),
 	'DROP SUBSCRIPTION during error can clean up the slots on the publisher');

 $node_subscriber->stop('fast');
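
[Editor's note: the poll above counts every replication slot, which suffices here because the publisher has no other slots at that point in the test. To watch specifically for leftover tablesync slots, one could filter on their naming convention (currently pg_<subid>_sync_<relid>_<sysid>; an implementation detail, so treat this pattern as an assumption):

SELECT slot_name
  FROM pg_replication_slots
 WHERE slot_name ~ '^pg_[0-9]+_sync_[0-9]+';]
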
diff --git a/src/timezone/data/tzdata.zi b/src/timezone/data/tzdata.zi
index a36449f46bc..3db1460e1be 100644
--- a/src/timezone/data/tzdata.zi
+++ b/src/timezone/data/tzdata.zi
@@ -1,4 +1,4 @@
-# version 2022f
+# version 2022g
 # This zic input file is in the public domain.
 R d 1916 o - Jun 14 23s 1 S
 R d 1916 1919 - O Su>=1 23s 0 -
@@ -1040,7 +1040,7 @@ Z Asia/Singapore 6:55:25 - LMT 1901
 7:20 - +0720 1941 S
 7:30 - +0730 1942 F 16
 9 - +09 1945 S 12
-7:30 - +0730 1982
+7:30 - +0730 1981 D 31 16u
 8 - +08
 Z Asia/Colombo 5:19:24 - LMT 1880
 5:19:32 - MMT 1906
@@ -1754,7 +1754,8 @@ Z America/Scoresbysund -1:27:52 - LMT 1916 Jul 28
 -1 E -01/+00
 Z America/Nuuk -3:26:56 - LMT 1916 Jul 28
 -3 - -03 1980 Ap 6 2
--3 E -03/-02
+-3 E -03/-02 2023 Mar 25 22
+-2 - -02
 Z America/Thule -4:35:8 - LMT 1916 Jul 28
 -4 Th A%sT
 Z Europe/Tallinn 1:39 - LMT 1880
@@ -3044,16 +3045,11 @@ R Y 1919 o - N 1 0 0 S
 R Y 1942 o - F 9 2 1 W
 R Y 1945 o - Au 14 23u 1 P
 R Y 1945 o - S 30 2 0 S
-R Y 1965 o - Ap lastSu 0 2 DD
-R Y 1965 o - O lastSu 2 0 S
-R Y 1980 1986 - Ap lastSu 2 1 D
-R Y 1980 2006 - O lastSu 2 0 S
+R Y 1972 1986 - Ap lastSu 2 1 D
+R Y 1972 2006 - O lastSu 2 0 S
 R Y 1987 2006 - Ap Su>=1 2 1 D
-Z America/Pangnirtung 0 - -00 1921
--4 Y A%sT 1995 Ap Su>=1 2
--5 C E%sT 1999 O 31 2
--6 C C%sT 2000 O 29 2
--5 C E%sT
+R Yu 1965 o - Ap lastSu 0 2 DD
+R Yu 1965 o - O lastSu 2 0 S
 Z America/Iqaluit 0 - -00 1942 Au
 -5 Y E%sT 1999 O 31 2
 -6 C C%sT 2000 O 29 2
@@ -3082,13 +3078,15 @@ Z America/Inuvik 0 - -00 1953
 -7 Y M%sT 1980
 -7 C M%sT
 Z America/Whitehorse -9:0:12 - LMT 1900 Au 20
--9 Y Y%sT 1967 May 28
--8 Y P%sT 1980
+-9 Y Y%sT 1965
+-9 Yu Y%sT 1966 F 27
+-8 - PST 1980
 -8 C P%sT 2020 N
 -7 - MST
 Z America/Dawson -9:17:40 - LMT 1900 Au 20
--9 Y Y%sT 1973 O 28
--8 Y P%sT 1980
+-9 Y Y%sT 1965
+-9 Yu Y%sT 1973 O 28
+-8 - PST 1980
 -8 C P%sT 2020 N
 -7 - MST
 R m 1931 o - May 1 23 1 D
@@ -3132,6 +3130,17 @@ Z America/Mexico_City -6:36:36 - LMT 1922 Ja 1 7u
 -6 m C%sT 2001 S 30 2
 -6 - CST 2002 F 20
 -6 m C%sT
+Z America/Ciudad_Juarez -7:5:56 - LMT 1922 Ja 1 7u
+-7 - MST 1927 Jun 10 23
+-6 - CST 1930 N 15
+-7 m M%sT 1932 Ap
+-6 - CST 1996
+-6 m C%sT 1998
+-6 - CST 1998 Ap Su>=1 3
+-7 m M%sT 2010
+-7 u M%sT 2022 O 30 2
+-6 - CST 2022 N 30
+-7 u M%sT
 Z America/Ojinaga -6:57:40 - LMT 1922 Ja 1 7u
 -7 - MST 1927 Jun 10 23
 -6 - CST 1930 N 15
@@ -3141,7 +3150,8 @@ Z America/Ojinaga -6:57:40 - LMT 1922 Ja 1 7u
 -6 - CST 1998 Ap Su>=1 3
 -7 m M%sT 2010
 -7 u M%sT 2022 O 30 2
--6 - CST
+-6 - CST 2022 N 30
+-6 u C%sT
 Z America/Chihuahua -7:4:20 - LMT 1922 Ja 1 7u
 -7 - MST 1927 Jun 10 23
 -6 - CST 1930 N 15
@@ -3771,7 +3781,7 @@ Z Antarctica/Palmer 0 - -00 1965
 -4 x -04/-03 2016 D 4
 -3 - -03
 R CO 1992 o - May 3 0 1 -
-R CO 1993 o - Ap 4 0 0 -
+R CO 1993 o - F 6 24 0 -
 Z America/Bogota -4:56:16 - LMT 1884 Mar 13
 -4:56:16 - BMT 1914 N 23
 -5 CO -05/-04
@@ -4154,6 +4164,7 @@ L America/Tijuana America/Ensenada
 L America/Indiana/Indianapolis America/Fort_Wayne
 L America/Toronto America/Montreal
 L America/Toronto America/Nipigon
+L America/Iqaluit America/Pangnirtung
 L America/Rio_Branco America/Porto_Acre
 L America/Winnipeg America/Rainy_River
 L America/Argentina/Cordoba America/Rosario
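
[Editor's note: once the updated tzdata is installed, the release is easy to sanity-check from SQL. For example, 2022g introduces the America/Ciudad_Juarez zone, whose presence and current offset can be confirmed via the pg_timezone_names view (the offset shown depends on when the query is run):

SELECT name, abbrev, utc_offset, is_dst
  FROM pg_timezone_names
 WHERE name = 'America/Ciudad_Juarez';]
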
diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm
index d3f32c53c84..3a9246e8578 100644
--- a/src/tools/msvc/Mkvcbuild.pm
+++ b/src/tools/msvc/Mkvcbuild.pm
@@ -608,6 +608,9 @@ sub mkvcbuild

 	# hack to prevent duplicate definitions of uid_t/gid_t
 	push(@perl_embed_ccflags, 'PLPERL_HAVE_UID_GID');
+	# prevent binary mismatch between MSVC built plperl and
+	# Strawberry or msys ucrt perl libraries
+	push(@perl_embed_ccflags, 'NO_THREAD_SAFE_LOCALE');

 	# Windows offers several 32-bit ABIs.  Perl is sensitive to
 	# sizeof(time_t), one of the ABI dimensions.  To get 32-bit time_t,
diff --git a/src/tutorial/basics.source b/src/tutorial/basics.source
index 3e74d718ab0..d09ff5029bc 100644
--- a/src/tutorial/basics.source
+++ b/src/tutorial/basics.source
@@ -79,6 +79,11 @@ SELECT *
 	WHERE city = 'San Francisco'
 	AND prcp > 0.0;

+-- You can request that the results of a query be returned in sorted order:
+
+SELECT * FROM weather
+	ORDER BY city, temp_lo;
+
 -- Here is a more complicated one.  Duplicates are removed when DISTINCT is
 -- specified.  ORDER BY specifies the column to sort on.  (Just to make sure the
 -- following won't confuse you, DISTINCT and ORDER BY can be used separately.)
@@ -108,7 +113,8 @@ SELECT city, temp_lo, temp_hi, prcp, date, location
 -- table name.  If you want to be clear, you can do the following.  They give
 -- identical results, of course.

-SELECT weather.city, weather.temp_lo, weather.temp_hi, weather.prcp, weather.date, cities.location
+SELECT weather.city, weather.temp_lo, weather.temp_hi,
+	weather.prcp, weather.date, cities.location
 	FROM weather JOIN cities ON weather.city = cities.name;

 -- Old join syntax
@@ -125,8 +131,8 @@ SELECT *
 -- Suppose we want to find all the records that are in the temperature range
 -- of other records.  w1 and w2 are aliases for weather.

-SELECT w1.city, w1.temp_lo, w1.temp_hi,
-	w2.city, w2.temp_lo, w2.temp_hi
+SELECT w1.city, w1.temp_lo AS low, w1.temp_hi AS high,
+	w2.city, w2.temp_lo AS low, w2.temp_hi AS high
 	FROM weather w1 JOIN weather w2
 	ON w1.temp_lo < w2.temp_lo AND w1.temp_hi > w2.temp_hi;

@@ -142,16 +148,27 @@ SELECT city FROM weather
 	WHERE temp_lo = (SELECT max(temp_lo) FROM weather);

 -- Aggregate with GROUP BY
-SELECT city, max(temp_lo)
+SELECT city, count(*), max(temp_lo)
 	FROM weather
 	GROUP BY city;

 -- ... and HAVING
-SELECT city, max(temp_lo)
+SELECT city, count(*), max(temp_lo)
 	FROM weather
 	GROUP BY city
 	HAVING max(temp_lo) < 40;

+-- We can filter rows before aggregating them:
+SELECT city, count(*), max(temp_lo)
+	FROM weather
+	WHERE city LIKE 'S%'
+	GROUP BY city;
+
+-- Another way is the FILTER clause, which operates per-aggregate:
+SELECT city, count(*) FILTER (WHERE temp_lo < 45), max(temp_lo)
+	FROM weather
+	GROUP BY city;
+
 -----------------------------
 -- Updates:
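
[Editor's note: since the tutorial now demonstrates WHERE and FILTER separately, it is worth noting that the three filtering stages compose in a single statement: WHERE prunes rows before aggregation, FILTER restricts one aggregate, and HAVING prunes whole groups afterwards. A sketch using the tutorial's weather table:

SELECT city,
       count(*) FILTER (WHERE prcp > 0) AS rainy_obs,
       max(temp_lo)
  FROM weather
 WHERE temp_hi > 0
 GROUP BY city
HAVING max(temp_lo) < 40;]
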