From 84956e71a3a5884d4b2e556c7322e47a7aeeb3d4 Mon Sep 17 00:00:00 2001 From: Peter Eisentraut Date: Sun, 9 Sep 2001 17:21:59 +0000 Subject: [PATCH] Markup additions and spell check. (covers User's Guide) --- doc/src/sgml/array.sgml | 4 +- doc/src/sgml/datatype.sgml | 112 ++--- doc/src/sgml/datetime.sgml | 4 +- doc/src/sgml/func.sgml | 1052 ++++++++++++++++++++++---------------------- doc/src/sgml/history.sgml | 10 +- doc/src/sgml/indices.sgml | 6 +- doc/src/sgml/inherit.sgml | 6 +- doc/src/sgml/legal.sgml | 4 +- doc/src/sgml/manage.sgml | 26 +- doc/src/sgml/mvcc.sgml | 4 +- doc/src/sgml/notation.sgml | 10 +- doc/src/sgml/perform.sgml | 42 +- doc/src/sgml/problems.sgml | 12 +- doc/src/sgml/queries.sgml | 42 +- doc/src/sgml/syntax.sgml | 38 +- doc/src/sgml/typeconv.sgml | 28 +- 16 files changed, 704 insertions(+), 696 deletions(-) diff --git a/doc/src/sgml/array.sgml b/doc/src/sgml/array.sgml index 2fe8ea8be1..4f57f2f0e2 100644 --- a/doc/src/sgml/array.sgml +++ b/doc/src/sgml/array.sgml @@ -1,4 +1,4 @@ - + Arrays @@ -9,7 +9,7 @@ Postgres allows columns of a table to be - defined as variable-length multi-dimensional arrays. Arrays of any + defined as variable-length multidimensional arrays. Arrays of any built-in type or user-defined type can be created. To illustrate their use, we create this table: diff --git a/doc/src/sgml/datatype.sgml b/doc/src/sgml/datatype.sgml index 7efc0a7589..dcda941ea3 100644 --- a/doc/src/sgml/datatype.sgml +++ b/doc/src/sgml/datatype.sgml @@ -1,5 +1,5 @@ @@ -361,59 +361,59 @@ $Header: /cvsroot/pgsql/doc/src/sgml/datatype.sgml,v 1.61 2001/09/04 03:17:54 mo - smallint + smallint 2 bytes Fixed-precision -32768 to +32767 - integer + integer 4 bytes Usual choice for fixed-precision -2147483648 to +2147483647 - bigint + bigint 8 bytes Very large range fixed-precision about 18 decimal digits - decimal + decimal variable user-specified precision, exact no limit - numeric + numeric variable user-specified precision, exact no limit - real + real 4 bytes variable-precision, inexact 6 decimal digits precision - double precision + double precision 8 bytes variable-precision, inexact 15 decimal digits precision - serial + serial 4 bytes autoincrementing integer 1 to 2147483647 - bigserial + bigserial 8 bytes autoincrementing integer 1 to 9223372036854775807 @@ -557,7 +557,7 @@ NUMERIC The data types real and double precision are inexact, variable precision numeric types. - In practice, these types are usually implementations of IEEE 754 + In practice, these types are usually implementations of IEEE 754 binary floating point (single and double precision, respectively), to the extent that the underlying processor, operating system, and compiler support it. @@ -636,7 +636,7 @@ NUMERIC - The serial datatypes are not truly types, but are a + The serial data types are not truly types, but are a notational convenience for setting up unique identifier columns in tables. In the current implementation, specifying @@ -777,21 +777,17 @@ CREATE TABLE tablename ( - character(n), char(n) + character(n), char(n) Fixed-length blank padded - character varying(n), varchar(n) + character varying(n), varchar(n) Variable-length with limit - text + text Variable unlimited length - - bytea - binary data - @@ -840,19 +836,6 @@ CREATE TABLE tablename ( - The bytea data type allows storage of binary data, - specifically allowing storage of NULLs which are entered as - '\\000'. 
The first backslash is interpreted by the - single quotes, and the second is recognized by bytea and - preceeds a three digit octal value. For a similar reason, a - backslash must be entered into a field as '\\\\' or - '\\134'. You may also have to escape line feeds and - carriage return if your interface automatically translates these. It - can store values of any length. Bytea is a non-standard - data type. - - - The storage requirement for data of these types is 4 bytes plus the actual string, and in case of character plus the padding. Long strings will actually be compressed by the system @@ -860,7 +843,7 @@ CREATE TABLE tablename (n in the data type declaration is actually larger than that. It wouldn't be very useful to change - this because with multi-byte character encodings the number of + this because with multibyte character encodings the number of characters and bytes can be quite different anyway.) @@ -957,6 +940,23 @@ SELECT b, char_length(b) FROM test2; + + Binary Data + + + The bytea data type allows storage of binary data, + specifically allowing storage of NULLs which are entered as + '\\000'. The first backslash is interpreted by the + single quotes, and the second is recognized by bytea and + precedes a three digit octal value. For a similar reason, a + backslash must be entered into a field as '\\\\' or + '\\134'. You may also have to escape line feeds and + carriage return if your interface automatically translates these. It + can store values of any length. Bytea is a non-standard + data type. + + + Date/Time Types @@ -1083,7 +1083,7 @@ SELECT b, char_length(b) FROM test2; - date + <type>date</type> date @@ -1260,7 +1260,7 @@ SELECT b, char_length(b) FROM test2; - time [ without time zone ] + <type>time [ without time zone ]</type> time @@ -1328,7 +1328,7 @@ SELECT b, char_length(b) FROM test2; - time with time zone + <type>time with time zone</type> This type is defined by SQL92, but the definition exhibits @@ -1382,7 +1382,7 @@ SELECT b, char_length(b) FROM test2; - timestamp + <type>timestamp</type> timestamp @@ -1442,7 +1442,7 @@ January 8 04:05:06 1999 PST - interval + <type>interval</type> interval @@ -1664,7 +1664,7 @@ January 8 04:05:06 1999 PST - The PGDATESTYLE environment variable used by the frontend libpq + The PGDATESTYLE environment variable used by the frontend libpq on session start-up. @@ -1753,13 +1753,13 @@ January 8 04:05:06 1999 PST - The TZ environment variable is used by the backend directly + The TZ environment variable is used by the backend directly on postmaster start-up as the default time zone. - The PGTZ environment variable, if set at the client, is used by libpq + The PGTZ environment variable, if set at the client, is used by libpq to send a SET TIME ZONE command to the backend upon connection. @@ -1796,7 +1796,7 @@ January 8 04:05:06 1999 PST If the runtime option AUSTRALIAN_TIMEZONES is set then CST and EST refer to - Australian timezones, not American ones. + Australian time zones, not American ones. @@ -1939,49 +1939,49 @@ SELECT * FROM test1 WHERE a; - point + point 16 bytes (x,y) Point in space - line + line 32 bytes ((x1,y1),(x2,y2)) Infinite line - lseg + lseg 32 bytes ((x1,y1),(x2,y2)) Finite line segment - box + box 32 bytes ((x1,y1),(x2,y2)) Rectangular box - path + path 4+32n bytes ((x1,y1),...) Closed path (similar to polygon) - path + path 4+32n bytes [(x1,y1),...] Open path - polygon + polygon 4+32n bytes ((x1,y1),...) 
Polygon (similar to closed path) - circle + circle 24 bytes <(x,y),r> Circle (center and radius) @@ -2296,21 +2296,21 @@ SELECT * FROM test1 WHERE a; - cidr + cidr 12 bytes IP networks valid IPv4 networks - inet + inet 12 bytes IP hosts and networks valid IPv4 hosts or networks - macaddr + macaddr 6 bytes MAC addresses customary formats @@ -2386,9 +2386,9 @@ SELECT * FROM test1 WHERE a; - CIDR Input - CIDR Displayed - abbrev(CIDR) + CIDR Input + CIDR Displayed + abbrev(CIDR) diff --git a/doc/src/sgml/datetime.sgml b/doc/src/sgml/datetime.sgml index abd59e5600..47a31b8953 100644 --- a/doc/src/sgml/datetime.sgml +++ b/doc/src/sgml/datetime.sgml @@ -1,5 +1,5 @@ @@ -225,7 +225,7 @@ Date/time details DNT +1:00 - Dansk Normal Tid + Dansk Normal Tid FST diff --git a/doc/src/sgml/func.sgml b/doc/src/sgml/func.sgml index 39996e54c3..55294ccfb7 100644 --- a/doc/src/sgml/func.sgml +++ b/doc/src/sgml/func.sgml @@ -1,4 +1,4 @@ - + Functions and Operators @@ -496,163 +496,163 @@ - abs(x) + abs(x) (same as x) absolute value - abs(-17.4) + abs(-17.4) 17.4 - cbrt(dp) + cbrt(dp) dp cube root - cbrt(27.0) + cbrt(27.0) 3 - ceil(numeric) + ceil(numeric) numeric smallest integer not less than argument - ceil(-42.8) + ceil(-42.8) -42 - degrees(dp) + degrees(dp) dp radians to degrees - degrees(0.5) + degrees(0.5) 28.6478897565412 - exp(dp) + exp(dp) dp exponential - exp(1.0) + exp(1.0) 2.71828182845905 - floor(numeric) + floor(numeric) numeric largest integer not greater than argument - floor(-42.8) + floor(-42.8) -43 - ln(dp) + ln(dp) dp natural logarithm - ln(2.0) + ln(2.0) 0.693147180559945 - log(dp) + log(dp) dp base 10 logarithm - log(100.0) + log(100.0) 2 - log(b numeric, + log(b numeric, x numeric) numeric logarithm to base b - log(2.0, 64.0) + log(2.0, 64.0) 6.0000000000 - mod(y, x) + mod(y, x) (same as argument types) remainder of y/x - mod(9,4) + mod(9,4) 1 - pi() + pi() dp Pi constant - pi() + pi() 3.14159265358979 - pow(e dp, + pow(e dp, n dp) dp raise a number to exponent e - pow(9.0, 3.0) + pow(9.0, 3.0) 729 - radians(dp) + radians(dp) dp degrees to radians - radians(45.0) + radians(45.0) 0.785398163397448 - random() + random() dp value between 0.0 to 1.0 - random() + random() - round(dp) + round(dp) dp round to nearest integer - round(42.4) + round(42.4) 42 - round(v numeric, s integer) + round(v numeric, s integer) numeric round to s decimal places - round(42.4382, 2) + round(42.4382, 2) 42.44 - sqrt(dp) + sqrt(dp) dp square root - sqrt(2.0) + sqrt(2.0) 1.4142135623731 - trunc(dp) + trunc(dp) dp truncate toward zero - trunc(42.8) + trunc(42.8) 42 - trunc(numeric, s integer) + trunc(numeric, s integer) numeric truncate to s decimal places - trunc(42.4382, 2) + trunc(42.4382, 2) 42.43 @@ -661,7 +661,7 @@ - In the table above, "dp" indicates double precision. + In the table above, dp indicates double precision. 
The functions exp, ln, log, pow, round (1 argument), sqrt, @@ -688,42 +688,42 @@ - acos(x) + acos(x) inverse cosine - asin(x) + asin(x) inverse sine - atan(x) + atan(x) inverse tangent - atan2(x, y) + atan2(x, y) inverse tangent of y/x - cos(x) + cos(x) cosine - cot(x) + cot(x) cotangent - sin(x) + sin(x) sine - tan(x) + tan(x) tangent @@ -786,20 +786,20 @@ concatenation - 'Postgre' || 'SQL' - PostgreSQL + 'Postgre' || 'SQL' + PostgreSQL - bit_length(string) + bit_length(string) integer number of bits in string - bit_length('jose') - 32 + bit_length('jose') + 32 - char_length(string) or character_length(string) + char_length(string) or character_length(string) integer length of string @@ -813,36 +813,36 @@ character strings, length - char_length('jose') - 4 + char_length('jose') + 4 - lower(string) + lower(string) text Convert string to lower case. - lower('TOM') - tom + lower('TOM') + tom - octet_length(string) + octet_length(string) integer number of bytes in string - octet_length('jose') - 4 + octet_length('jose') + 4 - position(substring in string) + position(substring in string) integer location of specified substring - position('om' in 'Thomas') - 3 + position('om' in 'Thomas') + 3 - substring(string from integer for integer) + substring(string from integer for integer) text extract substring @@ -850,13 +850,13 @@ substring - substring('Thomas' from 2 for 3) - hom + substring('Thomas' from 2 for 3) + hom - trim(leading | trailing | both + trim(leading | trailing | both characters from string) @@ -866,16 +866,16 @@ characters (a space by default) from the beginning/end/both ends of the string. - trim(both 'x' from 'xTomx') - Tom + trim(both 'x' from 'xTomx') + Tom - upper(string) + upper(string) text Convert string to upper case. - upper('tom') - TOM + upper('tom') + TOM @@ -902,58 +902,62 @@ - ascii(text) + ascii(text) integer Returns the ASCII code of the first character of the argument. - ascii('x') - 120 + ascii('x') + 120 - btrim(string text, trim text) + btrim(string text, trim text) text Remove (trim) the longest string consisting only of characters in trim from the start and end of string. - btrim('xyxtrimyyx','xy') - trim + btrim('xyxtrimyyx','xy') + trim - chr(integer) + chr(integer) text Returns the character with the given ASCII code. - chr(65) - A + chr(65) + A - convert(string text, - [src_encoding name,] - dest_encoding name) + + convert(string text, + src_encoding name, + dest_encoding name) + text - Converts string using dest_encoding. - The original encoding is specified by src_encoding. - If src_encoding is omitted, database encoding - is assumed. + + Converts string using dest_encoding. + The original encoding is specified by + src_encoding. If + src_encoding is omitted, database + encoding is assumed. - convert('text_in_unicode','UNICODE','LATIN1') - text_in_unicode (represented in ISO-8859-1) + convert('text_in_unicode', 'UNICODE', 'LATIN1') + text_in_unicode represented in ISO 8859-1 - initcap(text) + initcap(text) text Converts first letter of each word (whitespace separated) to upper case. - initcap('hi thomas') - Hi Thomas + initcap('hi thomas') + Hi Thomas - lpad(string text, + lpad(string text, length integer , fill text) @@ -966,32 +970,32 @@ length then it is truncated (on the right). - lpad('hi', 5, 'xy') - xyxhi + lpad('hi', 5, 'xy') + xyxhi - ltrim(string text, trim text) + ltrim(string text, trim text) text Removes the longest string containing only characters from trim from the start of the string. 
- ltrim('zzzytrim','xyz') - trim + ltrim('zzzytrim','xyz') + trim - repeat(text, integer) + repeat(text, integer) text Repeat text a number of times. - repeat('Pg', 4) - PgPgPgPg + repeat('Pg', 4) + PgPgPgPg - rpad(string text, + rpad(string text, length integer , fill text) @@ -1003,23 +1007,23 @@ string is already longer than length then it is truncated. - rpad('hi', 5, 'xy') - hixyx + rpad('hi', 5, 'xy') + hixyx - rtrim(string text, trim text) + rtrim(string text, trim text) text Removes the longest string containing only characters from trim from the end of the string. - rtrim('trimxxxx','x') - trim + rtrim('trimxxxx','x') + trim - strpos(string, substring) + strpos(string, substring) text Locates specified substring. (same as @@ -1027,31 +1031,31 @@ string), but note the reversed argument order) - strpos('high','ig') - 2 + strpos('high','ig') + 2 - substr(string, from , count) + substr(string, from , count) text Extracts specified substring. (same as substring(string from from for count)) - substr('alphabet', 3, 2) - ph + substr('alphabet', 3, 2) + ph - to_ascii(text , encoding) + to_ascii(text , encoding) text Converts text from multibyte encoding to ASCII. - to_ascii('Karel') - Karel + to_ascii('Karel') + Karel - translate(string text, + translate(string text, from text, to text) @@ -1062,27 +1066,27 @@ the corresponding character in the to set. - translate('12345', '14', 'ax') - a23x5 + translate('12345', '14', 'ax') + a23x5 - encode(data bytea, + encode(data bytea, type text) text - Encodes binary data to ascii-only representation. Supported + Encodes binary data to ASCII-only representation. Supported types are: 'base64', 'hex'. - encode('123\\000\\001', 'base64') - MTIzAAE= + encode('123\\000\\001', 'base64') + MTIzAAE= - decode(string text, + decode(string text, type text) bytea @@ -1090,8 +1094,8 @@ Decodes binary data from string previously encoded with encode(). Parameter type is same as in encode(). - decode('MTIzAAE=', 'base64') - 123\000\001 + decode('MTIzAAE=', 'base64') + 123\000\001 @@ -1235,22 +1239,22 @@ ~ Matches regular expression, case sensitive - 'thomas' ~ '.*thomas.*' + 'thomas' ~ '.*thomas.*' ~* Matches regular expression, case insensitive - 'thomas' ~* '.*Thomas.*' + 'thomas' ~* '.*Thomas.*' !~ Does not match regular expression, case sensitive - 'thomas' !~ '.*Thomas.*' + 'thomas' !~ '.*Thomas.*' !~* Does not match regular expression, case insensitive - 'thomas' !~* '.*vadim.*' + 'thomas' !~* '.*vadim.*' @@ -1402,12 +1406,12 @@ Within a bracket expression, a collating element (a character, a - multi-character sequence that collates as if it were a single + multiple-character sequence that collates as if it were a single character, or a collating-sequence name for either) enclosed in [. and .] stands for the sequence of characters of that collating element. The sequence is a single element of the bracket expression's list. A bracket - expression containing a multi-character collating element can thus + expression containing a multiple-character collating element can thus match more than one character, e.g. if the collating sequence includes a ch collating element, then the RE [[.ch.]]*c matches the first five characters of @@ -1454,7 +1458,7 @@ characters. A word character is an alnum character (as defined by ctype3) or an underscore. This is an extension, compatible with but not - specified by POSIX 1003.2, and should be used with caution in + specified by POSIX 1003.2, and should be used with caution in software intended to be portable to other systems. 
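   <para>
    As a brief sketch of how the match operators and the word-boundary
    bracket expressions described above fit together (the
    <literal>friend</literal> table and its <literal>name</literal>
    column here are hypothetical, used only for illustration):
<programlisting>
-- case-insensitive match anywhere in the string
SELECT name FROM friend WHERE name ~* 'tom';

-- match 'Tom' only where it appears as a whole word
SELECT name FROM friend WHERE name ~ '[[:&lt;:]]Tom[[:&gt;:]]';
</programlisting>
   </para>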
@@ -1559,52 +1563,52 @@ - to_char(timestamp, text) - text - convert timestamp to string - to_char(timestamp 'now','HH12:MI:SS') + to_char(timestamp, text) + text + convert time stamp to string + to_char(timestamp 'now','HH12:MI:SS') - to_char(interval, text) - text + to_char(interval, text) + text convert interval to string - to_char(interval '15h 2m 12s','HH24:MI:SS') + to_char(interval '15h 2m 12s','HH24:MI:SS') - to_char(int, text) - text + to_char(int, text) + text convert int4/int8 to string - to_char(125, '999') + to_char(125, '999') - to_char(double precision, text) - text + to_char(double precision, text) + text convert real/double precision to string - to_char(125.8, '999D9') + to_char(125.8, '999D9') - to_char(numeric, text) - text + to_char(numeric, text) + text convert numeric to string - to_char(numeric '-125.8', '999D99S') + to_char(numeric '-125.8', '999D99S') - to_date(text, text) - date + to_date(text, text) + date convert string to date - to_date('05 Dec 2000', 'DD Mon YYYY') + to_date('05 Dec 2000', 'DD Mon YYYY') - to_timestamp(text, text) - timestamp - convert string to timestamp - to_timestamp('05 Dec 2000', 'DD Mon YYYY') + to_timestamp(text, text) + timestamp + convert string to time stamp + to_timestamp('05 Dec 2000', 'DD Mon YYYY') - to_number(text, text) - numeric + to_number(text, text) + numeric convert string to numeric - to_number('12,454.8-', '99G999D9S') + to_number('12,454.8-', '99G999D9S') @@ -1632,175 +1636,175 @@ - HH + HH hour of day (01-12) - HH12 + HH12 hour of day (01-12) - HH24 + HH24 hour of day (00-23) - MI + MI minute (00-59) - SS + SS second (00-59) - MS + MS millisecond (000-999) - US + US microsecond (000000-999999) - SSSS + SSSS seconds past midnight (0-86399) - AM or A.M. or PM or P.M. + AM or A.M. or PM or P.M. meridian indicator (upper case) - am or a.m. or pm or p.m. + am or a.m. or pm or p.m. meridian indicator (lower case) - Y,YYY + Y,YYY year (4 and more digits) with comma - YYYY + YYYY year (4 and more digits) - YYY + YYY last 3 digits of year - YY + YY last 2 digits of year - Y + Y last digit of year - BC or B.C. or AD or A.D. - year indicator (upper case) + BC or B.C. or AD or A.D. + era indicator (upper case) - bc or b.c. or ad or a.d. - year indicator (lower case) + bc or b.c. or ad or a.d. + era indicator (lower case) - MONTH + MONTH full upper case month name (blank-padded to 9 chars) - Month + Month full mixed case month name (blank-padded to 9 chars) - month + month full lower case month name (blank-padded to 9 chars) - MON + MON abbreviated upper case month name (3 chars) - Mon + Mon abbreviated mixed case month name (3 chars) - mon + mon abbreviated lower case month name (3 chars) - MM + MM month number (01-12) - DAY + DAY full upper case day name (blank-padded to 9 chars) - Day + Day full mixed case day name (blank-padded to 9 chars) - day + day full lower case day name (blank-padded to 9 chars) - DY + DY abbreviated upper case day name (3 chars) - Dy + Dy abbreviated mixed case day name (3 chars) - dy + dy abbreviated lower case day name (3 chars) - DDD + DDD day of year (001-366) - DD + DD day of month (01-31) - D + D day of week (1-7; SUN=1) - W + W week of month (1-5) where first week start on the first day of the month - WW + WW week number of year (1-53) where first week start on the first day of the year - IW + IW ISO week number of year (The first Thursday of the new year is in week 1.) 
- CC + CC century (2 digits) - J + J Julian Day (days since January 1, 4712 BC) - Q + Q quarter - RM + RM month in Roman Numerals (I-XII; I=January) - upper case - rm + rm month in Roman Numerals (I-XII; I=January) - lower case - TZ + TZ timezone name - upper case - tz + tz timezone name - lower case @@ -1830,27 +1834,27 @@ FM prefix fill mode (suppress padding blanks and zeroes) - FMMonth + FMMonth TH suffix add upper-case ordinal number suffix - DDTH + DDTH th suffix add lower-case ordinal number suffix - DDth + DDth FX prefix - FiXed format global option (see below) - FX Month DD Day + Fixed format global option (see below) + FX Month DD Day SP suffix spell mode (not yet implemented) - DDSP + DDSP @@ -1913,8 +1917,8 @@ - YYYY conversion from string to timestamp or - date is restricted if you use a year with more than 4 digits. You must + YYYY conversion from string to timestamp or + date is restricted if you use a year with more than 4 digits. You must use some non-digit character or template after YYYY, otherwise the year is always interpreted as 4 digits. For example (with year 20000): @@ -1928,14 +1932,14 @@ - Millisecond MS and microcesond US - values are in conversion from string to timestamp used as part of + Millisecond MS and microsecond US + values are in conversion from string to time stamp used as part of second after decimal point. For example to_timestamp('12:3', 'SS:MS') is not 3 milliseconds, but 300, because the conversion count it as 12 + 0.3. It means for format 'SS:MS' is '12:3' or '12:30' or '12:300' same - number of miliceconds. For the three milliseconds must be used - '12:003' that the counversion count as + number of milliseconds. For the three milliseconds must be used + '12:003' that the conversion count as 12 + 0.003 = 12.003 seconds . Here is a more complex example: to_timestamp('15:12:02.020.001230','HH:MI:SS.MS.US') @@ -1957,68 +1961,68 @@ - 9 + 9 value with the specified number of digits - 0 + 0 value with leading zeros - . (period) + . 
(period) decimal point - , (comma) + , (comma) group (thousand) separator - PR + PR negative value in angle brackets - S + S negative value with minus sign (uses locale) - L + L currency symbol (uses locale) - D + D decimal point (uses locale) - G + G group separator (uses locale) - MI + MI minus sign in specified position (if number < 0) - PL + PL plus sign in specified position (if number > 0) - SG + SG plus/minus sign in specified position - RN + RN roman numeral (input between 1 and 3999) - TH or th + TH or th convert to ordinal number - V + V shift n digits (see notes) - EEEE + EEEE scientific numbers (not supported yet) @@ -2093,135 +2097,135 @@ - to_char(now(),'Day, DD HH12:MI:SS') + to_char(now(),'Day, DD HH12:MI:SS') 'Tuesday , 06 05:39:18' - to_char(now(),'FMDay, FMDD HH12:MI:SS') + to_char(now(),'FMDay, FMDD HH12:MI:SS') 'Tuesday, 6 05:39:18' - to_char(-0.1,'99.99') + to_char(-0.1,'99.99') ' -.10' - to_char(-0.1,'FM9.99') + to_char(-0.1,'FM9.99') '-.1' - to_char(0.1,'0.9') + to_char(0.1,'0.9') ' 0.1' - to_char(12,'9990999.9') + to_char(12,'9990999.9') ' 0012.0' - to_char(12,'FM9990999.9') + to_char(12,'FM9990999.9') '0012' - to_char(485,'999') + to_char(485,'999') ' 485' - to_char(-485,'999') + to_char(-485,'999') '-485' - to_char(485,'9 9 9') + to_char(485,'9 9 9') ' 4 8 5' - to_char(1485,'9,999') + to_char(1485,'9,999') ' 1,485' - to_char(1485,'9G999') + to_char(1485,'9G999') ' 1 485' - to_char(148.5,'999.999') + to_char(148.5,'999.999') ' 148.500' - to_char(148.5,'999D999') + to_char(148.5,'999D999') ' 148,500' - to_char(3148.5,'9G999D999') + to_char(3148.5,'9G999D999') ' 3 148,500' - to_char(-485,'999S') + to_char(-485,'999S') '485-' - to_char(-485,'999MI') + to_char(-485,'999MI') '485-' - to_char(485,'999MI') + to_char(485,'999MI') '485' - to_char(485,'PL999') + to_char(485,'PL999') '+485' - to_char(485,'SG999') + to_char(485,'SG999') '+485' - to_char(-485,'SG999') + to_char(-485,'SG999') '-485' - to_char(-485,'9SG99') + to_char(-485,'9SG99') '4-85' - to_char(-485,'999PR') + to_char(-485,'999PR') '<485>' - to_char(485,'L999') + to_char(485,'L999') 'DM 485 - to_char(485,'RN') + to_char(485,'RN') ' CDLXXXV' - to_char(485,'FMRN') + to_char(485,'FMRN') 'CDLXXXV' - to_char(5.2,'FMRN') + to_char(5.2,'FMRN') V - to_char(482,'999th') + to_char(482,'999th') ' 482nd' - to_char(485, '"Good number:"999') + to_char(485, '"Good number:"999') 'Good number: 485' - to_char(485.8,'"Pre:"999" Post:" .999') + to_char(485.8,'"Pre:"999" Post:" .999') 'Pre: 485 Post: .800' - to_char(12,'99V999') + to_char(12,'99V999') ' 12000' - to_char(12.4,'99V999') + to_char(12.4,'99V999') ' 12400' - to_char(12.45, '99V9') + to_char(12.45, '99V9') ' 125' @@ -2259,24 +2263,24 @@ - age(timestamp) - interval + age(timestamp) + interval Subtract from today - age(timestamp '1957-06-13') - 43 years 8 mons 3 days + age(timestamp '1957-06-13') + 43 years 8 mons 3 days - age(timestamp, timestamp) - interval + age(timestamp, timestamp) + interval Subtract arguments - age('2001-04-10', timestamp '1957-06-13') - 43 years 9 mons 27 days + age('2001-04-10', timestamp '1957-06-13') + 43 years 9 mons 27 days - current_date - date + current_date + date Today's date; see below @@ -2285,8 +2289,8 @@ - current_time - time + current_time + time Time of day; see below @@ -2295,8 +2299,8 @@ - current_timestamp - timestamp + current_timestamp + timestamp date and time; see also below @@ -2305,76 +2309,76 @@ - date_part(text, timestamp) - double precision + date_part(text, timestamp) + double precision Get subfield (equivalent to 
extract); see also below - date_part('hour', timestamp '2001-02-16 20:38:40') - 20 + date_part('hour', timestamp '2001-02-16 20:38:40') + 20 - date_part(text, interval) - double precision + date_part(text, interval) + double precision Get subfield (equivalent to extract); see also below - date_part('month', interval '2 years 3 months') - 3 + date_part('month', interval '2 years 3 months') + 3 - date_trunc(text, timestamp) - timestamp + date_trunc(text, timestamp) + timestamp Truncate to specified precision; see also below - date_trunc('hour', timestamp '2001-02-16 20:38:40') - 2001-02-16 20:00:00+00 + date_trunc('hour', timestamp '2001-02-16 20:38:40') + 2001-02-16 20:00:00+00 - extract(field from timestamp) - double precision + extract(field from timestamp) + double precision Get subfield; see also below - extract(hour from timestamp '2001-02-16 20:38:40') - 20 + extract(hour from timestamp '2001-02-16 20:38:40') + 20 - extract(field from interval) - double precision + extract(field from interval) + double precision Get subfield; see also below - extract(month from interval '2 years 3 months') - 3 + extract(month from interval '2 years 3 months') + 3 - isfinite(timestamp) - boolean + isfinite(timestamp) + boolean Test for finite time stamp (neither invalid nor infinity) - isfinite(timestamp '2001-02-16 21:28:30') - true + isfinite(timestamp '2001-02-16 21:28:30') + true - isfinite(interval) - boolean + isfinite(interval) + boolean Test for finite interval - isfinite(interval '4 hours') - true + isfinite(interval '4 hours') + true - now() - timestamp + now() + timestamp Current date and time (equivalent to current_timestamp); see also below @@ -2384,29 +2388,29 @@ - timeofday() - text + timeofday() + text High-precision date and time; see also below - timeofday() - Wed Feb 21 17:01:13.000126 2001 EST + timeofday() + Wed Feb 21 17:01:13.000126 2001 EST - timestamp(date) - timestamp - Date to timestamp - timestamp(date '2000-12-25') - 2000-12-25 00:00:00 + timestamp(date) + timestamp + date to timestamp + timestamp(date '2000-12-25') + 2000-12-25 00:00:00 - timestamp(date, time) - timestamp - Date and time to a timestamp - timestamp(date '1998-02-24',time '23:07') - 1998-02-24 23:07:00 + timestamp(date, time) + timestamp + date and time to timestamp + timestamp(date '1998-02-24',time '23:07') + 1998-02-24 23:07:00 @@ -2436,7 +2440,7 @@ EXTRACT (field FROM source - century + century The year field divided by 100 @@ -2458,7 +2462,7 @@ SELECT EXTRACT(CENTURY FROM TIMESTAMP '2001-02-16 20:38:40'); - day + day The day (of the month) field (1 - 31) @@ -2474,7 +2478,7 @@ SELECT EXTRACT(DAY FROM TIMESTAMP '2001-02-16 20:38:40'); - decade + decade The year field divided by 10 @@ -2490,7 +2494,7 @@ SELECT EXTRACT(DECADE FROM TIMESTAMP '2001-02-16 20:38:40'); - dow + dow The day of the week (0 - 6; Sunday is 0) (for @@ -2507,7 +2511,7 @@ SELECT EXTRACT(DOW FROM TIMESTAMP '2001-02-16 20:38:40'); - doy + doy The day of the year (1 - 365/366) (for timestamp values only) @@ -2522,7 +2526,7 @@ SELECT EXTRACT(DOY FROM TIMESTAMP '2001-02-16 20:38:40'); - epoch + epoch For date and timestamp values, the @@ -2544,7 +2548,7 @@ SELECT EXTRACT(EPOCH FROM INTERVAL '5 days 3 hours'); - hour + hour The hour field (0 - 23) @@ -2560,7 +2564,7 @@ SELECT EXTRACT(HOUR FROM TIMESTAMP '2001-02-16 20:38:40'); - microseconds + microseconds The seconds field, including fractional parts, multiplied by 1 @@ -2577,7 +2581,7 @@ SELECT EXTRACT(MICROSECONDS FROM TIME '17:12:28.5'); - millennium + millennium The year field divided by 
1000 @@ -2599,7 +2603,7 @@ SELECT EXTRACT(MILLENNIUM FROM TIMESTAMP '2001-02-16 20:38:40'); - milliseconds + milliseconds The seconds field, including fractional parts, multiplied by @@ -2616,7 +2620,7 @@ SELECT EXTRACT(MILLISECONDS FROM TIME '17:12:28.5'); - minute + minute The minutes field (0 - 59) @@ -2632,7 +2636,7 @@ SELECT EXTRACT(MINUTE FROM TIMESTAMP '2001-02-16 20:38:40'); - month + month For timestamp values, the number of the month @@ -2656,7 +2660,7 @@ SELECT EXTRACT(MONTH FROM INTERVAL '2 years 13 months'); - quarter + quarter The quarter of the year (1 - 4) that the day is in (for @@ -2673,7 +2677,7 @@ SELECT EXTRACT(QUARTER FROM TIMESTAMP '2001-02-16 20:38:40'); - second + second The seconds field, including fractional parts (0 - @@ -2694,7 +2698,7 @@ SELECT EXTRACT(SECOND FROM TIME '17:12:28.5'); - week + week From a timestamp value, calculate the number of @@ -2715,7 +2719,7 @@ SELECT EXTRACT(WEEK FROM TIMESTAMP '2001-02-16 20:38:40'); - year + year The year field @@ -2887,7 +2891,7 @@ SELECT timeofday(); good as microseconds (depending on your platform); the other functions rely on time(2) which is restricted to one-second resolution. For historical reasons, timeofday() - returns its result as a text string rather than a timestamp value. + returns its result as a text string rather than a timestamp value. @@ -2899,7 +2903,7 @@ SELECT timeofday(); - All the date/time datatypes also accept the special literal value + All the date/time data types also accept the special literal value now to specify the current date and time. Thus, the following three all return the same result: @@ -2911,7 +2915,7 @@ SELECT TIMESTAMP 'now'; You do not want to use the third form when specifying a DEFAULT value while creating a table. The system will convert now - to a timestamp as soon as the constant is parsed, so that when + to a timestamp as soon as the constant is parsed, so that when the default value is needed, the time of the table creation would be used! The first two forms will not be evaluated until the default value is used, @@ -2928,8 +2932,10 @@ SELECT TIMESTAMP 'now'; Geometric Functions and Operators - The geometric types point, box, lseg, line, path, polygon, and - circle have a large set of native support functions and operators. + The geometric types point, box, + lseg, line, path, + polygon, and circle have a large set of + native support functions and operators. @@ -2946,122 +2952,122 @@ SELECT TIMESTAMP 'now'; + Translation - box '((0,0),(1,1))' + point '(2.0,0)' + box '((0,0),(1,1))' + point '(2.0,0)' - Translation - box '((0,0),(1,1))' - point '(2.0,0)' + box '((0,0),(1,1))' - point '(2.0,0)' * Scaling/rotation - box '((0,0),(1,1))' * point '(2.0,0)' + box '((0,0),(1,1))' * point '(2.0,0)' / Scaling/rotation - box '((0,0),(2,2))' / point '(2.0,0)' + box '((0,0),(2,2))' / point '(2.0,0)' # Intersection - '((1,-1),(-1,1))' # '((1,1),(-1,-1))' + '((1,-1),(-1,1))' # '((1,1),(-1,-1))' # Number of points in polygon - # '((1,0),(0,1),(-1,0))' + # '((1,0),(0,1),(-1,0))' ## Point of closest proximity - point '(0,0)' ## lseg '((2,0),(0,2))' + point '(0,0)' ## lseg '((2,0),(0,2))' && Overlaps? - box '((0,0),(1,1))' && box '((0,0),(2,2))' + box '((0,0),(1,1))' && box '((0,0),(2,2))' &< Overlaps to left? - box '((0,0),(1,1))' &< box '((0,0),(2,2))' + box '((0,0),(1,1))' &< box '((0,0),(2,2))' &> Overlaps to right? 
- box '((0,0),(3,3))' &> box '((0,0),(2,2))' + box '((0,0),(3,3))' &> box '((0,0),(2,2))' <-> Distance between - circle '((0,0),1)' <-> circle '((5,0),1)' + circle '((0,0),1)' <-> circle '((5,0),1)' << Left of? - circle '((0,0),1)' << circle '((5,0),1)' + circle '((0,0),1)' << circle '((5,0),1)' <^ Is below? - circle '((0,0),1)' <^ circle '((0,5),1)' + circle '((0,0),1)' <^ circle '((0,5),1)' >> Is right of? - circle '((5,0),1)' >> circle '((0,0),1)' + circle '((5,0),1)' >> circle '((0,0),1)' >^ Is above? - circle '((0,5),1)' >^ circle '((0,0),1)' + circle '((0,5),1)' >^ circle '((0,0),1)' ?# Intersects or overlaps - lseg '((-1,0),(1,0))' ?# box '((-2,-2),(2,2))'; + lseg '((-1,0),(1,0))' ?# box '((-2,-2),(2,2))' ?- Is horizontal? - point '(1,0)' ?- point '(0,0)' + point '(1,0)' ?- point '(0,0)' ?-| Is perpendicular? - lseg '((0,0),(0,1))' ?-| lseg '((0,0),(1,0))' + lseg '((0,0),(0,1))' ?-| lseg '((0,0),(1,0))' @-@ Length or circumference - @-@ path '((0,0),(1,0))' + @-@ path '((0,0),(1,0))' ?| Is vertical? - point '(0,1)' ?| point '(0,0)' + point '(0,1)' ?| point '(0,0)' ?|| Is parallel? - lseg '((-1,0),(1,0))' ?|| lseg '((-1,2),(1,2))' + lseg '((-1,0),(1,0))' ?|| lseg '((-1,2),(1,2))' @ Contained or on - point '(1,1)' @ circle '((0,0),2)' + point '(1,1)' @ circle '((0,0),2)' @@ Center of - @@ circle '((0,0),10)' + @@ circle '((0,0),10)' ~= Same as - polygon '((0,0),(1,1))' ~= polygon '((1,1),(0,0))' + polygon '((0,0),(1,1))' ~= polygon '((1,1),(0,0))' @@ -3080,91 +3086,91 @@ SELECT TIMESTAMP 'now'; - area(object) - double precision + area(object) + double precision area of item - area(box '((0,0),(1,1))') + area(box '((0,0),(1,1))') - box(box, box) - box + box(box, box) + box intersection box - box(box '((0,0),(1,1))',box '((0.5,0.5),(2,2))') + box(box '((0,0),(1,1))',box '((0.5,0.5),(2,2))') - center(object) - point + center(object) + point center of item - center(box '((0,0),(1,2))') + center(box '((0,0),(1,2))') - diameter(circle) - double precision + diameter(circle) + double precision diameter of circle - diameter(circle '((0,0),2.0)') + diameter(circle '((0,0),2.0)') - height(box) - double precision + height(box) + double precision vertical size of box - height(box '((0,0),(1,1))') + height(box '((0,0),(1,1))') - isclosed(path) - boolean + isclosed(path) + boolean a closed path? - isclosed(path '((0,0),(1,1),(2,0))') + isclosed(path '((0,0),(1,1),(2,0))') - isopen(path) - boolean + isopen(path) + boolean an open path? 
- isopen(path '[(0,0),(1,1),(2,0)]') + isopen(path '[(0,0),(1,1),(2,0)]') - length(object) - double precision + length(object) + double precision length of item - length(path '((-1,0),(1,0))') + length(path '((-1,0),(1,0))') - pclose(path) - path + pclose(path) + path convert path to closed - popen(path '[(0,0),(1,1),(2,0)]') + popen(path '[(0,0),(1,1),(2,0)]') - - point(lseg,lseg) - point + point(lseg,lseg) + point intersection - point(lseg '((-1,0),(1,0))',lseg '((-2,-2),(2,2))') + point(lseg '((-1,0),(1,0))',lseg '((-2,-2),(2,2))') ---> +]]> - npoint(path) - int4 + npoint(path) + integer number of points - npoints(path '[(0,0),(1,1),(2,0)]') + npoints(path '[(0,0),(1,1),(2,0)]') - popen(path) - path + popen(path) + path convert path to open path - popen(path '((0,0),(1,1),(2,0))') + popen(path '((0,0),(1,1),(2,0))') - radius(circle) - double precision + radius(circle) + double precision radius of circle - radius(circle '((0,0),2.0)') + radius(circle '((0,0),2.0)') - width(box) - double precision + width(box) + double precision horizontal size - width(box '((0,0),(1,1))') + width(box '((0,0),(1,1))') @@ -3184,94 +3190,94 @@ Not defined by this name. Implements the intersection operator '#' - box(circle) - box + box(circle) + box circle to box - box(circle '((0,0),2.0)') + box(circle '((0,0),2.0)') - box(point, point) - box + box(point, point) + box points to box - box(point '(0,0)', point '(1,1)') + box(point '(0,0)', point '(1,1)') - box(polygon) - box + box(polygon) + box polygon to box - box(polygon '((0,0),(1,1),(2,0))') + box(polygon '((0,0),(1,1),(2,0))') - circle(box) - circle + circle(box) + circle to circle - circle(box '((0,0),(1,1))') + circle(box '((0,0),(1,1))') - circle(point, double precision) - circle + circle(point, double precision) + circle point to circle - circle(point '(0,0)', 2.0) + circle(point '(0,0)', 2.0) - lseg(box) - lseg + lseg(box) + lseg box diagonal to lseg - lseg(box '((-1,0),(1,0))') + lseg(box '((-1,0),(1,0))') - lseg(point, point) - lseg + lseg(point, point) + lseg points to lseg - lseg(point '(-1,0)', point '(1,0)') + lseg(point '(-1,0)', point '(1,0)') - path(polygon) - point + path(polygon) + point polygon to path - path(polygon '((0,0),(1,1),(2,0))') + path(polygon '((0,0),(1,1),(2,0))') - point(circle) - point + point(circle) + point center - point(circle '((0,0),2.0)') + point(circle '((0,0),2.0)') - point(lseg, lseg) - point + point(lseg, lseg) + point intersection - point(lseg '((-1,0),(1,0))', lseg '((-2,-2),(2,2))') + point(lseg '((-1,0),(1,0))', lseg '((-2,-2),(2,2))') - point(polygon) - point + point(polygon) + point center - point(polygon '((0,0),(1,1),(2,0))') + point(polygon '((0,0),(1,1),(2,0))') - polygon(box) - polygon + polygon(box) + polygon 12 point polygon - polygon(box '((0,0),(1,1))') + polygon(box '((0,0),(1,1))') - polygon(circle) - polygon + polygon(circle) + polygon 12-point polygon - polygon(circle '((0,0),2.0)') + polygon(circle '((0,0),2.0)') - polygon(npts, circle) - polygon + polygon(npts, circle) + polygon npts polygon - polygon(12, circle '((0,0),2.0)') + polygon(12, circle '((0,0),2.0)') - polygon(path) - polygon + polygon(path) + polygon path to polygon - polygon(path '((0,0),(1,1),(2,0))') + polygon(path '((0,0),(1,1),(2,0))') @@ -3298,52 +3304,52 @@ Not defined by this name. 
Implements the intersection operator '#' < Less than - inet '192.168.1.5' < inet '192.168.1.6' + inet '192.168.1.5' < inet '192.168.1.6' <= Less than or equal - inet '192.168.1.5' <= inet '192.168.1.5' + inet '192.168.1.5' <= inet '192.168.1.5' = Equals - inet '192.168.1.5' = inet '192.168.1.5' + inet '192.168.1.5' = inet '192.168.1.5' >= Greater or equal - inet '192.168.1.5' >= inet '192.168.1.5' + inet '192.168.1.5' >= inet '192.168.1.5' > Greater - inet '192.168.1.5' > inet '192.168.1.4' + inet '192.168.1.5' > inet '192.168.1.4' <> Not equal - inet '192.168.1.5' <> inet '192.168.1.4' + inet '192.168.1.5' <> inet '192.168.1.4' << is contained within - inet '192.168.1.5' << inet '192.168.1/24' + inet '192.168.1.5' << inet '192.168.1/24' <<= is contained within or equals - inet '192.168.1/24' <<= inet '192.168.1/24' + inet '192.168.1/24' <<= inet '192.168.1/24' >> contains - inet'192.168.1/24' >> inet '192.168.1.5' + inet'192.168.1/24' >> inet '192.168.1.5' >>= contains or equals - inet '192.168.1/24' >>= inet '192.168.1/24' + inet '192.168.1/24' >>= inet '192.168.1/24' @@ -3374,60 +3380,60 @@ Not defined by this name. Implements the intersection operator '#' - broadcast(inet) - inet + broadcast(inet) + inet broadcast address for network - broadcast('192.168.1.5/24') - 192.168.1.255/24 + broadcast('192.168.1.5/24') + 192.168.1.255/24 - host(inet) - text + host(inet) + text extract IP address as text - host('192.168.1.5/24') - 192.168.1.5 + host('192.168.1.5/24') + 192.168.1.5 - masklen(inet) - integer + masklen(inet) + integer extract netmask length - masklen('192.168.1.5/24') - 24 + masklen('192.168.1.5/24') + 24 - set_masklen(inet,integer) - inet + set_masklen(inet,integer) + inet set netmask length for inet value - set_masklen('192.168.1.5/24',16) - 192.168.1.5/16 + set_masklen('192.168.1.5/24',16) + 192.168.1.5/16 - netmask(inet) - inet + netmask(inet) + inet construct netmask for network - netmask('192.168.1.5/24') - 255.255.255.0 + netmask('192.168.1.5/24') + 255.255.255.0 - network(inet) - cidr + network(inet) + cidr extract network part of address - network('192.168.1.5/24') - 192.168.1.0/24 + network('192.168.1.5/24') + 192.168.1.0/24 - text(inet) - text + text(inet) + text extract IP address and masklen as text - text(inet '192.168.1.5') - 192.168.1.5/32 + text(inet '192.168.1.5') + 192.168.1.5/32 - abbrev(inet) - text + abbrev(inet) + text extract abbreviated display as text - abbrev(cidr '10.1.0.0/16') - 10.1/16 + abbrev(cidr '10.1.0.0/16') + 10.1/16 @@ -3438,8 +3444,8 @@ Not defined by this name. Implements the intersection operator '#' cidr values as well. The host(), text(), and abbrev() functions are primarily intended to offer alternative display formats. You can cast a text - field to inet using normal casting syntax: inet(fieldname) or - fieldname::inet. + field to inet using normal casting syntax: inet(expression) or + colname::inet. @@ -3457,11 +3463,11 @@ Not defined by this name. Implements the intersection operator '#' - trunc(macaddr) - macaddr + trunc(macaddr) + macaddr set last 3 bytes to zero - trunc(macaddr '12:34:56:78:90:ab') - 12:34:56:00:00:00 + trunc(macaddr '12:34:56:78:90:ab') + 12:34:56:00:00:00 @@ -3722,18 +3728,18 @@ SELECT NULLIF(value, '(none)') ... - has_table_privilege(user, + has_table_privilege(user, table, access) - boolean + boolean does user have access to table - has_table_privilege(table, + has_table_privilege(table, access) - boolean + boolean does current user have access to table @@ -3747,7 +3753,7 @@ SELECT NULLIF(value, '(none)') ... 
has_table_privilege determines whether a user can access a table in a particular way. The user can be - specified by name or by usesysid, or if the argument is omitted + specified by name or by ID (pg_user.usesysid) or if the argument is omitted current_user is assumed. The table can be specified by name or by OID. (Thus, there are actually six variants of has_table_privilege, which can be distinguished by @@ -3811,13 +3817,13 @@ SELECT NULLIF(value, '(none)') ... - COUNT(*) + count(*) number of input values The return value is of type bigint. - COUNT(expression) + count(expression) Counts the input values for which the value of expression is not NULL. @@ -3826,7 +3832,7 @@ SELECT NULLIF(value, '(none)') ... - MAX(expression) + max(expression) the maximum value of expression across all input values Available for all numeric, string, and date/time types. The @@ -3835,7 +3841,7 @@ SELECT NULLIF(value, '(none)') ... - MIN(expression) + min(expression) the minimum value of expression across all input values Available for all numeric, string, and date/time types. The @@ -3844,7 +3850,7 @@ SELECT NULLIF(value, '(none)') ... - STDDEV(expression) + stddev(expression) the sample standard deviation of the input values @@ -3860,7 +3866,7 @@ SELECT NULLIF(value, '(none)') ... - SUM(expression) + sum(expression) sum of expression across all input values Summation is available on the following data types: @@ -3876,7 +3882,7 @@ SELECT NULLIF(value, '(none)') ... - VARIANCE(expression) + variance(expression) the sample variance of the input values diff --git a/doc/src/sgml/history.sgml b/doc/src/sgml/history.sgml index ae4f1cb4d2..c9580713a5 100644 --- a/doc/src/sgml/history.sgml +++ b/doc/src/sgml/history.sgml @@ -1,5 +1,5 @@ @@ -13,7 +13,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/history.sgml,v 1.13 2001/02/03 19:03:26 pet California at Berkeley. With over a decade of development behind it, PostgreSQL is the most advanced open-source database available anywhere, - offering multi-version concurrency control, supporting almost + offering multiversion concurrency control, supporting almost all SQL constructs (including subselects, transactions, and user-defined types and functions), and having a wide range of language bindings available (including C, C++, Java, Perl, Tcl, and Python). @@ -72,7 +72,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/history.sgml,v 1.13 2001/02/03 19:03:26 pet Finally, Illustra Information Technologies (since merged into - Informix) + Informix) picked up the code and commercialized it. Postgres became the primary data manager @@ -141,7 +141,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/history.sgml,v 1.13 2001/02/03 19:03:26 pet A new front-end library, libpgtcl, supported Tcl-based clients. A sample shell, - pgtclsh, provided new Tcl commands to interface + pgtclsh, provided new Tcl commands to interface tcl programs with the Postgres95 backend. @@ -211,7 +211,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/history.sgml,v 1.13 2001/02/03 19:03:26 pet - Table-level locking has been replaced with multi-version concurrency control, + Table-level locking has been replaced with multiversion concurrency control, which allows readers to continue reading consistent data during writer activity and enables hot backups from pg_dump while the database stays available for queries. 
diff --git a/doc/src/sgml/indices.sgml b/doc/src/sgml/indices.sgml index 1f746aa8b0..7ac0dedcfe 100644 --- a/doc/src/sgml/indices.sgml +++ b/doc/src/sgml/indices.sgml @@ -1,4 +1,4 @@ - + Indexes @@ -571,7 +571,7 @@ CREATE MEMSTORE ON table COLUMNS colsRDBMS provider gives you - - be it an index, my imaginary MEMSTORE command, or an intelligent + - be it an index, my imaginary MEMSTORE command, or an intelligent RDBMS that creates indexes without your knowledge based on the fact that you have sent it many queries based on a specific combination of keys... (It learns @@ -629,7 +629,7 @@ CREATE MEMSTORE ON table COLUMNS colsSQL types; they're super-hard problems with black-box extension types, diff --git a/doc/src/sgml/inherit.sgml b/doc/src/sgml/inherit.sgml index 562a5dfccc..a972242c4f 100644 --- a/doc/src/sgml/inherit.sgml +++ b/doc/src/sgml/inherit.sgml @@ -1,5 +1,5 @@ @@ -37,7 +37,7 @@ CREATE TABLE capitals ( - The inheritance hierarchy is a actually a directed acyclic graph. + The inheritance hierarchy is actually a directed acyclic graph. @@ -100,7 +100,7 @@ SELECT name, altitude In some cases you may wish to know which table a particular tuple originated from. There is a system column called - TABLEOID in each table which can tell you the + TABLEOID in each table which can tell you the originating table: diff --git a/doc/src/sgml/legal.sgml b/doc/src/sgml/legal.sgml index f430d55d32..32e0a8b1a7 100644 --- a/doc/src/sgml/legal.sgml +++ b/doc/src/sgml/legal.sgml @@ -1,5 +1,5 @@ @@ -42,7 +42,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/legal.sgml,v 1.10 2001/02/03 19:03:27 peter INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS-IS" BASIS, AND THE UNIVERSITY OF - CALIFORNIA HAS NO OBLIGATIONS TO PROVIDE MAINTAINANCE, SUPPORT, + CALIFORNIA HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. diff --git a/doc/src/sgml/manage.sgml b/doc/src/sgml/manage.sgml index 8c363d536e..dfc85b0876 100644 --- a/doc/src/sgml/manage.sgml +++ b/doc/src/sgml/manage.sgml @@ -1,17 +1,15 @@ Managing a Database - - - This section is currently a thinly disguised copy of the - Tutorial. Needs to be augmented. + + This section is currently a thinly disguised copy of the + Tutorial. Needs to be augmented. - thomas 1998-01-12 - - + Although the site administrator is responsible for overall management @@ -89,9 +87,9 @@ ERROR: CREATE DATABASE: Permission denied. Alternate database locations are created and referenced by an environment variable which gives the absolute path to the intended storage location. This environment variable must have been defined before the postmaster was started - and the location it points to must be writable by the postgres administrator account. + and the location it points to must be writable by the administrator account. Consult with the site administrator - regarding preconfigured alternate database locations. + regarding preconfigured alternative database locations. Any valid environment variable name may be used to reference an alternate location, although using variable names with a prefix of PGDATA is recommended to avoid confusion @@ -101,7 +99,7 @@ ERROR: CREATE DATABASE: Permission denied. In previous versions of Postgres, - it was also permissable to use an absolute path name to specify + it was also permissible to use an absolute path name to specify an alternate storage location. 
Although the environment variable style of specification is to be preferred since it allows the site administrator more flexibility in @@ -181,7 +179,7 @@ enter, edit, and execute SQL commands. - writing a C program using the LIBPQ subroutine + writing a C program using the LIBPQ subroutine library. This allows you to submit SQL commands from C and get answers and status messages back to your program. This interface is discussed further @@ -213,7 +211,7 @@ mydb=> -This prompt indicates that psql is listening +This prompt indicates that psql is listening to you and that you can type SQL queries into a workspace maintained by the terminal monitor. The psql program responds to escape codes that begin @@ -235,7 +233,7 @@ mydb=> \g terminate your query with a semicolon, the "\g" is not necessary. psql will automatically process semicolon terminated queries. - To read queries from a file, say myFile, instead of + To read queries from a file, say myFile, instead of entering them interactively, type: mydb=> \i fileName @@ -247,7 +245,7 @@ mydb=> \q and psql will quit and return you to your command - shell. (For more escape codes, type \? at the psql + shell. (For more escape codes, type \? at the psql prompt.) White space (i.e., spaces, tabs and newlines) may be used freely in SQL queries. Single-line comments are denoted by diff --git a/doc/src/sgml/mvcc.sgml b/doc/src/sgml/mvcc.sgml index 6d7013a46a..407feb593f 100644 --- a/doc/src/sgml/mvcc.sgml +++ b/doc/src/sgml/mvcc.sgml @@ -1,5 +1,5 @@ @@ -593,7 +593,7 @@ ERROR: Can't serialize access due to concurrent update - GiST and R-Tree indexes + GiST and R-Tree indexes diff --git a/doc/src/sgml/notation.sgml b/doc/src/sgml/notation.sgml index 223f1d5555..5585098104 100644 --- a/doc/src/sgml/notation.sgml +++ b/doc/src/sgml/notation.sgml @@ -1,5 +1,5 @@ @@ -38,10 +38,10 @@ $Header: /cvsroot/pgsql/doc/src/sgml/notation.sgml,v 1.14 2001/02/03 19:03:27 pe Examples will show commands executed from various accounts and programs. - Commands executed from a Unix shell may be preceeded with a dollar sign + Commands executed from a Unix shell may be preceded with a dollar sign ($). Commands executed from particular user - accounts such as root or postgres are specially flagged and explained. - SQL commands may be preceeded with + accounts such as root or postgres are specially flagged and explained. + SQL commands may be preceded with => or will have no leading prompt, depending on the context. @@ -49,7 +49,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/notation.sgml,v 1.14 2001/02/03 19:03:27 pe The notation for - flagging commands is not universally consistant throughout the + flagging commands is not universally consistent throughout the documentation set. Please report problems to the documentation mailing list pgsql-docs@postgresql.org. diff --git a/doc/src/sgml/perform.sgml b/doc/src/sgml/perform.sgml index e9328cb745..0dcdf30503 100644 --- a/doc/src/sgml/perform.sgml +++ b/doc/src/sgml/perform.sgml @@ -1,5 +1,5 @@ @@ -109,9 +109,9 @@ Seq Scan on tenk1 (cost=0.00..333.00 rows=10000 width=148) select * from pg_class where relname = 'tenk1'; - you'll find out that tenk1 has 233 disk + you will find out that tenk1 has 233 disk pages and 10000 tuples. So the cost is estimated at 233 page - reads, defined as 1.0 apiece, plus 10000 * cpu_tuple_cost which is + reads, defined as 1.0 apiece, plus 10000 * cpu_tuple_cost which is currently 0.01 (try show cpu_tuple_cost). 
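   <para>
    Putting those numbers together reproduces the estimate shown in the
    plan above:
<programlisting>
-- 233 sequential page fetches at 1.0 apiece, plus 10000 tuples at
-- cpu_tuple_cost (0.01), gives the 333.00 total shown for the sequential scan
SELECT 233 * 1.0 + 10000 * 0.01 AS estimated_cost;
</programlisting>
   </para>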
@@ -152,7 +152,7 @@ Index Scan using tenk1_unique1 on tenk1 (cost=0.00..173.32 rows=47 width=148) and you will see that if we make the WHERE condition selective enough, the planner will - eventually decide that an indexscan is cheaper than a sequential scan. + eventually decide that an index scan is cheaper than a sequential scan. This plan will only have to visit 50 tuples because of the index, so it wins despite the fact that each individual fetch is more expensive than reading a whole disk page sequentially. @@ -169,7 +169,7 @@ NOTICE: QUERY PLAN: Index Scan using tenk1_unique1 on tenk1 (cost=0.00..173.44 rows=1 width=148) - The added clause "stringu1 = 'xxx'" reduces the output-rows estimate, + The added clause stringu1 = 'xxx' reduces the output-rows estimate, but not the cost because we still have to visit the same set of tuples. @@ -190,18 +190,18 @@ Nested Loop (cost=0.00..269.11 rows=47 width=296) - In this nested-loop join, the outer scan is the same indexscan we had + In this nested-loop join, the outer scan is the same index scan we had in the example before last, and so its cost and row count are the same because we are applying the "unique1 < 50" WHERE clause at that node. The "t1.unique2 = t2.unique2" clause isn't relevant yet, so it doesn't - affect the outer scan's row count. For the inner scan, the + affect row count of the outer scan. For the inner scan, the unique2 value of the current - outer-scan tuple's unique2 value is plugged into the inner indexscan - to produce an indexqual like + outer-scan tuple is plugged into the inner index scan + to produce an index qualification like "t2.unique2 = constant". So we get the - same inner-scan plan and costs that we'd get from, say, "explain select - * from tenk2 where unique2 = 42". The loop node's costs are then set - on the basis of the outer scan's cost, plus one repetition of the + same inner-scan plan and costs that we'd get from, say, explain select + * from tenk2 where unique2 = 42. The costs of the loop node are then set + on the basis of the cost of the outer scan, plus one repetition of the inner scan for each outer tuple (47 * 2.01, here), plus a little CPU time for join processing. @@ -212,7 +212,7 @@ Nested Loop (cost=0.00..269.11 rows=47 width=296) in general you can have WHERE clauses that mention both relations and so can only be applied at the join point, not to either input scan. For example, if we added "WHERE ... AND t1.hundred < t2.hundred", - that'd decrease the output row count of the join node, but not change + that would decrease the output row count of the join node, but not change either input scan. @@ -237,13 +237,13 @@ Hash Join (cost=173.44..557.03 rows=47 width=296) (cost=0.00..173.32 rows=47 width=148) - This plan proposes to extract the 50 interesting rows of tenk1 - using ye same olde indexscan, stash them into an in-memory hash table, - and then do a sequential scan of tenk2, probing into the hash table - for possible matches of "t1.unique2 = t2.unique2" at each tenk2 tuple. - The cost to read tenk1 and set up the hash table is entirely start-up + This plan proposes to extract the 50 interesting rows of tenk1 + using ye same olde index scan, stash them into an in-memory hash table, + and then do a sequential scan of tenk2, probing into the hash table + for possible matches of "t1.unique2 = t2.unique2" at each tenk2 tuple. + The cost to read tenk1 and set up the hash table is entirely start-up cost for the hash join, since we won't get any tuples out until we can - start reading tenk2. 
The total time estimate for the join also + start reading tenk2. The total time estimate for the join also includes a hefty charge for CPU time to probe the hash table 10000 times. Note, however, that we are NOT charging 10000 times 173.32; the hash table setup is only done once in this plan type. @@ -302,8 +302,8 @@ SELECT * FROM a,b,c WHERE a.id = b.id AND b.ref = c.id; annoyingly long time. When there are too many input tables, the Postgres planner will switch from exhaustive search to a genetic probabilistic search - through a limited number of possibilities. (The switchover threshold is - set by the GEQO_THRESHOLD run-time + through a limited number of possibilities. (The switch-over threshold is + set by the GEQO_THRESHOLD run-time parameter described in the Administrator's Guide.) The genetic search takes less time, but it won't necessarily find the best possible plan. diff --git a/doc/src/sgml/problems.sgml b/doc/src/sgml/problems.sgml index d09d1e148f..5642550903 100644 --- a/doc/src/sgml/problems.sgml +++ b/doc/src/sgml/problems.sgml @@ -1,5 +1,5 @@ @@ -137,10 +137,10 @@ $Header: /cvsroot/pgsql/doc/src/sgml/problems.sgml,v 2.7 2001/03/24 03:40:44 tgl query. You are encouraged to minimize the size of your example, but this is not absolutely necessary. - If the bug is reproduceable, we will find it either way. + If the bug is reproducible, we will find it either way. - If your application uses some other client interface, such as PHP, then + If your application uses some other client interface, such as PHP, then please try to isolate the offending queries. We will probably not set up a web server to reproduce your problem. In any case remember to provide the exact input files, do not guess that the problem happens for @@ -174,7 +174,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/problems.sgml,v 2.7 2001/03/24 03:40:44 tgl The output you expected is very important to state. If you just write "This command gives me that output." or "This is not what I expected.", we might run it ourselves, scan the output, and - think it looks okay and is exactly what we expected. We should not have to + think it looks OK and is exactly what we expected. We should not have to spend the time to decode the exact semantics behind your commands. Especially refrain from merely saying that "This is not what SQL says/Oracle does." Digging out the correct behavior from SQL @@ -188,7 +188,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/problems.sgml,v 2.7 2001/03/24 03:40:44 tgl Any command line options and other start-up options, including concerned environment variables or configuration files that you changed from the - default. Again, be exact. If you are using a pre-packaged + default. Again, be exact. If you are using a prepackaged distribution that starts the database server at boot time, you should try to find out how that is done. @@ -212,7 +212,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/problems.sgml,v 2.7 2001/03/24 03:40:44 tgl old enough. You can also look into the README file in the source directory or at the name of your distribution file or package name. - If you run a pre-packaged version, such as RPMs, say so, including any + If you run a prepackaged version, such as RPMs, say so, including any subversion the package may have. If you are talking about a CVS snapshot, mention that, including its date and time. 
diff --git a/doc/src/sgml/queries.sgml b/doc/src/sgml/queries.sgml index f5142b7250..eee7e2e9da 100644 --- a/doc/src/sgml/queries.sgml +++ b/doc/src/sgml/queries.sgml @@ -1,4 +1,4 @@ - + Queries @@ -85,7 +85,7 @@ FROM table_reference , table_r A table reference may be a table name or a derived table such as a subquery, a table join, or complex combinations of these. If more than one table reference is listed in the FROM clause they are - CROSS JOINed (see below) to form the derived table that may then + cross-joined (see below) to form the derived table that may then be subject to transformations by the WHERE, GROUP BY, and HAVING clauses and is finally the result of the overall table expression. @@ -150,7 +150,7 @@ FROM table_reference , table_r - Qualified JOINs + Qualified joins @@ -166,7 +166,7 @@ FROM table_reference , table_r The words INNER and OUTER are - optional for all JOINs. INNER is the default; + optional for all joins. INNER is the default; LEFT, RIGHT, and FULL imply an OUTER JOIN. @@ -281,7 +281,7 @@ FROM table_reference , table_r Joins of all types can be chained together or nested: either or both of T1 and - T2 may be JOINed tables. Parentheses + T2 may be joined tables. Parentheses may be used around JOIN clauses to control the join order. In the absence of parentheses, JOIN clauses nest left-to-right. @@ -479,7 +479,7 @@ FROM a NATURAL JOIN b WHERE b.val > 5 Which one of these you use is mainly a matter of style. The JOIN syntax in the FROM clause is probably not as portable to other products. For outer joins there is no choice in any case: they - must be done in the FROM clause. An outer join's ON/USING clause + must be done in the FROM clause. A ON/USING clause of an outer join is not equivalent to a WHERE condition, because it determines the addition of rows (for unmatched input rows) as well as the removal of rows from the final result. @@ -505,16 +505,18 @@ FROM FDT WHERE - In the examples above, FDT is the table derived in the FROM - clause. Rows that do not meet the search condition of the where - clause are eliminated from FDT. Notice the use of scalar - subqueries as value expressions. Just like - any other query, the subqueries can employ complex table - expressions. Notice how FDT is referenced in the subqueries. - Qualifying C1 as FDT.C1 is only necessary if C1 is also the name of a - column in the derived input table of the subquery. Qualifying the - column name adds clarity even when it is not needed. This shows how - the column naming scope of an outer query extends into its inner queries. + In the examples above, FDT is the table derived + in the FROM clause. Rows that do not meet the search condition of + the where clause are eliminated from + FDT. Notice the use of scalar subqueries as + value expressions. Just like any other query, the subqueries can + employ complex table expressions. Notice how + FDT is referenced in the subqueries. + Qualifying C1 as FDT.C1 is only necessary + if C1 is also the name of a column in the derived + input table of the subquery. Qualifying the column name adds + clarity even when it is not needed. This shows how the column + naming scope of an outer query extends into its inner queries. 
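 The difference between an outer join's ON clause and a WHERE condition can
 be sketched with the tables a and b used above; treat the column names id
 and val as placeholders:

-- condition entirely in ON: every row of a appears in the result, and rows
-- without a qualifying match in b are extended with nulls
SELECT * FROM a LEFT OUTER JOIN b ON (a.id = b.id AND b.val > 5);

-- same restriction moved to WHERE: it is applied after the join, so the
-- null-extended rows fail the test and are removed again
SELECT * FROM a LEFT OUTER JOIN b ON (a.id = b.id) WHERE b.val > 5;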
@@ -569,7 +571,7 @@ SELECT pid, p.name, (sum(s.units) * p.price) AS sales FROM products p LEFT JOIN sales s USING ( pid ) GROUP BY pid, p.name, p.price; - In this example, the columns pid, p.name, and p.price must be in + In this example, the columns pid, p.name, and p.price must be in the GROUP BY clause since they are referenced in the query select list. The column s.units does not have to be in the GROUP BY list since it is only used in an aggregate expression @@ -868,12 +870,12 @@ SELECT a, b FROM table1 ORDER BY a + b; SELECT a AS b FROM table1 ORDER BY a; But these extensions do not work in queries involving UNION, INTERSECT, - or EXCEPT, and are not portable to other DBMSes. + or EXCEPT, and are not portable to other DBMS. - Each column specification may be followed by an optional ASC or - DESC to set the sort direction. ASC is default. Ascending order + Each column specification may be followed by an optional ASC or + DESC to set the sort direction. ASC is default. Ascending order puts smaller values first, where smaller is defined in terms of the < operator. Similarly, descending order is determined with the > diff --git a/doc/src/sgml/syntax.sgml b/doc/src/sgml/syntax.sgml index ea65b3c283..ac73f17a67 100644 --- a/doc/src/sgml/syntax.sgml +++ b/doc/src/sgml/syntax.sgml @@ -1,5 +1,5 @@ @@ -462,7 +462,7 @@ CAST ( 'string' AS type ) "$" (dollar) cannot be a single-character operator, although it - can be part of a multi-character operator name. + can be part of a multiple-character operator name. @@ -476,7 +476,7 @@ CAST ( 'string' AS type ) - A multi-character operator name cannot end in "+" or "-", + A multiple-character operator name cannot end in "+" or "-", unless the name also contains at least one of these characters: ~ ! @ # % ^ & | ` ? $ @@ -600,7 +600,7 @@ CAST ( 'string' AS type ) Alternatively, C-style block comments can be used: -/* multi-line comment +/* multiline comment * with nesting: /* nested block comment */ */ @@ -634,7 +634,7 @@ CAST ( 'string' AS type ) - oid + oid @@ -649,20 +649,22 @@ CAST ( 'string' AS type ) - tableoid + tableoid The OID of the table containing this row. This attribute is particularly handy for queries that select from inheritance hierarchies, since without it, it's difficult to tell which - individual table a row came from. The tableoid can be joined - against the OID attribute of pg_class to obtain the table name. + individual table a row came from. The + tableoid can be joined against the + oid column of + pg_class to obtain the table name. - xmin + xmin The identity (transaction ID) of the inserting transaction for @@ -673,7 +675,7 @@ CAST ( 'string' AS type ) - cmin + cmin The command identifier (starting at zero) within the inserting @@ -683,7 +685,7 @@ CAST ( 'string' AS type ) - xmax + xmax The identity (transaction ID) of the deleting transaction, @@ -696,7 +698,7 @@ CAST ( 'string' AS type ) - cmax + cmax The command identifier within the deleting transaction, or zero. @@ -705,16 +707,16 @@ CAST ( 'string' AS type ) - ctid + ctid The tuple ID of the tuple within its table. This is a pair (block number, tuple index within block) that identifies the - physical location of the tuple. Note that although the ctid - can be used to locate the tuple very quickly, a row's ctid + physical location of the tuple. Note that although the ctid + can be used to locate the tuple very quickly, a row's ctid will change each time it is updated or moved by VACUUM FULL. - Therefore ctid is useless as a long-term row identifier. 
+ Therefore ctid is useless as a long-term row identifier. The OID, or even better a user-defined serial number, should be used to identify logical rows. @@ -731,9 +733,9 @@ CAST ( 'string' AS type ) Recommended practice when using OIDs for row identification is to create a unique index on the OID column of each table for which the OID will be used. Never assume that OIDs are unique across tables; use the - combination of tableoid and row OID if you need a database-wide + combination of tableoid and row OID if you need a database-wide identifier. (Future releases of Postgres are likely to use a separate - OID counter for each table, so that tableoid must be + OID counter for each table, so that tableoid must be included to arrive at a globally unique identifier.) diff --git a/doc/src/sgml/typeconv.sgml b/doc/src/sgml/typeconv.sgml index 0bf7fe8967..affdefb24c 100644 --- a/doc/src/sgml/typeconv.sgml +++ b/doc/src/sgml/typeconv.sgml @@ -150,8 +150,8 @@ extended user-defined types to use these same features transparently. An additional heuristic is provided in the parser to allow better guesses at proper behavior for SQL standard types. There are -several basic type categories defined: boolean, -numeric, string, bitstring, datetime, timespan, geometric, network, +several basic type categories defined: boolean, +numeric, string, bitstring, datetime, timespan, geometric, network, and user-defined. Each category, with the exception of user-defined, has a preferred type which is preferentially selected when there is ambiguity. @@ -273,7 +273,7 @@ If only one candidate remains, use it; else continue to the next step. -If any input arguments are "unknown", check the type categories accepted +If any input arguments are unknown, check the type categories accepted at those argument positions by the remaining candidates. At each position, select "string" category if any candidate accepts that category (this bias towards string @@ -281,7 +281,7 @@ is appropriate since an unknown-type literal does look like a string). Otherwise, if all the remaining candidates accept the same type category, select that category; otherwise fail because the correct choice cannot be deduced without more clues. Also note whether -any of the candidates accept a preferred datatype within the selected category. +any of the candidates accept a preferred data type within the selected category. Now discard operator candidates that do not accept the selected type category; furthermore, if any candidate accepts a preferred type at a given argument position, discard candidates that accept non-preferred types for that @@ -391,7 +391,7 @@ tgl=> SELECT 'abc' || 'def' AS "Unspecified"; In this case there is no initial hint for which type to use, since no types are specified in the query. So, the parser looks for all candidate operators and finds that there are candidates accepting both string-category and -bitstring-category inputs. Since string category is preferred when available, +bit-string-category inputs. Since string category is preferred when available, that category is selected, and then the "preferred type" for strings, text, is used as the specific type to resolve the unknown literals to. @@ -440,7 +440,7 @@ will try to oblige. -Check for an exact match in the pg_proc system catalog. +Check for an exact match in the pg_proc system catalog. (Cases involving unknown will never find a match at this step.) @@ -491,7 +491,7 @@ is appropriate since an unknown-type literal does look like a string). 
Otherwise, if all the remaining candidates accept the same type category, select that category; otherwise fail because the correct choice cannot be deduced without more clues. Also note whether -any of the candidates accept a preferred datatype within the selected category. +any of the candidates accept a preferred data type within the selected category. Now discard operator candidates that do not accept the selected type category; furthermore, if any candidate accepts a preferred type at a given argument position, discard candidates that accept non-preferred types for that @@ -512,10 +512,10 @@ then fail. If no best match could be identified, see whether the function call appears to be a trivial type coercion request. This happens if the function call has just one argument and the function name is the same as the (internal) -name of some datatype. Furthermore, the function argument must be either +name of some data type. Furthermore, the function argument must be either an unknown-type literal or a type that is binary-compatible with the named -datatype. When these conditions are met, the function argument is coerced -to the named datatype. +data type. When these conditions are met, the function argument is coerced +to the named data type. @@ -527,7 +527,7 @@ to the named datatype. Factorial Function -There is only one factorial function defined in the pg_proc catalog. +There is only one factorial function defined in the pg_proc catalog. So the following query automatically converts the int2 argument to int4: @@ -554,7 +554,7 @@ tgl=> select int4fac(int4(int2 '4')); Substring Function -There are two substr functions declared in pg_proc. However, +There are two substr functions declared in pg_proc. However, only one takes two arguments, of types text and int4. @@ -679,8 +679,8 @@ tgl=> SELECT * FROM vv; What's really happened here is that the two unknown literals are resolved to text by default, allowing the || operator to be resolved as text concatenation. Then the text result of the operator -is coerced to varchar to match the target column type. (But, since the -parser knows that text and varchar are binary-compatible, this coercion +is coerced to varchar to match the target column type. (But, since the +parser knows that text and varchar are binary-compatible, this coercion is implicit and does not insert any real function call.) Finally, the sizing function varchar(varchar,int4) is found in the system catalogs and applied to the operator's result and the stored column length. -- 2.11.0
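 The trivial-coercion rule and the binary compatibility of varchar and text
 can be sketched against the vv table from the last example; the column
 name v is assumed here:

-- function-style coercion: "text" names a data type and varchar is
-- binary-compatible with text, so no real function call is inserted
SELECT text(v) AS t FROM vv;

-- the equivalent SQL-style spelling
SELECT CAST(v AS text) AS t FROM vv;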