From 731204e0903b32b496d536acddc10df458bab186 Mon Sep 17 00:00:00 2001 From: Peter Eisentraut Date: Mon, 7 Jan 2002 02:29:15 +0000 Subject: [PATCH] Editorial review --- doc/src/sgml/biblio.sgml | 72 +--- doc/src/sgml/datatype.sgml | 29 +- doc/src/sgml/dfunc.sgml | 8 +- doc/src/sgml/ecpg.sgml | 4 +- doc/src/sgml/extend.sgml | 40 +- doc/src/sgml/func.sgml | 8 +- doc/src/sgml/history.sgml | 52 +-- doc/src/sgml/indices.sgml | 6 +- doc/src/sgml/inherit.sgml | 4 +- doc/src/sgml/intro.sgml | 7 +- doc/src/sgml/libpgeasy.sgml | 17 +- doc/src/sgml/libpgtcl.sgml | 109 ++--- doc/src/sgml/libpq++.sgml | 761 +++++++++++++++++----------------- doc/src/sgml/libpq.sgml | 185 +++++---- doc/src/sgml/lobj.sgml | 52 +-- doc/src/sgml/odbc.sgml | 80 ++-- doc/src/sgml/plperl.sgml | 48 ++- doc/src/sgml/plpython.sgml | 76 +++- doc/src/sgml/plsql.sgml | 4 +- doc/src/sgml/problems.sgml | 4 +- doc/src/sgml/programmer.sgml | 55 ++- doc/src/sgml/query.sgml | 4 +- doc/src/sgml/ref/create_operator.sgml | 6 +- doc/src/sgml/ref/set.sgml | 6 +- doc/src/sgml/regress.sgml | 6 +- doc/src/sgml/rules.sgml | 8 +- doc/src/sgml/runtime.sgml | 12 +- doc/src/sgml/syntax.sgml | 61 ++- doc/src/sgml/xaggr.sgml | 45 +- doc/src/sgml/xfunc.sgml | 48 ++- doc/src/sgml/xindex.sgml | 347 +++++++++------- doc/src/sgml/xoper.sgml | 250 +++++------ doc/src/sgml/xplang.sgml | 73 ++-- doc/src/sgml/xtypes.sgml | 228 +++++----- src/interfaces/ecpg/lib/error.c | 4 +- 35 files changed, 1391 insertions(+), 1328 deletions(-) diff --git a/doc/src/sgml/biblio.sgml b/doc/src/sgml/biblio.sgml index 839dcfd800..2a7a8ae778 100644 --- a/doc/src/sgml/biblio.sgml +++ b/doc/src/sgml/biblio.sgml @@ -1,5 +1,5 @@ @@ -148,76 +148,6 @@ $Header: /cvsroot/pgsql/doc/src/sgml/biblio.sgml,v 1.16 2001/11/21 05:53:40 thom PostgreSQL-Specific Documentation This section is for related documentation. 
- - The <productname>PostgreSQL</productname> Administrator's Guide - The Administrator's Guide - - Thomas - Lockhart - - - 2001-04-13 - - The PostgreSQL Global Development Group - - - - - The <productname>PostgreSQL</productname> Developer's Guide - The Developer's Guide - - Thomas - Lockhart - - - 2001-04-13 - - The PostgreSQL Global Development Group - - - - - The <productname>PostgreSQL</productname> Programmer's Guide - The Programmer's Guide - - Thomas - Lockhart - - - 2001-04-13 - - The PostgreSQL Global Development Group - - - - - The <productname>PostgreSQL</productname> Tutorial Introduction - The Tutorial - - Thomas - Lockhart - - - 2001-04-13 - - The PostgreSQL Global Development Group - - - - - The <productname>PostgreSQL</productname> User's Guide - The User's Guide - - Thomas - Lockhart - - - 2001-04-13 - - The PostgreSQL Global Development Group - - - Enhancement of the ANSI SQL Implementation of PostgreSQL Simkovics, 1998 diff --git a/doc/src/sgml/datatype.sgml b/doc/src/sgml/datatype.sgml index daabb71fb6..f596205a1c 100644 --- a/doc/src/sgml/datatype.sgml +++ b/doc/src/sgml/datatype.sgml @@ -1,5 +1,5 @@ @@ -270,7 +270,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/datatype.sgml,v 1.82 2002/01/04 17:02:02 th paths, or have several possibilities for formats, such as the date and time types. Most of the input and output functions corresponding to the - base types (e.g., integers and floating point numbers) do some + base types (e.g., integers and floating-point numbers) do some error-checking. Some of the input and output functions are not invertible. That is, the result of an output function may lose precision when compared to @@ -354,7 +354,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/datatype.sgml,v 1.82 2002/01/04 17:02:02 th Numeric types consist of two-, four-, and eight-byte integers, four- and eight-byte - floating point numbers and fixed-precision decimals. + floating-point numbers and fixed-precision decimals. 
@@ -511,7 +511,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/datatype.sgml,v 1.82 2002/01/04 17:02:02 th numbers and carry out all calculations exactly. It is especially recommended for storing monetary amounts and other quantities where exactness is required. However, the numeric - type is very slow compared to the floating point types described + type is very slow compared to the floating-point types described in the next section. @@ -568,11 +568,11 @@ NUMERIC - Floating Point Types + Floating-Point Types The data types real and double - precision are inexact, variable precision numeric types. + precision are inexact, variable-precision numeric types. In practice, these types are usually implementations of IEEE 754 binary floating point (single and double precision, respectively), to the extent that the underlying processor, @@ -606,7 +606,7 @@ NUMERIC - Comparing two floating point values for equality may or may + Comparing two floating-point values for equality may or may not work as expected. @@ -682,14 +682,6 @@ CREATE TABLE tablename ( will never be duplicates, either. - - - The implicit sequence created for the serial type will - not be automatically removed when the - table is dropped. - - - The type names serial and serial4 are equivalent: both create integer columns. The type @@ -741,7 +733,7 @@ CREATE TABLE tablename ( Input is accepted in a variety of formats, including integer and - floating point literals, as well as typical + floating-point literals, as well as typical currency formatting, such as '$1,000.00'. Output is in the latter form. @@ -1165,8 +1157,6 @@ SELECT b, char_length(b) FROM test2; escape character. - - Compatibility Bytea provides most of the functionality of the binary string type per SQL99 section 4.3. A comparison of SQL99 Binary @@ -1248,7 +1238,6 @@ SELECT b, char_length(b) FROM test2; - @@ -1335,7 +1324,7 @@ SELECT b, char_length(b) FROM test2; specifies the number of fractional digits retained in the seconds field. 
By default, there is no explicit bound on precision. The effective limit of precision is determined by the underlying double - precision floating point number used to store values (in seconds + precision floating-point number used to store values (in seconds for interval and in seconds since 2000-01-01 for timestamp). The useful range of p is from 0 to about diff --git a/doc/src/sgml/dfunc.sgml b/doc/src/sgml/dfunc.sgml index b02c4f0164..dc041b1480 100644 --- a/doc/src/sgml/dfunc.sgml +++ b/doc/src/sgml/dfunc.sgml @@ -1,5 +1,5 @@ @@ -40,7 +40,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/dfunc.sgml,v 1.17 2001/11/21 05:53:41 thoma In the following examples we assume that your source code is in a - file foo.c and we will create an shared library + file foo.c and we will create a shared library foo.so. The intermediate object file will be called foo.o unless otherwise noted. A shared library can contain more than one object file, but we only use one @@ -121,8 +121,8 @@ ld -b -o foo.sl foo.o - Irix - Irix + IRIX + IRIX PIC is the default, no special compiler diff --git a/doc/src/sgml/ecpg.sgml b/doc/src/sgml/ecpg.sgml index 9997ca2eae..1c0ab5c645 100644 --- a/doc/src/sgml/ecpg.sgml +++ b/doc/src/sgml/ecpg.sgml @@ -1,5 +1,5 @@ @@ -300,7 +300,7 @@ struct sqlca - -206 (ECPG_FLOAT_FORMAT): Not correctly formatted floating point type: %s line %d. + -206 (ECPG_FLOAT_FORMAT): Not correctly formatted floating-point type: %s line %d. This means the host variable is of type float and diff --git a/doc/src/sgml/extend.sgml b/doc/src/sgml/extend.sgml index d17429694a..4ae498e02a 100644 --- a/doc/src/sgml/extend.sgml +++ b/doc/src/sgml/extend.sgml @@ -1,5 +1,5 @@ @@ -18,7 +18,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/extend.sgml,v 1.14 2001/11/21 05:53:41 thom - types + data types @@ -56,17 +56,16 @@ $Header: /cvsroot/pgsql/doc/src/sgml/extend.sgml,v 1.14 2001/11/21 05:53:41 thom extended by users. 
By comparison, conventional database systems can only be extended by changing hardcoded procedures within the DBMS or by loading modules - specially-written by the DBMS vendor. + specially written by the DBMS vendor. PostgreSQL is also unlike most other data managers in that the server can incorporate user-written code into itself through dynamic loading. That is, the user can - specify an object code file (e.g., a compiled .o file - or shared library) that implements a new type or function + specify an object code file (e.g., a shared library) that implements a new type or function and PostgreSQL will load it as required. Code written - in SQL are even more trivial to add to the server. + in SQL is even more trivial to add to the server. This ability to modify its operation on the fly makes PostgreSQL uniquely suited for rapid prototyping of new applications and storage structures. @@ -80,14 +79,14 @@ $Header: /cvsroot/pgsql/doc/src/sgml/extend.sgml,v 1.14 2001/11/21 05:53:41 thom The PostgreSQL type system can be broken down in several ways. Types are divided into base types and composite types. - Base types are those, like int4, that are implemented - in a language such as C. They generally correspond to + Base types are those, like int4, that are implemented + in a language such as C. They generally correspond to what are often known as abstract data types; PostgreSQL can only operate on such types through methods provided by the user and only understands the behavior of such types to the extent that the user describes them. Composite types are created whenever the user creates a - table. EMP is an example of a composite type. + table. @@ -100,10 +99,10 @@ $Header: /cvsroot/pgsql/doc/src/sgml/extend.sgml,v 1.14 2001/11/21 05:53:41 thom PostgreSQL base types are further divided into built-in types and user-defined types. Built-in types (like - int4) are those that are compiled + int4) are those that are compiled into the system. 
User-defined types are those created by the user in the - manner to be described below. + manner to be described later. @@ -118,7 +117,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/extend.sgml,v 1.14 2001/11/21 05:53:41 thom information given here, so mark this page for later reference. All system catalogs have names that begin with - pg_. + pg_. The following tables contain information that may be useful to the end user. (There are many other system catalogs, but there should rarely be a reason to query @@ -149,15 +148,15 @@ $Header: /cvsroot/pgsql/doc/src/sgml/extend.sgml,v 1.14 2001/11/21 05:53:41 thom pg_index - secondary indexes + indexes pg_proc - procedures (both C and SQL) + procedures/functions pg_type - types (both base and complex) + data types (both base and complex) pg_operator @@ -165,7 +164,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/extend.sgml,v 1.14 2001/11/21 05:53:41 thom pg_aggregate - aggregates and aggregate functions + aggregate functions pg_am @@ -198,7 +197,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/extend.sgml,v 1.14 2001/11/21 05:53:41 thom - The Reference Manual gives a more detailed explanation + The Developer's Guide gives a more detailed explanation of these catalogs and their columns. However, shows the major entities and their relationships @@ -257,10 +256,9 @@ $Header: /cvsroot/pgsql/doc/src/sgml/extend.sgml,v 1.14 2001/11/21 05:53:41 thom have obvious meanings, but there are many (particularly those that have to do with access methods) that do not. The relationships between - pg_am, pg_amop, pg_amproc, pg_operator and - pg_opclass are particularly hard to understand - and will be described in depth (in the section - on interfacing types and operators to indexes) + pg_am, pg_amop, pg_amproc, pg_operator, and + pg_opclass are particularly hard to understand + and will be described in depth (in ) after we have discussed basic extensions. 
diff --git a/doc/src/sgml/func.sgml b/doc/src/sgml/func.sgml index 52f20f2158..2cf4466d07 100644 --- a/doc/src/sgml/func.sgml +++ b/doc/src/sgml/func.sgml @@ -1,5 +1,5 @@ @@ -4506,7 +4506,7 @@ SELECT NULLIF(value, '(none)') ... bigint, real, double precision, numeric, interval. The result is of type numeric for any integer type - input, double precision for floating point input, + input, double precision for floating-point input, otherwise the same as the input data type. @@ -4555,7 +4555,7 @@ SELECT NULLIF(value, '(none)') ... data types: smallint, integer, bigint, real, double precision, numeric. The result is of type - double precision for floating point input, + double precision for floating-point input, otherwise numeric. @@ -4571,7 +4571,7 @@ SELECT NULLIF(value, '(none)') ... The result is of type bigint for smallint or integer input, numeric for bigint - input, double precision for floating point input, + input, double precision for floating-point input, otherwise the same as the input data type. diff --git a/doc/src/sgml/history.sgml b/doc/src/sgml/history.sgml index cfb069fd99..8f2d74117b 100644 --- a/doc/src/sgml/history.sgml +++ b/doc/src/sgml/history.sgml @@ -1,5 +1,5 @@ @@ -9,7 +9,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/history.sgml,v 1.18 2001/11/28 20:49:10 pet The object-relational database management system now known as PostgreSQL (and briefly called Postgres95) is derived from the - Postgres package written at the University of + POSTGRES package written at the University of California at Berkeley. With over a decade of development behind it, PostgreSQL is the most advanced open-source database available anywhere, @@ -20,10 +20,10 @@ $Header: /cvsroot/pgsql/doc/src/sgml/history.sgml,v 1.18 2001/11/28 20:49:10 pet - The Berkeley <productname>Postgres</productname> Project + The Berkeley <productname>POSTGRES</productname> Project - Implementation of the Postgres + Implementation of the POSTGRES DBMS began in 1986. 
The initial concepts for the system were presented in @@ -60,24 +60,24 @@ $Header: /cvsroot/pgsql/doc/src/sgml/history.sgml,v 1.18 2001/11/28 20:49:10 pet - Postgres has been used + POSTGRES has been used to implement many different research and production applications. These include: a financial data analysis system, a jet engine performance monitoring package, an asteroid tracking database, a medical information database, and several geographic information systems. - Postgres has also been + POSTGRES has also been used as an educational tool at several universities. Finally, - Illustra Information Technologies - (since merged into - Informix) + Illustra Information Technologies (later merged into + Informix, + which is now owned by IBM.) picked up the code and commercialized it. - Postgres became the primary data manager + POSTGRES became the primary data manager for the - Sequoia 2000 + Sequoia 2000 scientific computing project in late 1992. @@ -88,7 +88,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/history.sgml,v 1.18 2001/11/28 20:49:10 pet support was taking up large amounts of time that should have been devoted to database research. In an effort to reduce this support burden, the Berkeley - Postgres project officially + POSTGRES project officially ended with Version 4.2. @@ -98,10 +98,10 @@ $Header: /cvsroot/pgsql/doc/src/sgml/history.sgml,v 1.18 2001/11/28 20:49:10 pet In 1994, Andrew Yu and Jolly Chen - added a SQL language interpreter to Postgres. + added a SQL language interpreter to POSTGRES. Postgres95 was subsequently released to the Web to find its own way in the world as an - open-source descendant of the original Postgres + open-source descendant of the original POSTGRES Berkeley code. @@ -109,15 +109,15 @@ $Header: /cvsroot/pgsql/doc/src/sgml/history.sgml,v 1.18 2001/11/28 20:49:10 pet Postgres95 code was completely ANSI C and trimmed in size by 25%. Many internal changes improved performance and maintainability. 
- Postgres95 v1.0.x ran about 30-50% + Postgres95 release 1.0.x ran about 30-50% faster on the Wisconsin Benchmark compared to - Postgres v4.2. - Apart from bug fixes, these were the major enhancements: + POSTGRES, Version 4.2. + Apart from bug fixes, the following were the major enhancements: - The query language Postquel was replaced with + The query language PostQUEL was replaced with SQL (implemented in the server). Subqueries were not supported until PostgreSQL (see below), but they @@ -150,7 +150,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/history.sgml,v 1.18 2001/11/28 20:49:10 pet - The large object interface was overhauled. The Inversion large objects were + The large-object interface was overhauled. The Inversion large objects were the only mechanism for storing large objects. (The Inversion file system was removed.) @@ -175,7 +175,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/history.sgml,v 1.18 2001/11/28 20:49:10 pet GNU make (instead of BSD make) was used for the build. Also, Postgres95 could be - compiled with an unpatched gcc + compiled with an unpatched GCC (data alignment of doubles was fixed). @@ -190,11 +190,11 @@ $Header: /cvsroot/pgsql/doc/src/sgml/history.sgml,v 1.18 2001/11/28 20:49:10 pet By 1996, it became clear that the name Postgres95 would not stand the test of time. We chose a new name, PostgreSQL, to reflect the relationship - between the original Postgres and the more + between the original POSTGRES and the more recent versions with SQL capability. At the same time, we set the version numbering to start at 6.0, putting the - numbers back into the sequence originally begun by the - Postgres Project. + numbers back into the sequence originally begun by the Berkeley + POSTGRES project. 
@@ -212,9 +212,9 @@ $Header: /cvsroot/pgsql/doc/src/sgml/history.sgml,v 1.18 2001/11/28 20:49:10 pet - Table-level locking has been replaced with multiversion concurrency control, + Table-level locking has been replaced by multiversion concurrency control, which allows readers to continue reading consistent data during writer activity - and enables hot backups from pg_dump while the database stays available for + and enables hot backups from pg_dump while the database stays available for queries. @@ -244,7 +244,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/history.sgml,v 1.18 2001/11/28 20:49:10 pet Overall backend code speed has been increased by approximately 20-40%, - and backend start-up time has decreased 80% since version 6.0 was released. + and backend start-up time has decreased by 80% since version 6.0 was released. diff --git a/doc/src/sgml/indices.sgml b/doc/src/sgml/indices.sgml index 26be6a833a..417a50dcb3 100644 --- a/doc/src/sgml/indices.sgml +++ b/doc/src/sgml/indices.sgml @@ -1,4 +1,4 @@ - + Indexes @@ -397,8 +397,8 @@ CREATE INDEX name ON table bigbox_ops both support R-tree indexes on the box data type. The difference between them is that bigbox_ops scales box coordinates down, - to avoid floating point exceptions from doing multiplication, - addition, and subtraction on very large floating point + to avoid floating-point exceptions from doing multiplication, + addition, and subtraction on very large floating-point coordinates. If the field on which your rectangles lie is about 20 000 units square or larger, you should use bigbox_ops. diff --git a/doc/src/sgml/inherit.sgml b/doc/src/sgml/inherit.sgml index 36c7650101..cb4c8cb0af 100644 --- a/doc/src/sgml/inherit.sgml +++ b/doc/src/sgml/inherit.sgml @@ -1,5 +1,5 @@ @@ -28,7 +28,7 @@ CREATE TABLE capitals ( text, a native PostgreSQL type for variable length ASCII strings. The type of the attribute population is float, a native PostgreSQL type for double precision - floating point numbers. 
State capitals have an extra + floating-point numbers. State capitals have an extra attribute, state, that shows their state. In PostgreSQL, a table can inherit from zero or more other tables, and a query can reference either all rows of a diff --git a/doc/src/sgml/intro.sgml b/doc/src/sgml/intro.sgml index 51b405d168..5b98495e93 100644 --- a/doc/src/sgml/intro.sgml +++ b/doc/src/sgml/intro.sgml @@ -1,5 +1,5 @@ @@ -38,8 +38,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/intro.sgml,v 1.18 2001/11/23 22:06:20 tgl E type. In current commercial systems, possible types include floating point numbers, integers, character strings, money, and dates. It is commonly recognized - that this model is inadequate for future data - processing applications. + that this model is inadequate for future data-processing applications. The relational model successfully replaced previous models in part because of its Spartan simplicity. However, this simplicity makes the @@ -76,7 +75,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/intro.sgml,v 1.18 2001/11/23 22:06:20 tgl E rules - transaction integrity + transactional integrity diff --git a/doc/src/sgml/libpgeasy.sgml b/doc/src/sgml/libpgeasy.sgml index 3e1971d50b..9196621c8c 100644 --- a/doc/src/sgml/libpgeasy.sgml +++ b/doc/src/sgml/libpgeasy.sgml @@ -1,9 +1,9 @@ - - <application>libpgeasy</application> - Simplified C Library + + <application>libpgeasy</application> - Simplified C Library Author @@ -16,9 +16,10 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/libpgeasy.sgml,v 2.7 2001/10/01 21:47 - pgeasy allows you to cleanly interface - to the libpq library, - more like a 4GL SQL interface. + pgeasy allows you to cleanly interface + to the libpq library, more like a 4GL + SQL interface. Refer to for more + information about libpq @@ -111,8 +112,8 @@ void unset_result(PGresult *oldres); For SELECT queries, fetch allows you to pass pointers as parameters, and on return the variables are filled with data from the binary cursor you opened. 
These binary - cursors can not be used if you are running the - pgeasy + cursors cannot be used if you are running the + pgeasy client on a system with a different architecture than the database server. If you pass a NULL pointer parameter, the column is skipped. fetchwithnulls allows you to retrieve the NULL diff --git a/doc/src/sgml/libpgtcl.sgml b/doc/src/sgml/libpgtcl.sgml index 5114cc2b52..2025dc0ff0 100644 --- a/doc/src/sgml/libpgtcl.sgml +++ b/doc/src/sgml/libpgtcl.sgml @@ -9,22 +9,28 @@ Tcl - -pgtcl is a Tcl package for front-end programs -to interface with PostgreSQL -backends. It makes most of the functionality of libpq available to -Tcl scripts. - + + Introduction - -This package was originally written by Jolly Chen. - + + pgtcl is a Tcl package for client + programs to interface with PostgreSQL + servers. It makes most of the functionality of + libpq available to Tcl scripts. + - -Commands + + This package was originally written by Jolly Chen. + - - + + gives an overview over the + commands available in pgtcl. These + commands are described further on subsequent pages. + + + +
<literal>pgtcl</literal> Commands @@ -106,34 +112,30 @@ This package was originally written by Jolly Chen.
-
- - -These commands are described further on subsequent pages. - - -The pg_lo* routines are interfaces to the Large Object features of -PostgreSQL. -The functions are designed to mimic the analogous file system functions in -the standard Unix file system interface. -The pg_lo* routines should be used within a BEGIN/END transaction -block because the file descriptor returned by pg_lo_open is only valid for -the current transaction. pg_lo_import and pg_lo_export MUST be used -in a BEGIN/END transaction block. - - -
- - -Examples - - + + The pg_lo_* routines are interfaces to the + large object features of PostgreSQL. + The functions are designed to mimic the analogous file system + functions in the standard Unix file system interface. The + pg_lo_* routines should be used within a + BEGIN/COMMIT transaction + block because the file descriptor returned by + pg_lo_open is only valid for the current + transaction. pg_lo_import and + pg_lo_export must be used + in a BEGIN/COMMIT transaction + block. + + + + shows a small example of how to use + the routines. + + + <application>pgtcl</application> Example Program - - Here's a small example of how to use the routines: - # getDBs : # get the names of all the databases at a given host and port number @@ -151,10 +153,9 @@ proc getDBs { {host "localhost"} {port "5432"} } { pg_disconnect $conn return $datnames } - - + - +
Loading <application>pgtcl</application> into your application @@ -165,7 +166,7 @@ proc getDBs { {host "localhost"} {port "5432"} } { done with the Tcl load command. Here is an example: - load libpgtcl[info sharedlibextension] +load libpgtcl[info sharedlibextension] The use of info sharedlibextension is recommended in @@ -174,11 +175,13 @@ proc getDBs { {host "localhost"} {port "5432"} } {
- The load command will fail unless the system's dynamic loader - knows where to look for the libpgtcl shared library file. - You may need to work with ldconfig, or set the environment - variable LD_LIBRARY_PATH, or use some equivalent facility for - your platform to make it work. + The load command will fail unless the system's dynamic + loader knows where to look for the libpgtcl shared + library file. You may need to work with ldconfig, or + set the environment variable LD_LIBRARY_PATH, or use + some equivalent facility for your platform to make it work. Refer + to the PostgreSQL installation instructions for + more information. @@ -209,7 +212,7 @@ proc getDBs { {host "localhost"} {port "5432"} } { pg_connect -opens a connection to the backend server +open a connection to the backend server pgtclconnecting pg_connect @@ -363,7 +366,7 @@ for info about the available options in the newer syntax. pg_disconnect -closes a connection to the backend server +close a connection to the backend server pgtclconnecting pg_connect @@ -522,7 +525,7 @@ current default value for each option. 
pg_exec -send a query string to the backend +send a command string to the server pgtclconnecting pg_connect @@ -999,7 +1002,7 @@ This would work if table table has fields control pg_listen -sets or changes a callback for asynchronous NOTIFY messages +set or change a callback for asynchronous NOTIFY messages pgtclnotify notify @@ -1859,7 +1862,7 @@ None pg_lo_import -import a large object from a Unix file +import a large object from a file pgtclimport pg_lo_import @@ -1944,7 +1947,7 @@ None pg_lo_export -export a large object to a Unix file +export a large object to a file pgtclexport pg_lo_export diff --git a/doc/src/sgml/libpq++.sgml b/doc/src/sgml/libpq++.sgml index 49fd183083..b5a24e0e2e 100644 --- a/doc/src/sgml/libpq++.sgml +++ b/doc/src/sgml/libpq++.sgml @@ -1,28 +1,38 @@ <application>libpq++</application> - C++ Binding Library + + + libpq++ + + + C++ + + + Introduction + - libpq++ is the C++ API to + libpq++ is the C++ API to PostgreSQL. - libpq++ is a set of classes that allow + libpq++ is a set of classes that allow client programs to connect to the PostgreSQL backend server. These connections - come in two forms: a Database Class and a Large Object class. + come in two forms: a database class and a large object class. - The Database Class is intended for manipulating a database. You can - send all sorts of SQL queries to the PostgreSQL + The database class is intended for manipulating a database. You can + send all sorts of SQL queries and commands to the PostgreSQL backend server and retrieve the responses of the server. - The Large Object Class is intended for manipulating a large object - in a database. Although a Large Object instance can send normal + The large object class is intended for manipulating a large object + in a database. Although a large object instance can send normal queries to the PostgreSQL backend server it is only intended for simple queries that do not return any data. 
A large object should be seen @@ -35,16 +45,15 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/libpq++.sgml,v 1.39 2001/11/29 16:01: - This chapter is based on the documentation - for the libpq C library. Three - short programs are listed at the end of this section as examples of - libpq++ programming - (though not necessarily of good programming). - There are several examples of libpq++ + This chapter is based on the documentation for the + libpq C library (see ). There + are several examples of libpq++ applications in - src/libpq++/examples, including the source - code for the three examples in this chapter. + src/interfaces/libpq++/examples in the source + distribution. + Control and Initialization @@ -217,45 +226,51 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/libpq++.sgml,v 1.39 2001/11/29 16:01: PgConnection makes a new connection to a backend database server. - - PgConnection::PgConnection(const char *conninfo) - - The conninfo string is the same as for the underlying - libpq PQconnectdb function. + +PgConnection::PgConnection(const char *conninfo) + + The conninfo string is the same as for the underlying + libpq PQconnectdb function. + + Although typically called from one of the access classes, a connection to a backend server is possible by creating a PgConnection object. + ConnectionBad returns whether or not the connection to the backend server succeeded or failed. - - bool PgConnection::ConnectionBad() const - + +bool PgConnection::ConnectionBad() const + Returns true if the connection failed. + Status returns the status of the connection to the backend server. - - ConnStatusType PgConnection::Status() - - Returns either CONNECTION_OK or CONNECTION_BAD depending on the state - of the connection. + +ConnStatusType PgConnection::Status() + + Returns either CONNECTION_OK or + CONNECTION_BAD depending on the state of the + connection. + PgDatabase makes a new connection to a backend database server. 
- - PgDatabase(const char *conninfo) - + +PgDatabase(const char *conninfo) + After a PgDatabase has been created it should be checked to make sure the connection to the database succeeded before sending queries to the object. This can easily be done by @@ -263,24 +278,26 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/libpq++.sgml,v 1.39 2001/11/29 16:01: Status or ConnectionBad methods. + DBName - Returns the name of the current database. - - const char *PgConnection::DBName() - + returns the name of the current database. + +const char *PgConnection::DBName() + + Notifies - Returns the next notification from a list of unhandled notification messages + returns the next notification from a list of unhandled notification messages received from the backend. - - PGnotify* PgConnection::Notifies() - - See PQnotifies for details. + +PGnotify* PgConnection::Notifies() + + See PQnotifies in libpq for details. @@ -297,19 +314,20 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/libpq++.sgml,v 1.39 2001/11/29 16:01: Exec - Sends a query to the backend server. It's probably more desirable to + sends a command to the backend server. It's probably more desirable to use one of the next two functions. - - ExecStatusType PgConnection::Exec(const char* query) - - Returns the result of the query. The following status results can be expected: + +ExecStatusType PgConnection::Exec(const char* query) + + Returns the result status of the command. The following status + results can be expected: PGRES_EMPTY_QUERY - PGRES_COMMAND_OK, if the query was a command + PGRES_COMMAND_OK, if the command was not a query PGRES_TUPLES_OK, if the query successfully returned tuples @@ -332,353 +350,342 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/libpq++.sgml,v 1.39 2001/11/29 16:01: + - ExecCommandOk - Sends a command query to the backend server. - - int PgConnection::ExecCommandOk(const char *query) - - Returns TRUE if the command query succeeds. 
+ ExecCommandOk sends a non-query command + (one that does not return rows) to the backend server. + +int PgConnection::ExecCommandOk(const char *query) + + Returns true (1) if the command succeeds. + ExecTuplesOk - Sends a command query to the backend server. - - int PgConnection::ExecTuplesOk(const char *query) - - Returns TRUE if the command query succeeds. + Sends a query command (one that returns rows) to the backend server. + +int PgConnection::ExecTuplesOk(const char *query) + + Returns true (1) if the query succeeds. + ErrorMessage - Returns the last error message text. - - const char *PgConnection::ErrorMessage() - + returns the last error message text. + +const char *PgConnection::ErrorMessage() + - +
Retrieving SELECT Result Information - - - - Tuples - Returns the number of tuples (rows) in the query result. - - int PgDatabase::Tuples() const - - - - - - Fields - Returns the number of fields (attributes) in each tuple of the query result. - - int PgDatabase::Fields() - - - - - - FieldName - Returns the field (attribute) name associated with the given field index. - Field indices start at 0. - - const char *PgDatabase::FieldName(int field_num) const - - - - - - FieldNum - PQfnumber Returns the field (attribute) index associated with - the given field name. - - int PgDatabase::FieldNum(const char* field_name) const - - -1 is returned if the given name does not match any field. - - - - - FieldType - Returns the field type associated with the given field index. The - integer returned is an internal coding of the type. Field indices - start at 0. - - Oid PgDatabase::FieldType(int field_num) const - - - - - - FieldType - Returns the field type associated with the given field name. The - integer returned is an internal coding of the type. Field indices - start at 0. - - Oid PgDatabase::FieldType(const char* field_name) const - - - - - - FieldSize - Returns the size in bytes of the field associated with the given - field index. Field indices start at 0. - - int PgDatabase::FieldSize(int field_num) const - - Returns the space allocated for this field in a database tuple given - the field number. In other words the size of the server's binary - representation of the data type. -1 is returned if the field is - variable size. - - - - - FieldSize - Returns the size in bytes of the field associated with the given - field index. Field indices start at 0. - - int PgDatabase::FieldSize(const char *field_name) const - - Returns the space allocated for this field in a database tuple given - the field name. In other words the size of the server's binary - representation of the data type. -1 is returned if the field is - variable size. 
- - - - + + + + Tuples + returns the number of tuples (rows) in the query result. + +int PgDatabase::Tuples() const + + + + + + + Fields + returns the number of fields (columns) in each tuple of the query result. + +int PgDatabase::Fields() + + + + + + + FieldName + returns the field (column) name associated with the given field index. + Field indices start at 0. + +const char *PgDatabase::FieldName(int field_num) const + + + + + + + FieldNum + returns the field (column) index associated with + the given field name. + +int PgDatabase::FieldNum(const char* field_name) const + + -1 is returned if the given name does not match any field. + + + + + + FieldType + returns the field type associated with the given field index. The + integer returned is an internal coding of the type. Field indices + start at 0. + +Oid PgDatabase::FieldType(int field_num) const + + + + + + + FieldType + returns the field type associated with the given field name. The + integer returned is an internal coding of the type. Field indices + start at 0. + +Oid PgDatabase::FieldType(const char* field_name) const + + + + + + + FieldSize + returns the size in bytes of the field associated with the given + field index. Field indices start at 0. + +int PgDatabase::FieldSize(int field_num) const + + Returns the space allocated for this field in a database tuple + given the field number. In other words the size of the server's + binary representation of the data type. -1 is returned if the + field is variable size. + + + + + + FieldSize + returns the size in bytes of the field associated with the given + field name. Field indices start at 0. + +int PgDatabase::FieldSize(const char *field_name) const + + Returns the space allocated for this field in a database tuple + given the field name. In other words the size of the server's + binary representation of the data type. -1 is returned if the + field is variable size. + + + +
Retrieving SELECT Result Values - - - - GetValue - Returns a single field (attribute) value of one tuple of a PGresult. - Tuple and field indices start at 0. - - const char *PgDatabase::GetValue(int tup_num, int field_num) const - - For most queries, the value returned by GetValue is a null-terminated - string representation of the attribute value. But if BinaryTuples - is TRUE, the value returned by GetValue is the binary representation - of the type in the internal format of the backend server (but not including - the size word, if the field is variable-length). It is then the programmer's - responsibility to cast and convert the data to the correct C type. The - pointer returned by GetValue points to storage that is part of the - PGresult structure. One should not modify it, and one must explicitly - copy the value into other storage if it is to be used past the lifetime - of the PGresult structure itself. BinaryTuples is not yet implemented. - - - - - GetValue - Returns a single field (attribute) value of one tuple of a PGresult. - Tuple and field indices start at 0. - - const char *PgDatabase::GetValue(int tup_num, const char *field_name) const - - For most queries, the value returned by GetValue is a null-terminated - string representation of the attribute value. But if BinaryTuples - is TRUE, the value returned by GetValue is the binary representation - of the type in the internal format of the backend server (but not including - the size word, if the field is variable-length). It is then the programmer's - responsibility to cast and convert the data to the correct C type. The - pointer returned by GetValue points to storage that is part of the - PGresult structure. One should not modify it, and one must explicitly - copy the value into other storage if it is to be used past the lifetime - of the PGresult structure itself. BinaryTuples is not yet implemented. - - - - - GetLength - Returns the length of a field (attribute) in bytes. 
Tuple and field - indices start at 0. - - int PgDatabase::GetLength(int tup_num, int field_num) const - - This is the actual data length for the particular data value, that - is the size of the object pointed to by GetValue. Note that for - ASCII-represented values, this size has little to do with the binary - size reported by PQfsize. - - - - - GetLength - Returns the length of a field (attribute) in bytes. Tuple and field - indices start at 0. - - int PgDatabase::GetLength(int tup_num, const char* field_name) const - - This is the actual data length for the particular data value, that - is the size of the object pointed to by GetValue. Note that for - ASCII-represented values, this size has little to do with the binary - size reported by PQfsize. - - - - - GetIsNull - Returns whether a field has the null value. - - bool GetIsNull(int tup_num, int field_num) const - - Note that GetValue will return the empty string for null fields, not - the NULL pointer. - - - - - GetIsNull - Returns whether a field has the null value. - - bool GetIsNull(int tup_num, const char *field_name) const - - Note that GetValue will return the empty string for null fields, not - the NULL pointer. - - - - - DisplayTuples - OBSOLESCENT: Prints out all the tuples and, optionally, the attribute names to the - specified output stream. - - void PgDatabase::DisplayTuples(FILE *out = 0, bool fillAlign = true, - const char* fieldSep = "|",bool printHeader = true, bool quiet = false) const - - - - - - PrintTuples - OBSOLESCENT: Prints out all the tuples and, optionally, the attribute names to the - specified output stream. - - void PgDatabase::PrintTuples(FILE *out = 0, bool printAttName = true, - bool terseOutput = false, bool fillAlign = false) const - - - - - + + + + GetValue returns a single field (column) + value of one tuple of a PGresult. + Tuple and field indices start at 0. 
+ +const char *PgDatabase::GetValue(int tup_num, int field_num) const + + For most queries, the value returned by + GetValue is a null-terminated string + representation of the attribute value. But if + BinaryTuples is true, the value returned by + GetValue is the binary representation of + the type in the internal format of the backend server (but not + including the size word, if the field is variable-length). It is + then the programmer's responsibility to cast and convert the + data to the correct C type. The pointer returned by + GetValue points to storage that is part of + the PGresult structure. One should not + modify it, and one must explicitly copy the value into other + storage if it is to be used past the lifetime of the + PGresult structure itself. + BinaryTuples is not yet implemented. + + - - Retrieving Non-SELECT Result Information + + + GetValue returns a single field (column) + value of one tuple of a PGresult. + Tuple and field indices start at 0. + +const char *PgDatabase::GetValue(int tup_num, const char *field_name) const + + For most queries, the value returned by + GetValue is a null-terminated string + representation of the attribute value. But if + BinaryTuples is true, the value returned by + GetValue is the binary representation of + the type in the internal format of the backend server (but not + including the size word, if the field is variable-length). It is + then the programmer's responsibility to cast and convert the + data to the correct C type. The pointer returned by + GetValue points to storage that is part of + the PGresult structure. One should not + modify it, and one must explicitly copy the value into other + storage if it is to be used past the lifetime of the + PGresult structure itself. + BinaryTuples is not yet implemented. + + - - - - CmdTuples - Returns the number of rows affected after an INSERT, UPDATE or DELETE. - If the command was anything else, it returns -1. 
- - int PgDatabase::CmdTuples() const - - - + + + GetLength returns the length of a field + (column) in bytes. Tuple and field indices start at 0. + +int PgDatabase::GetLength(int tup_num, int field_num) const + + This is the actual data length for the particular data value, + that is the size of the object pointed to by + GetValue. Note that for + character-represented values, this size has little to do with + the binary size reported by PQfsize. + + - - - OidStatus - - const char *PgDatabase::OidStatus() const - - - - - + + + GetLength returns the length of a field + (column) in bytes. Tuple and field indices start at 0. + +int PgDatabase::GetLength(int tup_num, const char* field_name) const + + This is the actual data length for the particular data value, + that is the size of the object pointed to by + GetValue. Note that for + character-represented values, this size has little to do with + the binary size reported by PQfsize. + + - - Handling COPY Queries + + + GetIsNull + returns whether a field has the null value. + +bool GetIsNull(int tup_num, int field_num) const + + Note that GetValue will return the empty + string for null fields, not the NULL pointer. + + - - - - GetLine - - int PgDatabase::GetLine(char* string, int length) - - - - - - PutLine - - void PgDatabase::PutLine(const char* string) - - - - - - EndCopy - - int PgDatabase::EndCopy() - - - - + + + GetIsNull returns whether a field has the + null value. + +bool GetIsNull(int tup_num, const char *field_name) const + + Note that GetValue will return the empty + string for null fields, not the NULL pointer. + + - - + + + DisplayTuples prints out all the tuples + and, optionally, the attribute names to the specified output + stream. + +void PgDatabase::DisplayTuples(FILE *out = 0, bool fillAlign = true, +const char* fieldSep = "|", bool printHeader = true, bool quiet = false) const + + This function is obsolescent. 
+ + + + + + PrintTuples prints out all the tuples and, + optionally, the attribute names to the specified output stream. + +void PgDatabase::PrintTuples(FILE *out = 0, bool printAttName = true, +bool terseOutput = false, bool fillAlign = false) const + + This function is obsolescent. + + + + + + + Retrieving Non-SELECT Result Information + + + + + CmdTuples returns the number of rows + affected after an INSERT, + UPDATE, or DELETE. If the + command was anything else, it returns -1. + +int PgDatabase::CmdTuples() const + + + + + + + OidStatus + +const char *PgDatabase::OidStatus() const + + + + + + + Asynchronous Notification - PostgreSQL supports asynchronous notification - via the LISTEN and NOTIFY - commands. A backend registers its interest in a particular semaphore - with the LISTEN command. - All backends that are listening on a - particular named semaphore will be notified asynchronously when - a NOTIFY of - that name is executed by another backend. No additional - information is passed from the notifier to the listener. Thus, - typically, any actual data that needs to be communicated is transferred - through the relation. - - - - In the past, the documentation has associated the names used for asynchronous - notification with relations or classes. However, there is in fact no - direct linkage of the two concepts in the implementation, and the - named semaphore in fact does not need to have a corresponding relation - previously defined. - - + PostgreSQL supports asynchronous + notification via the LISTEN and + NOTIFY commands. A backend registers its + interest in a particular notification condition with the + LISTEN command. All backends that are + listening on a particular condition will be notified + asynchronously when a NOTIFY of that name is + executed by another backend. No additional information is passed + from the notifier to the listener. Thus, typically, any actual + data that needs to be communicated is transferred through a + relation. 
+ - libpq++ applications are notified whenever a + libpq++ applications are notified whenever a connected backend has received an asynchronous notification. However, the communication from the backend to the frontend is not asynchronous. - The libpq++ application + The libpq++ application must poll the backend to see if there is any pending notification - information. After the execution of a query, a frontend may call + information. After the execution of a command, a frontend may call PgDatabase::Notifies to see if any notification data is currently available from the backend. PgDatabase::Notifies returns the notification from a list of unhandled notifications from the - backend. The function returns NULL if there are no pending notifications + backend. The function returns NULL if there are no pending notifications from the backend. PgDatabase::Notifies behaves like the popping of a stack. Once a notification is returned @@ -692,9 +699,9 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/libpq++.sgml,v 1.39 2001/11/29 16:01: PgDatabase::Notifies retrieves pending notifications from the server. - - PGnotify* PgDatabase::Notifies() - + +PGnotify* PgDatabase::Notifies() + @@ -709,9 +716,9 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/libpq++.sgml,v 1.39 2001/11/29 16:01: Functions Associated with the COPY Command - The copy command in PostgreSQL + The COPY command in PostgreSQL has options to read from or write to the network - connection used by libpq++. + connection used by libpq++. Therefore, functions are necessary to access this network connection directly so applications may take full advantage of this capability. @@ -724,32 +731,33 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/libpq++.sgml,v 1.39 2001/11/29 16:01: backend server) into a buffer string of size length. 
- - int PgDatabase::GetLine(char* string, int length) - + +int PgDatabase::GetLine(char* string, int length) + + Like the Unix system routine - fgets (3), + fgets(), this routine copies up to length-1 characters into string. It is like - gets (3), - however, in that it converts the terminating newline into a null - character. + gets(), + however, in that it converts the terminating newline into a zero byte. PgDatabase::GetLine - returns EOF at end of file, 0 if the entire line has been read, and 1 if the + returns EOF at end of file, 0 if the entire line has been read, and 1 if the buffer is full but the terminating newline has not yet been read. Notice that the application must check to see if a new line consists - of a single period ("."), which indicates that the backend + of a backslash followed by a period (\.), which indicates + that the backend server has finished sending the results of the - copy. + COPY. Therefore, if the application ever expects to receive lines that are more than length-1 @@ -762,24 +770,24 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/libpq++.sgml,v 1.39 2001/11/29 16:01: PgDatabase::PutLine Sends a null-terminated string to the backend server. - - void PgDatabase::PutLine(char* string) - + +void PgDatabase::PutLine(char* string) + - The application must explicitly send a single period character (".") + The application must explicitly send the characters \. to indicate to the backend that it has finished sending its data. PgDatabase::EndCopy - syncs with the backend. - - int PgDatabase::EndCopy() - + synchronizes with the backend. + +int PgDatabase::EndCopy() + This function waits until the backend has - finished processing the copy. + finished processing the COPY. It should either be issued when the last string has been sent to the backend using PgDatabase::PutLine @@ -787,7 +795,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/libpq++.sgml,v 1.39 2001/11/29 16:01: PgDatabase::GetLine. 
It must be issued or the backend may get out of sync with the frontend. Upon return from this function, the backend is ready to - receive the next query. + receive the next command. The return value is 0 on successful completion, nonzero otherwise. @@ -795,19 +803,20 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/libpq++.sgml,v 1.39 2001/11/29 16:01: + As an example: - + PgDatabase data; data.Exec("CREATE TABLE foo (a int4, b char(16), d double precision)"); data.Exec("COPY foo FROM STDIN"); data.PutLine("3\tHello World\t4.5\n"); data.PutLine("4\tGoodbye World\t7.11\n"); -&... +... data.PutLine("\\.\n"); data.EndCopy(); - + diff --git a/doc/src/sgml/libpq.sgml b/doc/src/sgml/libpq.sgml index c2649dcd64..62f08501fa 100644 --- a/doc/src/sgml/libpq.sgml +++ b/doc/src/sgml/libpq.sgml @@ -1,5 +1,5 @@ @@ -9,6 +9,9 @@ $Header: /cvsroot/pgsql/doc/src/sgml/libpq.sgml,v 1.84 2001/11/29 16:01:15 peter libpq + + Introduction + libpq is the C application programmer's interface to @@ -40,6 +43,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/libpq.sgml,v 1.84 2001/11/29 16:01:15 peter header file libpq-fe.h and must link with the libpq library. + Database Connection Functions @@ -303,11 +307,11 @@ PostgresPollingStatusType PQconnectPoll(PGconn *conn) proceed with the connection sequence. Loop thus: Consider a connection inactive by default. If PQconnectPoll last returned PGRES_POLLING_ACTIVE, consider it active instead. If PQconnectPoll(conn) last returned - PGRES_POLLING_READING, perform a SELECT for reading on PQsocket(conn). If - it last returned PGRES_POLLING_WRITING, perform a SELECT for writing on + PGRES_POLLING_READING, perform a select() for reading on PQsocket(conn). If + it last returned PGRES_POLLING_WRITING, perform a select() for writing on PQsocket(conn). If you have yet to call PQconnectPoll, i.e. after the call to PQconnectStart, behave as if it last returned PGRES_POLLING_WRITING. If - the SELECT shows that the socket is ready, consider it active. 
If it has + the select() shows that the socket is ready, consider it active. If it has been decided that this connection is active, call PQconnectPoll(conn) again. If this call returns PGRES_POLLING_FAILED, the connection procedure has failed. If this call returns PGRES_POLLING_OK, the connection has been @@ -353,7 +357,7 @@ PostgresPollingStatusType PQconnectPoll(PGconn *conn) CONNECTION_AWAITING_RESPONSE - Waiting for a response from the postmaster. + Waiting for a response from the server. @@ -362,7 +366,7 @@ PostgresPollingStatusType PQconnectPoll(PGconn *conn) CONNECTION_AUTH_OK - Received authentication; waiting for backend start-up. + Received authentication; waiting for connection start-up to continue. @@ -372,14 +376,14 @@ PostgresPollingStatusType PQconnectPoll(PGconn *conn) - Negotiating environment. + Negotiating environment (part of the connection start-up). Note that, although these constants will remain (in order to maintain - compatibility) an application should never rely upon these appearing in a + compatibility), an application should never rely upon these appearing in a particular order, or at all, or on the status always being one of these documented values. An application may do something like this: @@ -487,7 +491,7 @@ void PQreset(PGconn *conn) This function will close the connection to the backend and attempt to reestablish a new - connection to the same postmaster, using all the same + connection to the same server, using all the same parameters previously used. This may be useful for error recovery if a working connection is lost. @@ -505,7 +509,7 @@ int PQresetStart(PGconn *conn); PostgresPollingStatusType PQresetPoll(PGconn *conn); These functions will close the connection to the backend and attempt to - reestablish a new connection to the same postmaster, using all the same + reestablish a new connection to the same server, using all the same parameters previously used. 
This may be useful for error recovery if a working connection is lost. They differ from PQreset (above) in that they act in a nonblocking manner. These functions suffer from the same @@ -528,7 +532,7 @@ PostgresPollingStatusType PQresetPoll(PGconn *conn); maintain the PGconn abstraction. Use the accessor functions below to get at the contents of PGconn. Avoid directly referencing the fields of the PGconn structure because they are subject to change in the future. -(Beginning in PostgreSQK release 6.4, the +(Beginning in PostgreSQL release 6.4, the definition of struct PGconn is not even provided in libpq-fe.h. If you have old code that accesses PGconn fields directly, you can keep using it by including libpq-int.h too, but you are encouraged to fix the code @@ -623,7 +627,7 @@ ConnStatusType PQstatus(const PGconn *conn) seen outside of an asynchronous connection procedure - CONNECTION_OK or CONNECTION_BAD. A good - connection to the database has the status CONNECTION_OK. + connection to the database has the status CONNECTION_OK. A failed connection attempt is signaled by status CONNECTION_BAD. @@ -705,7 +709,7 @@ SSL *PQgetssl(const PGconn *conn); -Query Execution Functions +Command Execution Functions Once a connection to a database server has been successfully @@ -719,7 +723,7 @@ SQL queries and commands. PQexec - Submit a query to the server + Submit a command to the server and wait for the result. PGresult *PQexec(PGconn *conn, @@ -728,7 +732,7 @@ PGresult *PQexec(PGconn *conn, Returns a PGresult pointer or possibly a NULL pointer. A non-NULL pointer will generally be returned except in out-of-memory conditions or serious errors such as inability - to send the query to the backend. + to send the command to the backend. If a NULL is returned, it should be treated like a PGRES_FATAL_ERROR result. Use PQerrorMessage to get more information about the error. 
@@ -737,7 +741,7 @@ PGresult *PQexec(PGconn *conn, -The PGresult structure encapsulates the query result +The PGresult structure encapsulates the result returned by the backend. libpq application programmers should be careful to maintain the PGresult abstraction. Use the accessor functions below to get @@ -754,7 +758,7 @@ soon.) PQresultStatus - Returns the result status of the query. + Returns the result status of the command. ExecStatusType PQresultStatus(const PGresult *res) @@ -789,9 +793,9 @@ ExecStatusType PQresultStatus(const PGresult *res) If the result status is PGRES_TUPLES_OK, then the routines described below can be used to retrieve the -tuples returned by the query. Note that a SELECT that -happens to retrieve zero tuples still shows PGRES_TUPLES_OK. -PGRES_COMMAND_OK is for commands that can never return tuples +rows returned by the query. Note that a SELECT command that +happens to retrieve zero rows still shows PGRES_TUPLES_OK. +PGRES_COMMAND_OK is for commands that can never return rows (INSERT, UPDATE, etc.). A response of PGRES_EMPTY_QUERY often exposes a bug in the client software. @@ -875,8 +879,8 @@ as with a PGresult returned by libpq If you want to include strings that have been received -from a source that is not trustworthy (for example, because they were -transmitted across a network), you cannot directly include them in SQL +from a source that is not trustworthy (for example, because a random user +entered them), you cannot directly include them in SQL queries for security reasons. Instead, you have to quote special characters that are otherwise interpreted by the SQL parser. @@ -884,20 +888,20 @@ characters that are otherwise interpreted by the SQL parser. PQescapeString performs this operation. 
The from points to the first character of the string that is to be escaped, and the length parameter counts the -number of characters in this string (a terminating NUL character is +number of characters in this string (a terminating zero byte is neither necessary nor counted). to shall point to a buffer that is able to hold at least one more character than twice the value of length, otherwise the behavior is undefined. A call to PQescapeString writes an escaped version of the from string to the to buffer, replacing special characters so that they cannot cause any -harm, and adding a terminating NUL character. The single quotes that +harm, and adding a terminating zero byte. The single quotes that must surround PostgreSQL string literals are not part of the result string. PQescapeString returns the number of characters written -to to, not including the terminating NUL character. +to to, not including the terminating zero byte. Behavior is undefined when the to and from strings overlap. @@ -911,21 +915,22 @@ strings overlap. PQescapeBytea - Escapes a binary string (bytea type) for use within an SQL query. + Escapes a binary string (bytea type) for use within an SQL query. unsigned char *PQescapeBytea(unsigned char *from, size_t from_length, size_t *to_length); - Certain ASCII characters MUST be escaped (but all - characters MAY be escaped) when used as part of a BYTEA + Certain ASCII characters must + be escaped (but all characters may be escaped) + when used as part of a bytea string literal in an SQL statement. In general, to escape a character, it is converted into the three digit octal number equal to the decimal ASCII value, and preceded by two backslashes. The single quote (') and backslash (\) characters have - special alternate escape sequences. See the Binary String data type - in the User's Guide for more information. PQescapeBytea + special alternate escape sequences. See the User's Guide + for more information. 
PQescapeBytea performs this operation, escaping only the minimally required characters. @@ -934,20 +939,20 @@ strings overlap. The from parameter points to the first character of the string that is to be escaped, and the from_length parameter reflects the number of - characters in this binary string (a terminating NUL character is + characters in this binary string (a terminating zero byte is neither necessary nor counted). The to_length parameter shall point to a buffer suitable to hold the resultant escaped string length. The result string length does not - include the terminating NUL character of the result. + include the terminating zero byte of the result. PQescapeBytea returns an escaped version of the - from parameter binary string, to a caller - provided buffer. The return string has all special characters replaced + from parameter binary string, to a caller-provided + buffer. The return string has all special characters replaced so that they can be properly processed by the PostgreSQL string literal - parser, and the bytea input function. A terminating NUL - character is also added. The single quotes that must surround + parser, and the bytea input function. A terminating zero + byte is also added. The single quotes that must surround PostgreSQL string literals are not part of the result string. @@ -972,7 +977,7 @@ int PQntuples(const PGresult *res); PQnfields Returns the number of fields - (attributes) in each tuple of the query result. + (columns) in each row of the query result. int PQnfields(const PGresult *res); @@ -983,7 +988,7 @@ int PQnfields(const PGresult *res); PQfname - Returns the field (attribute) name associated with the given field index. + Returns the field (column) name associated with the given field index. Field indices start at 0. 
char *PQfname(const PGresult *res, @@ -995,7 +1000,7 @@ char *PQfname(const PGresult *res, PQfnumber - Returns the field (attribute) index + Returns the field (column) index associated with the given field name. int PQfnumber(const PGresult *res, @@ -1065,7 +1070,7 @@ int PQfsize(const PGresult *res, int PQbinaryTuples(const PGresult *res); Currently, binary tuple data can only be returned by a query that -extracts data from a BINARY cursor. +extracts data from a binary cursor. @@ -1078,7 +1083,7 @@ extracts data from a BINARY cursor. PQgetvalue - Returns a single field (attribute) value of one tuple + Returns a single field (column) value of one tuple (row) of a PGresult. Tuple and field indices start at 0. @@ -1087,7 +1092,7 @@ char* PQgetvalue(const PGresult *res, int field_num); For most queries, the value returned by PQgetvalue -is a null-terminated ASCII string representation +is a null-terminated character string representation of the attribute value. But if PQbinaryTuples() is 1, the value returned by PQgetvalue is the binary representation of the @@ -1199,7 +1204,7 @@ char * PQcmdTuples(const PGresult *res); PQoidValue - Returns the object id of the tuple inserted, if the + Returns the object ID of the inserted row, if the SQL command was an INSERT that inserted exactly one row into a table that has OIDs. Otherwise, returns InvalidOid. @@ -1215,7 +1220,7 @@ Oid PQoidValue(const PGresult *res); PQoidStatus - Returns a string with the object id of the tuple inserted, if the + Returns a string with the object ID of the inserted row, if the SQL command was an INSERT. (The string will be 0 if the INSERT did not insert exactly one row, or if the target table does not have OIDs.) If the command @@ -1238,14 +1243,14 @@ and is not thread-safe. nonblocking connection -The PQexec function is adequate for submitting queries in +The PQexec function is adequate for submitting commands in simple synchronous applications. 
It has a couple of major deficiencies however: -PQexec waits for the query to be completed. The application may have other +PQexec waits for the command to be completed. The application may have other work to do (such as maintaining a user interface), in which case it won't want to block waiting for the response. @@ -1253,13 +1258,13 @@ want to block waiting for the response. Since control is buried inside PQexec, it is hard for the frontend -to decide it would like to try to cancel the ongoing query. (It can be +to decide it would like to try to cancel the ongoing command. (It can be done from a signal handler, but not otherwise.) -PQexec can return only one PGresult structure. If the submitted query +PQexec can return only one PGresult structure. If the submitted command string contains multiple SQL commands, all but the last PGresult are discarded by PQexec. @@ -1275,7 +1280,7 @@ underlying functions that PQexec is built from: Older programs that used this functionality as well as PQputline and PQputnbytes -could block waiting to send data to the backend, to +could block waiting to send data to the backend. To address that issue, the function PQsetnonblocking was added. @@ -1304,7 +1309,7 @@ int PQsetnonblocking(PGconn *conn, int arg) again. - When a database connection has been set to non-blocking mode and + When a database connection has been set to nonblocking mode and PQexec is called, it will temporarily set the state of the connection to blocking until the PQexec completes. @@ -1330,19 +1335,19 @@ int PQisnonblocking(const PGconn *conn) PQsendQuery - Submit a query to PostgreSQL without - waiting for the result(s). 1 is returned if the query was + Submit a command to the server without + waiting for the result(s). 1 is returned if the command was successfully dispatched, 0 if not (in which case, use - PQerrorMessage to get more information about the failure). + PQerrorMessage to get more information about the failure). 
int PQsendQuery(PGconn *conn, const char *query); After successfully calling PQsendQuery, call PQgetResult one or more - times to obtain the query results. PQsendQuery may not be called + times to obtain the results. PQsendQuery may not be called again (on the same connection) until PQgetResult has returned NULL, - indicating that the query is done. + indicating that the command is done. @@ -1356,9 +1361,9 @@ int PQsendQuery(PGconn *conn, PGresult *PQgetResult(PGconn *conn); PQgetResult must be called repeatedly until it returns NULL, - indicating that the query is done. (If called when no query is + indicating that the command is done. (If called when no command is active, PQgetResult will just return NULL at once.) - Each non-null result from PQgetResult should be processed using + Each non-NULL result from PQgetResult should be processed using the same PGresult accessor functions previously described. Don't forget to free each result object with PQclear when done with it. Note that PQgetResult will block only if a query is active and the @@ -1372,11 +1377,11 @@ PGresult *PQgetResult(PGconn *conn); Using PQsendQuery and PQgetResult solves one of PQexec's problems: -If a query string contains multiple SQL commands, the results of those +If a command string contains multiple SQL commands, the results of those commands can be obtained individually. (This allows a simple form of overlapped processing, by the way: the frontend can be handling the results of one query while the backend is still working on later -queries in the same query string.) However, calling PQgetResult will +queries in the same command string.) However, calling PQgetResult will still cause the frontend to block until the backend completes the next SQL command. This can be avoided by proper use of three more functions: @@ -1401,9 +1406,9 @@ their state has changed. PQconsumeInput may be called even if the application is not prepared to deal with a result or notification just yet. 
The routine will read available data and save it in a buffer, thereby -causing a select(2) read-ready indication to go away. The +causing a select() read-ready indication to go away. The application can thus use PQconsumeInput to clear the -select condition immediately, and then examine the results at leisure. +select() condition immediately, and then examine the results at leisure. @@ -1425,13 +1430,13 @@ state will never end. PQflush Attempt to flush any data queued to the backend, -returns 0 if successful (or if the send queue is empty) or EOF if it failed for +returns 0 if successful (or if the send queue is empty) or EOF if it failed for some reason. int PQflush(PGconn *conn); PQflush needs to be called on a nonblocking connection -before calling select to determine if a response has +before calling select() to determine if a response has arrived. If 0 is returned it ensures that there is no data queued to the backend that has not actually been sent. Only applications that have used PQsetnonblocking have a need for this. @@ -1448,17 +1453,17 @@ backend that has not actually been sent. Only applications that have used int PQsocket(const PGconn *conn); PQsocket should be used to obtain the backend socket descriptor -in preparation for executing select(2). This allows an +in preparation for executing select(). This allows an application using a blocking connection to wait for either backend responses or other conditions. -If the result of select(2) indicates that data can be read from +If the result of select() indicates that data can be read from the backend socket, then PQconsumeInput should be called to read the data; after which, PQisBusy, PQgetResult, and/or PQnotifies can be used to process the response. Nonblocking connections (that have used PQsetnonblocking) -should not use select until PQflush +should not use select() until PQflush has returned 0 indicating that there is no buffered data waiting to be sent to the backend. 
@@ -1469,7 +1474,7 @@ to the backend. A typical frontend using these functions will have a main loop that uses -select(2) to wait for all the conditions that it must +select to wait for all the conditions that it must respond to. One of the conditions will be input available from the backend, which in select's terms is readable data on the file descriptor identified by PQsocket. @@ -1482,7 +1487,7 @@ if PQisBusy returns false (0). It can also call A frontend that uses PQsendQuery/PQgetResult -can also attempt to cancel a query that is still being processed by the backend. +can also attempt to cancel a command that is still being processed by the backend. @@ -1491,7 +1496,7 @@ can also attempt to cancel a query that is still being processed by the backend. PQrequestCancel Request that PostgreSQL abandon - processing of the current query. + processing of the current command. int PQrequestCancel(PGconn *conn); @@ -1501,9 +1506,9 @@ Successful dispatch is no guarantee that the request will have any effect, however. Regardless of the return value of PQrequestCancel, the application must continue with the normal result-reading sequence using PQgetResult. If the cancellation -is effective, the current query will terminate early and return +is effective, the current command will terminate early and return an error result. If the cancellation fails (say, because the -backend was already done processing the query), then there will +backend was already done processing the command), then there will be no visible result at all. @@ -1511,7 +1516,7 @@ be no visible result at all. -Note that if the current query is part of a transaction, cancellation +Note that if the current command is part of a transaction, cancellation will abort the whole transaction. @@ -1523,16 +1528,16 @@ handler. For example, psql invokes PQrequestCancel from a SIGINT signal handler, thus allowing interactive cancellation of queries that it issues through PQexec. 
Note that PQrequestCancel will have no effect if the connection -is not currently open or the backend is not currently processing a query. +is not currently open or the backend is not currently processing a command. -Fast Path +The Fast-Path Interface -PostgreSQL provides a fast path interface to send +PostgreSQL provides a fast-path interface to send function calls to the backend. This is a trapdoor into system internals and can be a potential security hole. Most users will not need this feature. @@ -1540,7 +1545,7 @@ can be a potential security hole. Most users will not need this feature. PQfn - Request execution of a backend function via the fast path interface. + Request execution of a backend function via the fast-path interface. PGresult* PQfn(PGconn* conn, int fnid, @@ -1560,7 +1565,7 @@ PGresult* PQfn(PGconn* conn, result_is_int to 1; otherwise set it to 0. (Setting result_is_int to 1 tells libpq to byte-swap the value if necessary, so that it is delivered as a proper int value for the client machine. When - result_is_int is 0, the byte string sent by the backend is returned + result_is_int is 0, the byte string sent by the backend is returned unmodified.) args and nargs specify the arguments to be passed to the function. @@ -1604,7 +1609,7 @@ not necessary for there to be any associated relation. libpq applications submit LISTEN and UNLISTEN -commands as ordinary SQL queries. Subsequently, arrival of NOTIFY +commands as ordinary SQL command. Subsequently, arrival of NOTIFY messages can be detected by calling PQnotifies. @@ -1658,10 +1663,10 @@ A better way to check for NOTIFY messages when you have no useful queries to make is to call PQconsumeInput(), then check PQnotifies(). -You can use select(2) to wait for backend data to +You can use select() to wait for backend data to arrive, thereby using no CPU power unless there is something to do. (See PQsocket() to obtain the file descriptor -number to use with select.) +number to use with select().) 
Note that this will work OK whether you submit queries with PQsendQuery/PQgetResult or simply use PQexec. You should, however, remember to @@ -1706,10 +1711,10 @@ int PQgetline(PGconn *conn, char *string, int length) -Like fgets(3), this routine copies up to length-1 characters -into string. It is like gets(3), however, in that it converts -the terminating newline into a null character. -PQgetline returns EOF at EOF, 0 if the +Like fgets, this routine copies up to length-1 characters +into string. It is like gets, however, in that it converts +the terminating newline into a zero byte. +PQgetline returns EOF at the end of input, 0 if the entire line has been read, and 1 if the buffer is full but the terminating newline has not yet been read. @@ -1776,7 +1781,7 @@ actually available.) PQputline Sends a null-terminated string to the backend server. -Returns 0 if OK, EOF if unable to send the string. +Returns 0 if OK, EOF if unable to send the string. int PQputline(PGconn *conn, const char *string); @@ -1791,7 +1796,7 @@ the backend that it has finished sending its data. PQputnbytes Sends a non-null-terminated string to the backend server. -Returns 0 if OK, EOF if unable to send the string. +Returns 0 if OK, EOF if unable to send the string. int PQputnbytes(PGconn *conn, const char *buffer, @@ -1806,7 +1811,7 @@ specified directly. PQendcopy - Syncs with the backend. This function waits until + Synchronizes with the backend. This function waits until the backend has finished the copy. It should either be issued when the last string has been sent to the backend using PQputline or when the @@ -1814,7 +1819,7 @@ specified directly. using PGgetline. It must be issued or the backend may get out of sync with the frontend. Upon return from this function, the backend is ready to - receive the next query. + receive the next SQL command. The return value is 0 on successful completion, nonzero otherwise. 
@@ -1856,7 +1861,7 @@ Older applications are likely to submit a copy in or copy out via PQexec and assume that the transaction is done after PQendcopy. This will work correctly only if the copy in/out is the only -SQL command in the query string. +SQL command in the command string. @@ -2065,7 +2070,7 @@ sets the default time zone. PGCLIENTENCODING -sets the default client encoding (if MULTIBYTE support was selected +sets the default client encoding (if multibyte support was selected when configuring PostgreSQL). diff --git a/doc/src/sgml/lobj.sgml b/doc/src/sgml/lobj.sgml index a6e31bef46..29a6a19635 100644 --- a/doc/src/sgml/lobj.sgml +++ b/doc/src/sgml/lobj.sgml @@ -1,5 +1,5 @@ @@ -106,19 +106,19 @@ $Header: /cvsroot/pgsql/doc/src/sgml/lobj.sgml,v 1.24 2001/11/12 19:19:39 petere The PostgreSQL large object interface is modeled after - the Unix file system interface, with analogues of + the Unix file-system interface, with analogues of open(2), read(2), write(2), lseek(2), etc. User functions call these routines to retrieve only the data of interest from a large object. For example, if a large object type called mugshot existed that stored - photographs of faces, then a function called beard could - be declared on mugshot data. Beard could look at the + photographs of faces, then a function called beard could + be declared on mugshot data. beard could look at the lower third of a photograph, and determine the color of the beard that appeared there, if any. The entire - large object value need not be buffered, or even - examined, by the beard function. + large-object value need not be buffered, or even + examined, by the beard function. Large objects may be accessed from dynamically-loaded C functions or database client programs that link the library. PostgreSQL provides a set of routines that @@ -140,7 +140,7 @@ Oid lo_creat(PGconn *conn, int libpq/libpq-fs.h. 
The access type (read, write, or both) is controlled by - OR'ing together the bits INV_READ and + or'ing together the bits INV_READ and INV_WRITE. The low-order sixteen bits of the mask have historically been used at Berkeley to designate the storage manager number on which the large object should reside. These @@ -156,12 +156,12 @@ inv_oid = lo_creat(INV_READ|INV_WRITE); Importing a Large Object - To import a UNIX file as a large object, call + To import an operating system file as a large object, call Oid lo_import(PGconn *conn, const char *filename) filename - specifies the Unix path name of + specifies the operating system name of the file to be imported as a large object. @@ -171,13 +171,13 @@ Oid lo_import(PGconn *conn, const c To export a large object - into UNIX file, call + into an operating system file, call int lo_export(PGconn *conn, Oid lobjId, const char *filename) The lobjId argument specifies the Oid of the large object to export and the filename argument specifies - the UNIX path name of the file. + the operating system name name of the file. @@ -191,7 +191,7 @@ int lo_open(PGconn *conn, Oid lobjId, int mode) The lobjId argument specifies the Oid of the large object to open. The mode bits control whether the - object is opened for reading (INV_READ), writing or + object is opened for reading (INV_READ), writing (INV_WRITE), or both. A large object cannot be opened before it is created. lo_open returns a large object descriptor @@ -241,9 +241,9 @@ int lo_read(PGconn *conn, int fd, char *buf, size_t len) int lo_lseek(PGconn *conn, int fd, int offset, int whence) This routine moves the current location pointer for the - large object described by fd to the new location specified - by offset. The valid values for whence are - SEEK_SET, SEEK_CUR, and SEEK_END. + large object described by fd to the new location specified + by offset. The valid values for whence are + SEEK_SET, SEEK_CUR, and SEEK_END. 
@@ -255,7 +255,7 @@ int lo_lseek(PGconn *conn, int fd, int offset, int whence) int lo_close(PGconn *conn, int fd) - where fd is a large object descriptor returned by + where fd is a large object descriptor returned by lo_open. On success, lo_close returns zero. On error, the return value is negative. @@ -281,8 +281,8 @@ Oid lo_unlink(PGconn *conn, Oid lob Server-side Built-in Functions - There are two built-in registered functions, lo_import - and lo_export which are convenient for use + There are two built-in registered functions, lo_import + and lo_export which are convenient for use in SQL queries. Here is an example of their use @@ -295,7 +295,7 @@ CREATE TABLE image ( INSERT INTO image (name, raster) VALUES ('beautiful image', lo_import('/etc/motd')); -SELECT lo_export(image.raster, '/tmp/motd') from image +SELECT lo_export(image.raster, '/tmp/motd') FROM image WHERE name = 'beautiful image'; @@ -305,24 +305,18 @@ SELECT lo_export(image.raster, '/tmp/motd') from image Accessing Large Objects from <application>Libpq</application> - Below is a sample program which shows how the large object + is a sample program which shows how the large object interface in libpq can be used. Parts of the program are - commented out but are left in the source for the readers + commented out but are left in the source for the reader's benefit. This program can be found in - -../src/test/examples - + src/test/examples/testlo.c in the source distribution. Frontend applications which use the large object interface in libpq should include the header file libpq/libpq-fs.h and link with the libpq library. 
- - - -Example Program - + Large Objects with <application>Libpq</application> Example Program /*-------------------------------------------------------------- diff --git a/doc/src/sgml/odbc.sgml b/doc/src/sgml/odbc.sgml index facc55010a..af6e2a8aef 100644 --- a/doc/src/sgml/odbc.sgml +++ b/doc/src/sgml/odbc.sgml @@ -1,5 +1,5 @@ @@ -23,6 +23,9 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/odbc.sgml,v 1.28 2001/11/21 05:53:41 ODBC + + Introduction + Background information originally by Tim Goeke @@ -38,12 +41,9 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/odbc.sgml,v 1.28 2001/11/21 05:53:41 ODBC provides a product-neutral interface between frontend applications and database servers, allowing a user or developer to write applications that are - transportable between servers from different manufacturers.. + portable between servers from different manufacturers.. - - Background - The ODBC API matches up on the backend to an ODBC-compatible data source. @@ -52,9 +52,9 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/odbc.sgml,v 1.28 2001/11/21 05:53:41 - The backend access come from ODBC drivers, + The backend access comes from ODBC drivers, or vendor-specific drivers that - allow data access. psqlODBC is such a driver, + allow data access. psqlODBC, which is included in the PostgreSQL distribution, is such a driver, along with others that are available, such as the OpenLink ODBC drivers. @@ -62,7 +62,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/odbc.sgml,v 1.28 2001/11/21 05:53:41 Once you write an ODBC application, you should be able to connect to any - back end database, regardless of the vendor, as long as the database schema + back-end database, regardless of the vendor, as long as the database schema is the same. @@ -71,8 +71,8 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/odbc.sgml,v 1.28 2001/11/21 05:53:41 and PostgreSQL servers that have exactly the same data. 
Using ODBC, your Windows application would make exactly the - same calls and the back end data source would look the same (to the Windows - app). + same calls and the back-end data source would look the same (to the Windows + application). @@ -173,7 +173,7 @@ psql -d template1 -f LOCATION/odbc.sql ~/.odbc.ini contains user-specified access information for the psqlODBC driver. The file uses conventions typical for Windows - Registry files, but despite this restriction can be made to work. + Registry files. @@ -186,12 +186,12 @@ psql -d template1 -f LOCATION/odbc.sql Each section must be labeled with the name given in [ODBC Data Sources] and must contain the following entries: - + Driver = prefix/lib/libpsqlodbc.so -Database=DatabaseName -Servername=localhost -Port=5432 - +Database = DatabaseName +Servername = localhost +Port = 5432 + @@ -212,7 +212,7 @@ Port=5432 Here is an example .odbc.ini file, showing access information for three databases: - + [ODBC Data Sources] DataEntry = Read/Write Database QueryOnly = Read-only Database @@ -246,7 +246,7 @@ Driver = /opt/postgres/current/lib/libpsqlodbc.so [ODBC] InstallDir = /opt/applix/axdata/axshlib - + @@ -298,19 +298,20 @@ InstallDir = /opt/applix/axdata/axshlib The ODBC API is the way to go. For Visual C++ coding you can find out more at - Microsoft's web site or in your VC++ docs. + Microsoft's web site or in your Visual C++ + documentation. Visual Basic and the other RAD tools have Recordset objects that use ODBC directly to access data. Using the data-aware controls, you can quickly - link to the ODBC back end database + link to the ODBC back-end database (very quickly). - Playing around with MS Access will help you sort this out. Try using + Playing around with MS Access will help you sort this out. Try using FileGet External Data. 
@@ -354,7 +355,7 @@ InstallDir = /opt/applix/axdata/axshlib Enabling <application>ApplixWare</application> Database Access - These instructions are for the 4.4.2 release of + These instructions are for the 4.4.2 release of ApplixWare on Linux. Refer to the Linux Sys Admin on-line book for more detailed information. @@ -411,7 +412,7 @@ libFor elfodbc applixroot/applix/axdata/axshlib/lib Create .odbc.ini as - described above. You may also want to add the flag + described in . You may also want to add the flag TextAsLongVarchar=0 @@ -476,7 +477,7 @@ TextAsLongVarchar=0 - The Ready message will appear in the lower left corner of the data + The Ready message will appear in the lower left corner of the data window. This indicates that you can now enter queries. @@ -549,7 +550,7 @@ TextAsLongVarchar=0 - The September release of ApplixWare v4.4.1 (the first release with official + The September release of ApplixWare 4.4.1 (the first release with official ODBC support under Linux) shows problems when user names exceed eight (8) characters in length. Problem description contributed by Steve Campbell @@ -574,7 +575,7 @@ TextAsLongVarchar=0 The axnet program's security system seems a little suspect. axnet does things on behalf of the user and on a true - multiple user system it really should be run with root security + multiuser system it really should be run with root security (so it can read/write in each user's directory). I would hesitate to recommend this, however, since we have no idea what security holes this creates. @@ -633,7 +634,7 @@ cary 27883 0.9 31.0 12692 4596 ? S 10:24 0:04 axmain Many of the error messages from ApplixWare go to stderr, but I'm not sure where stderr - is sent, so strace is the way to find out. + is sent, so strace is the way to find out. @@ -641,16 +642,13 @@ cary 27883 0.9 31.0 12692 4596 ? 
S 10:24 0:04 axmain For example, after getting - a Cannot launch gateway on server, + a Cannot launch gateway on server, I ran strace on axnet and got -[pid 27947] open("/usr/lib/libodbc.so", O_RDONLY) = -1 ENOENT -(No such file or directory) -[pid 27947] open("/lib/libodbc.so", O_RDONLY) = -1 ENOENT -(No such file or directory) -[pid 27947] write(2, "/usr2/applix/axdata/elfodbc: -can't load library 'libodbc.so'\n", 61) = -1 EIO (I/O error) +[pid 27947] open("/usr/lib/libodbc.so", O_RDONLY) = -1 ENOENT (No such file or directory) +[pid 27947] open("/lib/libodbc.so", O_RDONLY) = -1 ENOENT (No such file or directory) +[pid 27947] write(2, "/usr2/applix/axdata/elfodbc: can't load library 'libodbc.so'\n", 61) = -1 EIO (I/O error) So what is happening is that applix elfodbc is searching for libodbc.so, but it cannot find it. That is why axnet.cnf needed to be changed. @@ -660,6 +658,8 @@ can't load library 'libodbc.so'\n", 61) = -1 EIO (I/O error) Running the <application>ApplixWare</application> Demo + I think the condition this refers to is gone. -- petere 2002-01-07 + In order to go through the ApplixWare Data Tutorial, you need to create @@ -668,6 +668,7 @@ can't load library 'libodbc.so'\n", 61) = -1 EIO (I/O error) on many of the database columns, and PostgreSQL does not currently allow this option. + To get around this problem, you can do the following: @@ -717,7 +718,7 @@ can't load library 'libodbc.so'\n", 61) = -1 EIO (I/O error) - Select File->Compile and Save. + Select FileCompile and Save. @@ -735,13 +736,13 @@ can't load library 'libodbc.so'\n", 61) = -1 EIO (I/O error) - Select *->Run Macro + Select *Run Macro. - Enter the value sqldemo, then click OK. + Enter the value sqldemo, then click OK. @@ -757,6 +758,7 @@ can't load library 'libodbc.so'\n", 61) = -1 EIO (I/O error) + Useful Macros @@ -766,12 +768,12 @@ can't load library 'libodbc.so'\n", 61) = -1 EIO (I/O error) macro file. 
This is an example ~/axhome/macros/login.am file: - + macro login set_set_system_var@("sql_username@","tgl") set_system_var@("sql_passwd@","no$way") endmacro - + diff --git a/doc/src/sgml/plperl.sgml b/doc/src/sgml/plperl.sgml index 978abd3af1..6cf9e83069 100644 --- a/doc/src/sgml/plperl.sgml +++ b/doc/src/sgml/plperl.sgml @@ -1,5 +1,5 @@ @@ -13,15 +13,19 @@ $Header: /cvsroot/pgsql/doc/src/sgml/plperl.sgml,v 2.11 2001/11/21 05:53:41 thom Perl + + Introduction + - PL/Perl allows you to write functions in the Perl programming - language that may be used in SQL queries as if they were built into + PL/Perl allows you to write functions in the Perl programming language that may + be used in SQL queries as if they were built into PostgreSQL. The PL/Perl interpreter (when installed as trusted interpreter with - default name plperl) interpreter is a full Perl interpreter. However, certain + default name plperl) is a full Perl interpreter. However, certain operations have been disabled in order to maintain the security of the system. In general, the operations that are restricted are those that interact with the environment. This includes file handle @@ -33,8 +37,9 @@ $Header: /cvsroot/pgsql/doc/src/sgml/plperl.sgml,v 2.11 2001/11/21 05:53:41 thom When PL/Perl is installed as untrusted interpreter (with name plperlu), - everything is permitted, and any Perl code can be loaded (by superuser only). + everything is permitted, and any Perl code can be loaded (by a superuser only). + Building and Installing @@ -54,10 +59,9 @@ $Header: /cvsroot/pgsql/doc/src/sgml/plperl.sgml,v 2.11 2001/11/21 05:53:41 thom fact: -***** -* Cannot build PL/Perl because libperl is not a shared library. -* Skipped. -***** +*** Cannot build PL/Perl because libperl is not a shared library. +*** You might have to rebuild your Perl installation. Refer to +*** the documentation for details. 
Therefore it is likely that you will have to re-build and install @@ -84,7 +88,7 @@ gmake install $ createlang plperl template1 Alternatively, to create untrusted interpreter (where functions can only -be created by superuser, but the functions are not restricted), use: +be created by a superuser, but the functions are not restricted), use: $ createlang plperlu template1 @@ -99,7 +103,7 @@ be created by superuser, but the functions are not restricted), use: Assume you have the following table: -CREATE TABLE EMPLOYEE ( +CREATE TABLE employee ( name text, basesalary integer, bonus integer @@ -111,7 +115,7 @@ CREATE TABLE EMPLOYEE ( CREATE FUNCTION totalcomp(integer, integer) RETURNS integer AS 'return $_[0] + $_[1]' - LANGUAGE 'plperl'; + LANGUAGE plperl; Notice that the arguments to the function are passed in @@ -131,7 +135,7 @@ SELECT name, totalcomp(basesalary, bonus) FROM employee; CREATE FUNCTION empcomp(employee) RETURNS integer AS ' my $emp = shift; return $emp->{''basesalary''} + $emp->{''bonus''}; -' LANGUAGE 'plperl'; +' LANGUAGE plperl; A tuple is passed as a reference to a hash. The keys are the names of the fields in the tuples. The hash values are values of the @@ -151,7 +155,7 @@ CREATE FUNCTION empcomp(employee) RETURNS integer AS ' - The new function empcomp can used like: + The new function empcomp can be used like: SELECT name, empcomp(employee) FROM employee; @@ -165,18 +169,22 @@ CREATE FUNCTION badfunc() RETURNS integer AS ' open(TEMP, ">/tmp/badfile"); print TEMP "Gotcha!\n"; return 1; -' LANGUAGE 'plperl'; +' LANGUAGE plperl; The creation of the function will succeed, but executing it will not. - + + Note that if same function was created by superuser using language plperlu, execution would succeed. - Access to database itself from your Perl function can be done via - an experimental module DBD::PgSPI. This module makes available a DBI-compliant - database-handle named $pg_dbh, and you can use that to make queries with - normal DBI syntax. 
+ Access to database itself from your Perl function can be done via + an experimental module DBD::PgSPI + (also on CPAN). This + module makes available a DBI-compliant database-handle + named $pg_dbh, and you can use that to perform + queries with normal DBI syntax. diff --git a/doc/src/sgml/plpython.sgml b/doc/src/sgml/plpython.sgml index 485eea06ca..768c26b569 100644 --- a/doc/src/sgml/plpython.sgml +++ b/doc/src/sgml/plpython.sgml @@ -1,4 +1,4 @@ - + PL/Python - Python Procedural Language @@ -6,44 +6,74 @@ PL/Python Python - + + Introduction + - This chapter is not fully developed yet. + The PL/Python procedural language allows + PostgreSQL functions to be written in + the Python language. + - The current version of PL/Python functions as a trusted language only; - access to the filesystem and other local resources are disabled. - Specifically, PL/Python uses the Python restricted execution environment, - further restricts it to prevent the use of the file open call, and - allows only modules from a specific list to be imported. Presently, - that list includes: array, bisect, binascii, calendar, cmath, codecs, - errno, marshal, math, md5, mpz, operator, pcre, pickle, random, re, - regex, sre, sha, string, StringIO, struct, time, whrandom, and zlib. + The current version of PL/Python + functions as a trusted language only; access to the file system and + other local resources is disabled. Specifically, + PL/Python uses the Python restricted + execution environment, further restricts it to prevent the use of + the file open call, and allows only modules from a + specific list to be imported. Presently, that list includes: + array, bisect, binascii, calendar, cmath, codecs, errno, marshal, + math, md5, mpz, operator, pcre, pickle, random, re, regex, sre, + sha, string, StringIO, struct, time, whrandom, and zlib. 
+ - There is discussion on creating an untrusted language variant for a + In the current version, any database error encountered while + running a PL/Python function will result + in the immediate termination of that function by the server. It is + not possible to trap error conditions using Python try + ... catch constructs. For example, a syntax error in an + SQL statement passed to the plpy.execute() call + will terminate the function. This behavior may be changed in a future release. - - In the current version, any postgresql error encountered while running - a PL/Python function will result in the immediate termination of that - function by the backend. It is not possible to trap error conditions - using Python try ... catch constructs. For example, a syntax error in - an SQL statement passed to the plpy.execute() call will terminate the - function. This behavior may be changed in a future release. - - + Installation - ... needs to be worked out. + To build PL/Python, the option needs + to be specified when running configure. If + after building and installing you have a file called + plpython.so (possibly a different extension), + then everything went well. Otherwise you should have seen a notice + like this flying by: + +*** Cannot build PL/Python because libpython is not a shared library. +*** You might have to rebuild your Python installation. Refer to +*** the documentation for details. + + That means you have to rebuild (part of) your Python installation + to supply this shared library. + + + + The catch is that the Python distribution or the Python maintainers + do not provide any direct way to do this. The closest thing we can + offer you is the information in Python FAQ + 3.30. On some operating systems you don't really have to + build a shared library, but then you will have to convince the + PostgreSQL build system of this. Consult the + Makefile in the + src/pl/plpython directory for details. 
- Using + Using PL/Python There are sample functions in diff --git a/doc/src/sgml/plsql.sgml b/doc/src/sgml/plsql.sgml index 7ec861d850..b05e965bd4 100644 --- a/doc/src/sgml/plsql.sgml +++ b/doc/src/sgml/plsql.sgml @@ -1,5 +1,5 @@ @@ -1700,7 +1700,7 @@ RAISE EXCEPTION ''Inexistent ID --> %'',user_id; It is possible to hook into the error mechanism to notice that this happens. But currently it is impossible to tell what really - caused the abort (input/output conversion error, floating point + caused the abort (input/output conversion error, floating-point error, parse error). And it is possible that the database backend is in an inconsistent state at this point so returning to the upper executor or issuing more commands might corrupt the whole database. diff --git a/doc/src/sgml/problems.sgml b/doc/src/sgml/problems.sgml index 5895c34dba..18c534f0ba 100644 --- a/doc/src/sgml/problems.sgml +++ b/doc/src/sgml/problems.sgml @@ -1,5 +1,5 @@ @@ -252,7 +252,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/problems.sgml,v 2.11 2001/11/21 05:53:41 th Do not spend all your time to figure out which changes in the input make the problem go away. This will probably not help solving it. If it turns out that the bug cannot be fixed right away, you will still have time to - find and share your work around. Also, once again, do not waste your time + find and share your work-around. Also, once again, do not waste your time guessing why the bug exists. We will find that out soon enough. diff --git a/doc/src/sgml/programmer.sgml b/doc/src/sgml/programmer.sgml index 52456c0583..0777979e8b 100644 --- a/doc/src/sgml/programmer.sgml +++ b/doc/src/sgml/programmer.sgml @@ -1,5 +1,5 @@ @@ -16,30 +16,17 @@ PostgreSQL Programmer's Guide. &intro; ]]> - - Organization - - - The first part of this manual is the description of the client-side - programming interfaces and support libraries for various languages. 
- The second part explains the PostgreSQL - approach to extensibility and describe how users can extend - PostgreSQL by adding user-defined types, - operators, aggregates, and both query language and programming - language functions. After a discussion of the - PostgreSQL rule system, we discuss the - trigger and SPI interfaces. The third part documents the - procedural languages available in the - PostgreSQL distribution. - - - - Proficiency with Unix and C programming is assumed. - - - Client Interfaces + + + + This part of the manual is the description of the client-side + programming interfaces and support libraries for various + languages. + + + + &libpq; &lobj; &libpqpp; @@ -53,6 +40,20 @@ PostgreSQL Programmer's Guide. Server Programming + + + + This second part of the manual explains the + PostgreSQL approach to extensibility + and describes how users can extend + PostgreSQL by adding user-defined + types, operators, aggregates, and both query language and + programming language functions. After a discussion of the + PostgreSQL rule system, we discuss the + trigger and SPI interfaces. + + + &arch-pg; &extend; &xfunc; @@ -70,6 +71,14 @@ PostgreSQL Programmer's Guide. Procedural Languages + + + This part documents the procedural languages available in the + PostgreSQL distribution as well as + general issues concerning procedural languages. + + + &xplang; &plsql; &pltcl; diff --git a/doc/src/sgml/query.sgml b/doc/src/sgml/query.sgml index a6cfe62cd1..c5cc76ff05 100644 --- a/doc/src/sgml/query.sgml +++ b/doc/src/sgml/query.sgml @@ -1,5 +1,5 @@ @@ -137,7 +137,7 @@ CREATE TABLE weather ( varchar(80) specifies a data type that can store arbitrary character strings up to 80 characters in length. int is the normal integer type. real is - a type for storing single precision floating point numbers. + a type for storing single precision floating-point numbers. date should be self-explanatory. (Yes, the column of type date is also named date. 
This may be convenient or confusing -- you choose.) diff --git a/doc/src/sgml/ref/create_operator.sgml b/doc/src/sgml/ref/create_operator.sgml index 11debc3d3a..93cc39770e 100644 --- a/doc/src/sgml/ref/create_operator.sgml +++ b/doc/src/sgml/ref/create_operator.sgml @@ -1,5 +1,5 @@ @@ -343,7 +343,7 @@ MYBOXES.description <<< box '((0,0), (1,1))' res_proc must be a registered function (meaning it is already defined using CREATE FUNCTION) which accepts arguments of the correct - data types and returns a floating point number. The + data types and returns a floating-point number. The query optimizer simply calls this function, passing the parameter ((0,0), (1,1)) and multiplies the result by the relation size to get the expected number of instances. @@ -352,7 +352,7 @@ MYBOXES.description <<< box '((0,0), (1,1))' Similarly, when the operands of the operator both contain instance variables, the query optimizer must estimate the size of the resulting join. The function join_proc will - return another floating point number which will be multiplied + return another floating-point number which will be multiplied by the cardinalities of the two tables involved to compute the expected result size. diff --git a/doc/src/sgml/ref/set.sgml b/doc/src/sgml/ref/set.sgml index 6078cc90e5..83ff18824e 100644 --- a/doc/src/sgml/ref/set.sgml +++ b/doc/src/sgml/ref/set.sgml @@ -1,5 +1,5 @@ @@ -195,8 +195,8 @@ SET TIME ZONE { 'timezone' | LOCAL The value for the seed to be used by the random function. Allowed - values are floating point numbers between 0 and 1, which - are then multiplied by 2^31-1. This product will + values are floating-point numbers between 0 and 1, which + are then multiplied by 231-1. This product will silently overflow if a number outside the range is used. 
diff --git a/doc/src/sgml/regress.sgml b/doc/src/sgml/regress.sgml index 027ea381d1..291cb4838f 100644 --- a/doc/src/sgml/regress.sgml +++ b/doc/src/sgml/regress.sgml @@ -1,4 +1,4 @@ - + Regression Tests @@ -135,7 +135,7 @@ Some properly installed and fully functional PostgreSQL installations can fail some of these regression tests due to - platform-specific artifacts such as varying floating point representation + platform-specific artifacts such as varying floating-point representation and time zone support. The tests are currently evaluated using a simple diff comparison against the outputs generated on a reference system, so the results are sensitive to @@ -248,7 +248,7 @@ PGTZ='PST8PDT7,M04.01.0,M10.05.03'; export PGTZ - Floating point differences + Floating-point differences Some of the tests involve computing 64-bit (double diff --git a/doc/src/sgml/rules.sgml b/doc/src/sgml/rules.sgml index 01a536e800..ccbf7fb93a 100644 --- a/doc/src/sgml/rules.sgml +++ b/doc/src/sgml/rules.sgml @@ -1,4 +1,4 @@ - + The Rule System @@ -14,6 +14,9 @@ + + Introduction + Production rule systems are conceptually simple, but there are many subtle points involved in actually using @@ -42,6 +45,9 @@ as well as . + + + What is a Query Tree? diff --git a/doc/src/sgml/runtime.sgml b/doc/src/sgml/runtime.sgml index 687045e37d..c1516db6ef 100644 --- a/doc/src/sgml/runtime.sgml +++ b/doc/src/sgml/runtime.sgml @@ -1,5 +1,5 @@ @@ -128,9 +128,9 @@ postgres$ initdb -D /usr/local/pgsql/data password to the database superuser. After initdb, modify pg_hba.conf to use md5 or password, instead of trust, authentication - before you first start the postmaster. (Other, possibly + before you start the server for the first time. (Other, possibly more convenient approaches include using ident - authentication or filesystem permissions to restrict connections. See + authentication or file system permissions to restrict connections. See for more information.) 
@@ -493,7 +493,7 @@ log_connections = yes syslog = 2 As you see, options are one per line. The equal sign between name - and value is optional. White space is insignificant, blank lines + and value is optional. Whitespace is insignificant, blank lines are ignored. Hash marks (#) introduce comments anywhere. @@ -504,7 +504,7 @@ syslog = 2 The configuration file is reread whenever the postmaster receives a SIGHUP signal (which is most easily sent by means - of pg_ctl reload). The postmaster also propagates + of pg_ctl reload). The postmaster also propagates this signal to all already-running backend processes, so that existing sessions also get the new default. Alternatively, you can send the signal to only one backend process @@ -886,7 +886,7 @@ env PGOPTIONS='-c geqo=off' psql LOG_PID (boolean) - Prefixes each server log message with the process id of the + Prefixes each server log message with the process ID of the backend process. This is useful to sort out which messages pertain to which connection. The default is off. diff --git a/doc/src/sgml/syntax.sgml b/doc/src/sgml/syntax.sgml index e1fbeeb3d5..48c5e7bb97 100644 --- a/doc/src/sgml/syntax.sgml +++ b/doc/src/sgml/syntax.sgml @@ -1,5 +1,5 @@ @@ -102,7 +102,8 @@ INSERT INTO MY_TABLE VALUES (3, 'hi there'); SQL identifiers and key words must begin with a letter - (a-z) or underscore + (a-z, but also letters with + diacritical marks and non-Latin letters) or an underscore (_). Subsequent characters in an identifier or key word can be letters, digits (0-9), or underscores, @@ -200,9 +201,9 @@ UPDATE "my_table" SET "a" = 5; - There are four kinds of implicitly typed + There are four kinds of implicitly-typed constants in PostgreSQL: - strings, bit strings, integers, and floating point numbers. + strings, bit strings, integers, and floating-point numbers. Constants can also be specified with explicit types, which can enable more accurate representation and more efficient handling by the system. 
The implicit constants are described below; explicit @@ -266,12 +267,12 @@ SELECT 'foobar'; SELECT 'foo' 'bar'; is not valid syntax, and PostgreSQL is - consistant with SQL9x in this regard. + consistent with SQL9x in this regard. - Bit String Constants + Bit-String Constants bit strings @@ -279,12 +280,12 @@ SELECT 'foo' 'bar'; - Bit string constants look like string constants with a + Bit-string constants look like string constants with a B (upper or lower case) immediately before the opening quote (no intervening whitespace), e.g., B'1001'. The only characters allowed within - bit string constants are 0 and - 1. Bit string constants can be continued + bit-string constants are 0 and + 1. Bit-string constants can be continued across lines in the same way as regular string constants. @@ -303,7 +304,7 @@ SELECT 'foo' 'bar'; - Floating Point Constants + Floating-Point Constants floating point @@ -311,7 +312,7 @@ SELECT 'foo' 'bar'; - Floating point constants are accepted in these general forms: + Floating-point constants are accepted in these general forms: digits.digitse+-digits digits.digitse+-digits @@ -321,7 +322,7 @@ SELECT 'foo' 'bar'; digits. At least one digit must be before or after the decimal point. At least one digit must follow the exponent delimiter (e) if that field is present. - Thus, a floating point constant is distinguished from an integer + Thus, a floating-point constant is distinguished from an integer constant by the presence of either the decimal point or the exponent clause (or both). There must not be a space or other characters embedded in the constant. @@ -329,7 +330,7 @@ SELECT 'foo' 'bar'; - These are some examples of valid floating point constants: + These are some examples of valid floating-point constants: 3.5 4. @@ -341,7 +342,7 @@ SELECT 'foo' 'bar'; - Floating point constants are of type DOUBLE + Floating-point constants are of type DOUBLE PRECISION. 
REAL can be specified explicitly by using SQL string notation or PostgreSQL type notation: @@ -385,9 +386,13 @@ CAST ( 'string' AS type ) typename ( value ) although this only works for types whose names are also valid as - function names. (For example, double precision - can't be used this way --- but the equivalent float8 - can.) + function names. For example, double precision + can't be used this way, but the equivalent float8 + can. Also, the names interval, time, and + timestamp can only be used in this context if they are + double-quoted, because of parser conflicts. Therefore, the use of + the function-like cast syntax leads to inconsistencies and should + probably be avoided in new applications. @@ -429,8 +434,8 @@ CAST ( 'string' AS type ) Individual array elements can be placed between double-quote marks (") to avoid ambiguity - problems with respect to white space. Without quote marks, the - array-value parser will skip leading white space. + problems with respect to whitespace. Without quote marks, the + array-value parser will skip leading whitespace. @@ -495,7 +500,7 @@ CAST ( 'string' AS type ) When working with non-SQL-standard operator names, you will usually need to separate adjacent operators with spaces to avoid ambiguity. - For example, if you have defined a left-unary operator named @, + For example, if you have defined a left unary operator named @, you cannot write X*@Y; you must write X* @Y to ensure that PostgreSQL reads it as two operator names @@ -574,7 +579,7 @@ CAST ( 'string' AS type ) - The period (.) is used in floating point + The period (.) is used in floating-point constants, and to separate table and column names. @@ -670,8 +675,8 @@ CAST ( 'string' AS type ) The identity (transaction ID) of the inserting transaction for - this tuple. (Note: a tuple is an individual state of a row; - each UPDATE of a row creates a new tuple for the same logical row.) + this tuple. 
(Note: A tuple is an individual state of a row; + each update of a row creates a new tuple for the same logical row.) @@ -733,7 +738,7 @@ CAST ( 'string' AS type ) counter to wrap around. Hence, it is bad practice to assume that OIDs are unique, unless you take steps to ensure that they are unique. Recommended practice when using OIDs for row identification is to create - a unique index on the OID column of each table for which the OID will be + a unique constraint on the OID column of each table for which the OID will be used. Never assume that OIDs are unique across tables; use the combination of tableoid and row OID if you need a database-wide identifier. (Future releases of PostgreSQL are likely to use a separate @@ -756,12 +761,6 @@ CAST ( 'string' AS type ) In practice this limit is not a problem --- note that the limit is on number of SQL queries, not number of tuples processed. - - - For further information on the system attributes consult - . - - @@ -916,7 +915,7 @@ $number CREATE FUNCTION dept (text) RETURNS dept AS 'SELECT * FROM dept WHERE name = $1' - LANGUAGE 'sql'; + LANGUAGE SQL; Here the $1 will be replaced by the first diff --git a/doc/src/sgml/xaggr.sgml b/doc/src/sgml/xaggr.sgml index c73a0e0d6a..e82406ec02 100644 --- a/doc/src/sgml/xaggr.sgml +++ b/doc/src/sgml/xaggr.sgml @@ -1,5 +1,5 @@ @@ -39,42 +39,42 @@ $Header: /cvsroot/pgsql/doc/src/sgml/xaggr.sgml,v 1.15 2001/11/21 06:09:45 thoma the column values from each row. Sum is an example of this kind of aggregate. Sum starts at zero and always adds the current row's value to - its running total. For example, if we want to make a Sum + its running total. For example, if we want to make a sum aggregate to work on a data type for complex numbers, we only need the addition function for that data type. 
The aggregate definition is: - + CREATE AGGREGATE complex_sum ( sfunc = complex_add, basetype = complex, stype = complex, initcond = '(0,0)' ); + + SELECT complex_sum(a) FROM test_complex; - +------------+ - |complex_sum | - +------------+ - |(34,53.9) | - +------------+ - + complex_sum +------------- + (34,53.9) + (In practice, we'd just name the aggregate sum, and rely on PostgreSQL to figure out which kind - of sum to apply to a complex column.) + of sum to apply to a column of type complex.) - The above definition of Sum will return zero (the initial + The above definition of sum will return zero (the initial state condition) if there are no non-null input values. - Perhaps we want to return NULL in that case instead --- SQL92 - expects Sum to behave that way. We can do this simply by + Perhaps we want to return NULL in that case instead --- the SQL standard + expects sum to behave that way. We can do this simply by omitting the initcond phrase, so that the initial state condition is NULL. Ordinarily this would mean that the sfunc would need to check for a NULL state-condition input, but for - Sum and some other simple aggregates like Max and Min, + sum and some other simple aggregates like max and min, it's sufficient to insert the first non-null input value into the state variable and then start applying the transition function at the second non-null input value. PostgreSQL @@ -93,7 +93,7 @@ SELECT complex_sum(a) FROM test_complex; - Average is a more complex example of an aggregate. It requires + Avg (average) is a more complex example of an aggregate. It requires two pieces of running state: the sum of the inputs and the count of the number of inputs. The final result is obtained by dividing these quantities. 
Average is typically implemented by using a @@ -101,7 +101,7 @@ SELECT complex_sum(a) FROM test_complex; the built-in implementation of avg(float8) looks like: - + CREATE AGGREGATE avg ( sfunc = float8_accum, basetype = float8, @@ -109,18 +109,13 @@ CREATE AGGREGATE avg ( finalfunc = float8_avg, initcond = '{0,0}' ); - + - For further details see - - CREATE AGGREGATE in - The PostgreSQL User's Guide. + For further details see the description of the CREATE + AGGREGATE command in the Reference + Manual. diff --git a/doc/src/sgml/xfunc.sgml b/doc/src/sgml/xfunc.sgml index 1b3c86d3f0..a52bc02eb4 100644 --- a/doc/src/sgml/xfunc.sgml +++ b/doc/src/sgml/xfunc.sgml @@ -1,5 +1,5 @@ @@ -59,7 +59,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/xfunc.sgml,v 1.45 2001/11/21 06:09:45 thoma Every kind - of function can take a base type, a composite type or + of function can take a base type, a composite type, or some combination as arguments (parameters). In addition, every kind of function can return a base type or a composite type. It's easiest to define SQL @@ -67,6 +67,12 @@ $Header: /cvsroot/pgsql/doc/src/sgml/xfunc.sgml,v 1.45 2001/11/21 06:09:45 thoma can also be found in funcs.sql and funcs.c in the tutorial directory. + + + Throughout this chapter, it can be useful to look at the reference + page of the CREATE FUNCTION command to + understand the examples better. + @@ -268,7 +274,7 @@ SELECT name, double_salary(EMP) AS dream Notice the use of the syntax $1.salary to select one field of the argument row value. Also notice - how the calling SELECT command uses a table name to denote + how the calling SELECT command uses a table name to denote the entire current row of that table as a composite value. @@ -376,7 +382,7 @@ ERROR: parser: parse error at or near "." 
Another way to use a function returning a row result is to declare a - second function accepting a rowtype parameter, and pass the function + second function accepting a row type parameter, and pass the function result to it: @@ -400,15 +406,15 @@ SELECT getname(new_emp()); As previously mentioned, an SQL function may be declared as - returning SETOF sometype. - In this case the function's final SELECT query is executed to + returning SETOF sometype. + In this case the function's final SELECT query is executed to completion, and each row it outputs is returned as an element of the set. Functions returning sets may only be called in the target list - of a SELECT query. For each row that the SELECT generates by itself, + of a SELECT query. For each row that the SELECT generates by itself, the function returning set is invoked, and an output row is generated for each element of the function's result set. An example: @@ -449,9 +455,9 @@ SELECT name, listchildren(name) FROM nodes; (5 rows) - In the last SELECT, - notice that no output row appears for Child2, Child3, etc. - This happens because listchildren() returns an empty set + In the last SELECT, + notice that no output row appears for Child2, Child3, etc. + This happens because listchildren returns an empty set for those inputs, so no output rows are generated. @@ -607,7 +613,7 @@ CREATE FUNCTION square_root(double precision) RETURNS double precision - The user id the PostgreSQL server runs + The user ID the PostgreSQL server runs as must be able to traverse the path to the file you intend to load. Making the file or a higher-level directory not readable and/or not executable by the postgres user is a @@ -671,7 +677,7 @@ CREATE FUNCTION square_root(double precision) RETURNS double precision gives the C type required for parameters in the C functions that will be loaded into - PostgreSQL + PostgreSQL. The Defined In column gives the header file that needs to be included to get the type definition. 
(The actual definition may be in a different file that is included by the @@ -1262,9 +1268,9 @@ concat_text(PG_FUNCTION_ARGS) At first glance, the version-1 coding conventions may appear to be just pointless obscurantism. However, they do offer a number of improvements, because the macros can hide unnecessary detail. - An example is that in coding add_one_float8, we no longer need to - be aware that float8 is a pass-by-reference type. Another - example is that the GETARG macros for variable-length types hide + An example is that in coding add_one_float8, we no longer need to + be aware that float8 is a pass-by-reference type. Another + example is that the GETARG macros for variable-length types hide the need to deal with fetching toasted (compressed or out-of-line) values. The old-style copytext and concat_text functions shown above are @@ -1277,7 +1283,7 @@ concat_text(PG_FUNCTION_ARGS) One big improvement in version-1 functions is better handling of NULL - inputs and results. The macro PG_ARGISNULL(n) + inputs and results. The macro PG_ARGISNULL(n) allows a function to test whether each input is NULL (of course, doing this is only necessary in functions not declared strict). As with the @@ -1287,7 +1293,7 @@ concat_text(PG_FUNCTION_ARGS) PG_GETARG_xxx() until one has verified that the argument isn't NULL. To return a NULL result, execute PG_RETURN_NULL(); - this works in both strict and non-strict functions. + this works in both strict and nonstrict functions. @@ -1323,7 +1329,7 @@ FROM emp WHERE name = 'Bill' OR name = 'Sam'; - In the query above, we can define c_overpaid as: + In the query above, we can define c_overpaid as: #include "postgres.h" @@ -1371,13 +1377,13 @@ c_overpaid(PG_FUNCTION_ARGS) three arguments: the argument of type TupleTableSlot* passed into the function, the name of the desired attribute, and a return parameter that tells whether the attribute - is null. 
GetAttributeByName returns a Datum - value that you can convert to the proper datatype by using the + is null. GetAttributeByName returns a Datum + value that you can convert to the proper data type by using the appropriate DatumGetXXX() macro. - The following query lets PostgreSQL + The following command lets PostgreSQL know about the c_overpaid function: diff --git a/doc/src/sgml/xindex.sgml b/doc/src/sgml/xindex.sgml index 453f86ceae..1ef50c27b9 100644 --- a/doc/src/sgml/xindex.sgml +++ b/doc/src/sgml/xindex.sgml @@ -1,15 +1,18 @@ - - Interfacing Extensions To Indexes + + Interfacing Extensions To Indexes + + + Introduction - The procedures described thus far let you define a new type, new - functions and new operators. However, we cannot yet define a secondary - index (such as a B-tree, R-tree or + The procedures described thus far let you define new types, new + functions, and new operators. However, we cannot yet define a secondary + index (such as a B-tree, R-tree, or hash access method) over a new type or its operators. @@ -25,14 +28,19 @@ PostgreSQL documentation class for the B-tree access method that stores and sorts complex numbers in ascending absolute value order. + + + + Access Methods - The pg_am table contains one row for every index - access method. Support for the heap access method is built into - PostgreSQL, but every other access method is - described in pg_am. The schema is + The pg_am table contains one row for every + index access method. Support for the heap access method is built + into PostgreSQL, but all other access + methods are described in pg_am. The schema is + shown in . - +
Index Access Method Schema @@ -49,7 +57,7 @@ PostgreSQL documentation amowner - user id of the owner + user ID of the owner (currently not used) amstrategies @@ -66,7 +74,7 @@ PostgreSQL documentation amcanunique - does AM support UNIQUE indexes? + does AM support unique indexes? amcanmulticol @@ -89,7 +97,7 @@ PostgreSQL documentation ... procedure identifiers for interface routines to the access - method. For example, regproc ids for opening, closing, and + method. For example, regproc IDs for opening, closing, and getting rows from the access method appear here. @@ -104,49 +112,55 @@ PostgreSQL documentation you are interested in is the object ID of the access method you want to extend: - + SELECT oid FROM pg_am WHERE amname = 'btree'; oid ----- 403 (1 row) - + - We will use that SELECT in a WHERE + We will use that query in a WHERE clause later. + + + + Access Method Strategies - The amstrategies column exists to standardize - comparisons across data types. For example, B-trees + The amstrategies column exists to standardize + comparisons across data types. For example, B-trees impose a strict ordering on keys, lesser to greater. Since PostgreSQL allows the user to define operators, PostgreSQL cannot look at the name of an operator (e.g., > or <) and tell what kind of comparison it is. In fact, some access methods don't impose any ordering at all. For example, - R-trees express a rectangle-containment relationship, + R-trees express a rectangle-containment relationship, whereas a hashed data structure expresses only bitwise similarity based on the value of a hash function. PostgreSQL needs some consistent way of taking a qualification in your query, - looking at the operator and then deciding if a usable index exists. This + looking at the operator, and then deciding if a usable index exists. This implies that PostgreSQL needs to know, for example, that the <= and > operators partition a - B-tree. 
PostgreSQL - uses strategies to express these relationships between + B-tree. PostgreSQL + uses strategies to express these relationships between operators and the way they can be used to scan indexes. - Defining a new set of strategies is beyond the scope of this discussion, - but we'll explain how B-tree strategies work because + Defining a new set of strategies is beyond the scope of this + discussion, but we'll explain how B-tree strategies work because you'll need to know that to add a new B-tree operator class. In the - pg_am table, the amstrategies column is the - number of strategies defined for this access method. For - B-trees, this number is 5. These strategies - correspond to + pg_am table, the + amstrategies column sets the number of + strategies defined for this access method. For B-trees, this number + is 5. The meanings of these strategies are shown in . + -
+
B-tree StrategiesB-tree @@ -180,26 +194,29 @@ SELECT oid FROM pg_am WHERE amname = 'btree';
-
- The idea is that you'll need to add operators corresponding to the - comparisons above to the pg_amop relation (see below). + The idea is that you'll need to add operators corresponding to these strategies + to the pg_amop relation (see below). The access method code can use these strategy numbers, regardless of data - type, to figure out how to partition the B-tree, + type, to figure out how to partition the B-tree, compute selectivity, and so on. Don't worry about the details of adding operators yet; just understand that there must be a set of these - operators for int2, int4, oid, and every other - data type on which a B-tree can operate. + operators for int2, int4, oid, and all other + data types on which a B-tree can operate. +
+ + + Access Method Support Routines Sometimes, strategies aren't enough information for the system to figure out how to use an index. Some access methods require additional support - routines in order to work. For example, the B-tree + routines in order to work. For example, the B-tree access method must be able to compare two keys and determine whether one is greater than, equal to, or less than the other. Similarly, the - R-tree access method must be able to compute + R-tree access method must be able to compute intersections, unions, and sizes of rectangles. These operations do not correspond to operators used in qualifications in SQL queries; they are administrative routines used by @@ -209,60 +226,60 @@ SELECT oid FROM pg_am WHERE amname = 'btree'; In order to manage diverse support routines consistently across all PostgreSQL access methods, - pg_am includes a column called - amsupport. This column records the number of - support routines used by an access method. For B-trees, - this number is one -- the routine to take two keys and return -1, 0, or - +1, depending on whether the first key is less than, equal - to, or greater than the second. - - - - Strictly speaking, this routine can return a negative - number (< 0), zero, or a non-zero positive number (> 0). - - + pg_am includes a column called + amsupport. This column records the + number of support routines used by an access method. For B-trees, + this number is one: the routine to take two keys and return -1, 0, + or +1, depending on whether the first key is less than, equal to, + or greater than the second. (Strictly speaking, this routine can + return a negative number (< 0), zero, or a non-zero positive + number (> 0).) - The amstrategies entry in pg_am - is just the number - of strategies defined for the access method in question. The operators - for less than, less equal, and so on don't appear in - pg_am. 
Similarly, amsupport - is just the number of support routines required by the access - method. The actual routines are listed elsewhere. + The amstrategies entry in + pg_am is just the number of strategies + defined for the access method in question. The operators for less + than, less equal, and so on don't appear in + pg_am. Similarly, + amsupport is just the number of support + routines required by the access method. The actual routines are + listed elsewhere. - By the way, the amorderstrategy entry tells whether + By the way, the amorderstrategy column tells whether the access method supports ordered scan. Zero means it doesn't; if it - does, amorderstrategy is the number of the strategy + does, amorderstrategy is the number of the strategy routine that corresponds to the ordering operator. For example, B-tree - has amorderstrategy = 1 which is its + has amorderstrategy = 1, which is its less than strategy number. + + + + Operator Classes - The next table of interest is pg_opclass. This table + The next table of interest is pg_opclass. This table defines operator class names and input data types for each of the operator classes supported by a given index access method. The same class name can be used for several different access methods (for example, both B-tree and hash access methods have operator classes named - oid_ops), but a separate + oid_ops), but a separate pg_opclass row must appear for each access method. - The oid of the pg_opclass row is + The OID of the pg_opclass row is used as a foreign key in other tables to associate specific operators and support routines with the operator class. 
- You need to add a row with your opclass name (for example, - complex_abs_ops) to - pg_opclass: + You need to add a row with your operator class name (for example, + complex_abs_ops) to + pg_opclass: - + INSERT INTO pg_opclass (opcamid, opcname, opcintype, opcdefault, opckeytype) VALUES ( (SELECT oid FROM pg_am WHERE amname = 'btree'), @@ -279,52 +296,50 @@ SELECT oid, * --------+---------+-----------------+-----------+------------+------------ 277975 | 403 | complex_abs_ops | 277946 | t | 0 (1 row) - + - Note that the oid for your pg_opclass row will + Note that the OID for your pg_opclass row will be different! Don't worry about this though. We'll get this number - from the system later just like we got the oid of the type here. + from the system later just like we got the OID of the type here. - The above example assumes that you want to make this new opclass the - default B-tree opclass for the complex data type. - If you don't, just set opcdefault to false instead. - opckeytype is not described here; it should always - be zero for B-tree opclasses. + The above example assumes that you want to make this new operator class the + default B-tree operator class for the complex data type. + If you don't, just set opcdefault to false instead. + opckeytype is not described here; it should always + be zero for B-tree operator classes. + + + + Creating the Operators and Support Routines So now we have an access method and an operator class. We still need a set of operators. The procedure for - defining operators was discussed earlier in this manual. - For the complex_abs_ops operator class on B-trees, + defining operators was discussed in . 
+ For the complex_abs_ops operator class on B-trees, the operators we require are: - - absolute value less-than - absolute value less-than-or-equal - absolute value equal - absolute value greater-than-or-equal - absolute value greater-than - + + absolute-value less-than (strategy 1) + absolute-value less-than-or-equal (strategy 2) + absolute-value equal (strategy 3) + absolute-value greater-than-or-equal (strategy 4) + absolute-value greater-than (strategy 5) + Suppose the code that implements these functions is stored in the file - PGROOT/tutorial/complex.c, + PGROOT/src/tutorial/complex.c, which we have compiled into - PGROOT/tutorial/complex.so. - + PGROOT/src/tutorial/complex.so. + Part of the C code looks like this: - - Part of the C code looks like this: (note that we will only show the - equality operator for the rest of the examples. The other four - operators are very similar. Refer to complex.c - or complex.source for the details.) - - + #define Mag(c) ((c)->x*(c)->x + (c)->y*(c)->y) bool @@ -333,23 +348,27 @@ SELECT oid, * double amag = Mag(a), bmag = Mag(b); return (amag==bmag); } - + + (Note that we will only show the equality operator for the rest of + the examples. The other four operators are very similar. Refer to + complex.c or + complex.source for the details.) We make the function known to PostgreSQL like this: - -CREATE FUNCTION complex_abs_eq(complex, complex) - RETURNS bool - AS 'PGROOT/tutorial/complex' - LANGUAGE C; - + +CREATE FUNCTION complex_abs_eq(complex, complex) RETURNS boolean + AS 'PGROOT/src/tutorial/complex' + LANGUAGE C; + - There are some important things that are happening here. 
- + There are some important things that are happening here: + + First, note that operators for less-than, less-than-or-equal, equal, greater-than-or-equal, and greater-than for complex @@ -358,9 +377,11 @@ CREATE FUNCTION complex_abs_eq(complex, complex) we don't have any other operator = for complex, but if we were building a practical data type we'd probably want = to be the ordinary equality operation for complex numbers. In that case, - we'd need to use some other operator name for complex_abs_eq. + we'd need to use some other operator name for complex_abs_eq. + + Second, although PostgreSQL can cope with operators having the same name as long as they have different input data types, C can only @@ -369,7 +390,9 @@ CREATE FUNCTION complex_abs_eq(complex, complex) Usually it's a good practice to include the data type name in the C function name, so as not to conflict with functions for other data types. + + Third, we could have made the PostgreSQL name of the function abs_eq, relying on PostgreSQL to distinguish it @@ -377,58 +400,63 @@ CREATE FUNCTION complex_abs_eq(complex, complex) To keep the example simple, we make the function have the same names at the C level and PostgreSQL level. + + Finally, note that these operator functions return Boolean values. - In practice, all operators defined as index access method strategies - must return Boolean, since they must appear at the top level of a WHERE - clause to be used with an index. - (On the other - hand, the support function returns whatever the particular access method - expects -- in this case, a signed integer.) + In practice, all operators defined as index access method + strategies must return type boolean, since they must + appear at the top level of a WHERE clause to be used with an index. + (On the other hand, the support function returns whatever the + particular access method expects -- in this case, a signed + integer.) 
+ + + - The final routine in the - file is the support routine mentioned when we discussed the amsupport - column of the pg_am table. We will use this - later on. For now, ignore it. + The final routine in the file is the support routine + mentioned when we discussed the amsupport column of the + pg_am table. We will use this later on. For + now, ignore it. Now we are ready to define the operators: - + CREATE OPERATOR = ( leftarg = complex, rightarg = complex, procedure = complex_abs_eq, restrict = eqsel, join = eqjoinsel ); - + The important - things here are the procedure names (which are the C + things here are the procedure names (which are the C functions defined above) and the restriction and join selectivity functions. You should just use the selectivity functions used in the example (see complex.source). Note that there are different such functions for the less-than, equal, and greater-than - cases. These must be supplied, or the optimizer will be unable to + cases. These must be supplied or the optimizer will be unable to make effective use of the index. The next step is to add entries for these operators to - the pg_amop relation. To do this, - we'll need the oids of the operators we just + the pg_amop relation. To do this, + we'll need the OIDs of the operators we just defined. We'll look up the names of all the operators that take - two complexes, and pick ours out: + two operands of type complex, and pick ours out: - - SELECT o.oid AS opoid, o.oprname - INTO TEMP TABLE complex_ops_tmp - FROM pg_operator o, pg_type t - WHERE o.oprleft = t.oid and o.oprright = t.oid + +SELECT o.oid AS opoid, o.oprname + INTO TEMP TABLE complex_ops_tmp + FROM pg_operator o, pg_type t + WHERE o.oprleft = t.oid and o.oprright = t.oid and t.typname = 'complex'; opoid | oprname @@ -440,30 +468,30 @@ CREATE OPERATOR = ( 277973 | >= 277974 | > (6 rows) - + - (Again, some of your oid numbers will almost + (Again, some of your OID numbers will almost certainly be different.) 
The operators we are interested in are those - with oids 277970 through 277974. The values you + with OIDs 277970 through 277974. The values you get will probably be different, and you should substitute them for the values below. We will do this with a select statement. - Now we are ready to insert entries into pg_amop for + Now we are ready to insert entries into pg_amop for our new operator class. These entries must associate the correct B-tree strategy numbers with each of the operators we need. The command to insert the less-than operator looks like: - - INSERT INTO pg_amop (amopclaid, amopstrategy, amopreqcheck, amopopr) - SELECT opcl.oid, 1, false, c.opoid + +INSERT INTO pg_amop (amopclaid, amopstrategy, amopreqcheck, amopopr) + SELECT opcl.oid, 1, false, c.opoid FROM pg_opclass opcl, complex_ops_tmp c WHERE opcamid = (SELECT oid FROM pg_am WHERE amname = 'btree') AND opcname = 'complex_abs_ops' AND c.oprname = '<'; - + Now do this for the other operators substituting for the 1 in the second line above and the < in the last line. Note the order: @@ -478,51 +506,58 @@ CREATE OPERATOR = ( - The final step is registration of the support routine previously - described in our discussion of pg_am. The - oid of this support routine is stored in the - pg_amproc table, keyed by the operator class - oid and the support routine number. + The final step is the registration of the support routine previously + described in our discussion of pg_am. The + OID of this support routine is stored in the + pg_amproc table, keyed by the operator class + OID and the support routine number. 
+ + + First, we need to register the function in PostgreSQL (recall that we put the - C code that implements this routine in the bottom of + C code that implements this routine in the bottom of the file in which we implemented the operator routines): - - CREATE FUNCTION complex_abs_cmp(complex, complex) - RETURNS int4 - AS 'PGROOT/tutorial/complex' - LANGUAGE C; + +CREATE FUNCTION complex_abs_cmp(complex, complex) + RETURNS integer + AS 'PGROOT/src/tutorial/complex' + LANGUAGE C; - SELECT oid, proname FROM pg_proc - WHERE proname = 'complex_abs_cmp'; +SELECT oid, proname FROM pg_proc + WHERE proname = 'complex_abs_cmp'; oid | proname --------+----------------- 277997 | complex_abs_cmp (1 row) - + + + (Again, your OID number will probably be different.) + - (Again, your oid number will probably be different.) + We can add the new row as follows: - - INSERT INTO pg_amproc (amopclaid, amprocnum, amproc) - SELECT opcl.oid, 1, p.oid + +INSERT INTO pg_amproc (amopclaid, amprocnum, amproc) + SELECT opcl.oid, 1, p.oid FROM pg_opclass opcl, pg_proc p WHERE opcamid = (SELECT oid FROM pg_am WHERE amname = 'btree') AND opcname = 'complex_abs_ops' AND p.proname = 'complex_abs_cmp'; - + And we're done! (Whew.) It should now be possible to create - and use B-tree indexes on complex columns. + and use B-tree indexes on complex columns. + -
+
Extending <Acronym>SQL</Acronym>: Operators + + Introduction + PostgreSQL supports left unary, - right unary and binary + right unary, and binary operators. Operators can be overloaded; that is, the same operator name can be used for different operators - that have different numbers and types of arguments. If + that have different numbers and types of operands. If there is an ambiguous situation and the system cannot determine the correct operator to use, it will return - an error. You may have to typecast the left and/or + an error. You may have to type-cast the left and/or right operands to help it understand which operator you meant to use. @@ -22,20 +25,24 @@ $Header: /cvsroot/pgsql/doc/src/sgml/xoper.sgml,v 1.16 2001/11/21 06:09:45 thoma Every operator is syntactic sugar for a call to an underlying function that does the real work; so you must first create the underlying function before you can create - the operator. However, an operator is not - merely syntactic sugar, because it carries additional information + the operator. However, an operator is not merely + syntactic sugar, because it carries additional information that helps the query planner optimize queries that use the operator. Much of this chapter will be devoted to explaining that additional information. + + + + Example - Here is an example of creating an operator for adding two - complex numbers. We assume we've already created the definition - of type complex. First we need a function that does the work; - then we can define the operator: + Here is an example of creating an operator for adding two complex + numbers. We assume we've already created the definition of type + complex (see ). 
First we need a + function that does the work, then we can define the operator: - + CREATE FUNCTION complex_add(complex, complex) RETURNS complex AS 'PGROOT/tutorial/complex' @@ -47,34 +54,33 @@ CREATE OPERATOR + ( procedure = complex_add, commutator = + ); - + Now we can do: - + SELECT (a + b) AS c FROM test_complex; -+----------------+ -|c | -+----------------+ -|(5.2,6.05) | -+----------------+ -|(133.42,144.95) | -+----------------+ - + c +----------------- + (5.2,6.05) + (133.42,144.95) + - We've shown how to create a binary operator here. To - create unary operators, just omit one of leftarg (for - left unary) or rightarg (for right unary). The procedure - clause and the argument clauses are the only required items - in CREATE OPERATOR. The COMMUTATOR clause shown in the example - is an optional hint to the query optimizer. Further details about - COMMUTATOR and other optimizer hints appear below. + We've shown how to create a binary operator here. To create unary + operators, just omit one of leftarg (for left unary) or + rightarg (for right unary). The procedure + clause and the argument clauses are the only required items in + CREATE OPERATOR. The commutator + clause shown in the example is an optional hint to the query + optimizer. Further details about commutator and other + optimizer hints appear below. + Operator Optimization Information @@ -102,28 +108,28 @@ SELECT (a + b) AS c FROM test_complex; Additional optimization clauses might be added in future versions of PostgreSQL. The ones described here are all - the ones that release 6.5 understands. + the ones that release &version; understands. COMMUTATOR - The COMMUTATOR clause, if provided, names an operator that is the + The COMMUTATOR clause, if provided, names an operator that is the commutator of the operator being defined. We say that operator A is the commutator of operator B if (x A y) equals (y B x) for all possible input - values x,y. Notice that B is also the commutator of A. 
For example, + values x, y. Notice that B is also the commutator of A. For example, operators < and > for a particular data type are usually each others' commutators, and operator + is usually commutative with itself. But operator - is usually not commutative with anything. - The left argument type of a commuted operator is the same as the - right argument type of its commutator, and vice versa. So the name of + The left operand type of a commuted operator is the same as the + right operand type of its commutator, and vice versa. So the name of the commutator operator is all that PostgreSQL - needs to be given to look up the commutator, and that's all that need - be provided in the COMMUTATOR clause. + needs to be given to look up the commutator, and that's all that needs to + be provided in the COMMUTATOR clause. @@ -136,29 +142,29 @@ SELECT (a + b) AS c FROM test_complex; - One way is to omit the COMMUTATOR clause in the first operator that + One way is to omit the COMMUTATOR clause in the first operator that you define, and then provide one in the second operator's definition. Since PostgreSQL knows that commutative operators come in pairs, when it sees the second definition it will - automatically go back and fill in the missing COMMUTATOR clause in + automatically go back and fill in the missing COMMUTATOR clause in the first definition. - The other, more straightforward way is just to include COMMUTATOR clauses + The other, more straightforward way is just to include COMMUTATOR clauses in both definitions. When PostgreSQL processes - the first definition and realizes that COMMUTATOR refers to a non-existent + the first definition and realizes that COMMUTATOR refers to a non-existent operator, the system will make a dummy entry for that operator in the - system's pg_operator table. This dummy entry will have valid data only - for the operator name, left and right argument types, and result type, + system catalog. 
This dummy entry will have valid data only + for the operator name, left and right operand types, and result type, since that's all that PostgreSQL can deduce at this point. The first operator's catalog entry will link to this dummy entry. Later, when you define the second operator, the system updates the dummy entry with the additional information from the second definition. If you try to use the dummy operator before it's been filled - in, you'll just get an error message. (Note: this procedure did not work + in, you'll just get an error message. (Note: This procedure did not work reliably in PostgreSQL versions before 6.5, but it is now the recommended way to do things.) @@ -171,29 +177,29 @@ SELECT (a + b) AS c FROM test_complex; NEGATOR - The NEGATOR clause, if provided, names an operator that is the + The NEGATOR clause, if provided, names an operator that is the negator of the operator being defined. We say that operator A - is the negator of operator B if both return boolean results and - (x A y) equals NOT (x B y) for all possible inputs x,y. + is the negator of operator B if both return Boolean results and + (x A y) equals NOT (x B y) for all possible inputs x, y. Notice that B is also the negator of A. For example, < and >= are a negator pair for most data types. - An operator can never be validly be its own negator. + An operator can never validly be its own negator. - Unlike COMMUTATOR, a pair of unary operators could validly be marked + Unlike commutators, a pair of unary operators could validly be marked as each others' negators; that would mean (A x) equals NOT (B x) - for all x, or the equivalent for right-unary operators. + for all x, or the equivalent for right unary operators. - An operator's negator must have the same left and/or right argument types - as the operator itself, so just as with COMMUTATOR, only the operator - name need be given in the NEGATOR clause. 
+ An operator's negator must have the same left and/or right operand types + as the operator itself, so just as with COMMUTATOR, only the operator + name need be given in the NEGATOR clause. - Providing NEGATOR is very helpful to the query optimizer since + Providing a negator is very helpful to the query optimizer since it allows expressions like NOT (x = y) to be simplified into x <> y. This comes up more often than you might think, because NOTs can be inserted as a consequence of other rearrangements. @@ -210,21 +216,21 @@ SELECT (a + b) AS c FROM test_complex; RESTRICT - The RESTRICT clause, if provided, names a restriction selectivity + The RESTRICT clause, if provided, names a restriction selectivity estimation function for the operator (note that this is a function - name, not an operator name). RESTRICT clauses only make sense for - binary operators that return boolean. The idea behind a restriction + name, not an operator name). RESTRICT clauses only make sense for + binary operators that return boolean. The idea behind a restriction selectivity estimator is to guess what fraction of the rows in a - table will satisfy a WHERE-clause condition of the form - - field OP constant - + table will satisfy a WHERE-clause condition of the form + +column OP constant + for the current operator and a particular constant value. This assists the optimizer by - giving it some idea of how many rows will be eliminated by WHERE + giving it some idea of how many rows will be eliminated by WHERE clauses that have this form. (What happens if the constant is on the left, you may be wondering? Well, that's one of the things that - COMMUTATOR is for...) + COMMUTATOR is for...) @@ -232,12 +238,12 @@ SELECT (a + b) AS c FROM test_complex; the scope of this chapter, but fortunately you can usually just use one of the system's standard estimators for many of your own operators. 
These are the standard restriction estimators: - - eqsel for = - neqsel for <> - scalarltsel for < or <= - scalargtsel for > or >= - + + eqsel for = + neqsel for <> + scalarltsel for < or <= + scalargtsel for > or >= + It might seem a little odd that these are the categories, but they make sense if you think about it. = will typically accept only a small fraction of the rows in a table; <> will typically reject @@ -252,28 +258,28 @@ SELECT (a + b) AS c FROM test_complex; - You can frequently get away with using either eqsel or neqsel for + You can frequently get away with using either eqsel or neqsel for operators that have very high or very low selectivity, even if they aren't really equality or inequality. For example, the - approximate-equality geometric operators use eqsel on the assumption that + approximate-equality geometric operators use eqsel on the assumption that they'll usually only match a small fraction of the entries in a table. - You can use scalarltsel and scalargtsel for comparisons on data types that + You can use scalarltsel and scalargtsel for comparisons on data types that have some sensible means of being converted into numeric scalars for range comparisons. If possible, add the data type to those understood - by the routine convert_to_scalar() in src/backend/utils/adt/selfuncs.c. + by the routine convert_to_scalar() in src/backend/utils/adt/selfuncs.c. (Eventually, this routine should be replaced by per-data-type functions - identified through a column of the pg_type table; but that hasn't happened + identified through a column of the pg_type system catalog; but that hasn't happened yet.) If you do not do this, things will still work, but the optimizer's estimates won't be as good as they could be. There are additional selectivity functions designed for geometric - operators in src/backend/utils/adt/geo_selfuncs.c: areasel, positionsel, - and contsel. 
At this writing these are just stubs, but you may want + operators in src/backend/utils/adt/geo_selfuncs.c: areasel, positionsel, + and contsel. At this writing these are just stubs, but you may want to use them (or even better, improve them) anyway. @@ -282,16 +288,16 @@ SELECT (a + b) AS c FROM test_complex; JOIN - The JOIN clause, if provided, names a join selectivity + The JOIN clause, if provided, names a join selectivity estimation function for the operator (note that this is a function - name, not an operator name). JOIN clauses only make sense for - binary operators that return boolean. The idea behind a join + name, not an operator name). JOIN clauses only make sense for + binary operators that return boolean. The idea behind a join selectivity estimator is to guess what fraction of the rows in a - pair of tables will satisfy a WHERE-clause condition of the form - - table1.field1 OP table2.field2 - - for the current operator. As with the RESTRICT clause, this helps + pair of tables will satisfy a WHERE-clause condition of the form + +table1.column1 OP table2.column2 + + for the current operator. As with the RESTRICT clause, this helps the optimizer very substantially by letting it figure out which of several possible join sequences is likely to take the least work. 
@@ -300,15 +306,15 @@ SELECT (a + b) AS c FROM test_complex; As before, this chapter will make no attempt to explain how to write a join selectivity estimator function, but will just suggest that you use one of the standard estimators if one is applicable: - - eqjoinsel for = - neqjoinsel for <> - scalarltjoinsel for < or <= - scalargtjoinsel for > or >= - areajoinsel for 2D area-based comparisons - positionjoinsel for 2D position-based comparisons - contjoinsel for 2D containment-based comparisons - + + eqjoinsel for = + neqjoinsel for <> + scalarltjoinsel for < or <= + scalargtjoinsel for > or >= + areajoinsel for 2D area-based comparisons + positionjoinsel for 2D position-based comparisons + contjoinsel for 2D containment-based comparisons + @@ -316,19 +322,19 @@ SELECT (a + b) AS c FROM test_complex; HASHES - The HASHES clause, if present, tells the system that it is OK to - use the hash join method for a join based on this operator. HASHES - only makes sense for binary operators that return boolean, and + The HASHES clause, if present, tells the system that it is OK to + use the hash join method for a join based on this operator. HASHES + only makes sense for binary operators that return boolean, and in practice the operator had better be equality for some data type. The assumption underlying hash join is that the join operator can - only return TRUE for pairs of left and right values that hash to the + only return true for pairs of left and right values that hash to the same hash code. If two values get put in different hash buckets, the join will never compare them at all, implicitly assuming that the - result of the join operator must be FALSE. So it never makes sense - to specify HASHES for operators that do not represent equality. + result of the join operator must be false. So it never makes sense + to specify HASHES for operators that do not represent equality. 
@@ -353,18 +359,18 @@ SELECT (a + b) AS c FROM test_complex; There are also machine-dependent ways in which a hash join might fail to do the right thing. For example, if your data type is a structure in which there may be uninteresting pad bits, it's unsafe - to mark the equality operator HASHES. (Unless, perhaps, you write + to mark the equality operator HASHES. (Unless, perhaps, you write your other operators to ensure that the unused bits are always zero.) - Another example is that the FLOAT data types are unsafe for hash - joins. On machines that meet the IEEE floating point standard, minus + Another example is that the floating-point data types are unsafe for hash + joins. On machines that meet the IEEE floating-point standard, minus zero and plus zero are different values (different bit patterns) but - they are defined to compare equal. So, if float equality were marked - HASHES, a minus zero and a plus zero would probably not be matched up + they are defined to compare equal. So, if the equality operator on floating-point data types were marked + HASHES, a minus zero and a plus zero would probably not be matched up by a hash join, but they would be matched up by any other join process. - The bottom line is that you should probably only use HASHES for + The bottom line is that you should probably only use HASHES for equality operators that are (or could be) implemented by memcmp(). @@ -374,11 +380,11 @@ SELECT (a + b) AS c FROM test_complex; SORT1 and SORT2 - The SORT clauses, if present, tell the system that it is permissible to use + The SORT clauses, if present, tell the system that it is permissible to use the merge join method for a join based on the current operator. Both must be specified if either is. 
The current operator must be - equality for some pair of data types, and the SORT1 and SORT2 clauses - name the ordering operator ('<' operator) for the left and right-side + equality for some pair of data types, and the SORT1 and SORT2 clauses + name the ordering operator (< operator) for the left and right-side data types respectively. @@ -388,29 +394,29 @@ SELECT (a + b) AS c FROM test_complex; be capable of being fully ordered, and the join operator must be one that can only succeed for pairs of values that fall at the same place in the sort order. In practice this means that the join operator must - behave like equality. But unlike hashjoin, where the left and right + behave like equality. But unlike hash join, where the left and right data types had better be the same (or at least bitwise equivalent), - it is possible to mergejoin two + it is possible to merge-join two distinct data types so long as they are logically compatible. For - example, the int2-versus-int4 equality operator is mergejoinable. + example, the int2-versus-int4 equality operator is merge-joinable. We only need sorting operators that will bring both data types into a logically compatible sequence. - When specifying merge sort operators, the current operator and both - referenced operators must return boolean; the SORT1 operator must have - both input data types equal to the current operator's left argument type, - and the SORT2 operator must have - both input data types equal to the current operator's right argument type. - (As with COMMUTATOR and NEGATOR, this means that the operator name is + When specifying merge-sort operators, the current operator and both + referenced operators must return boolean; the SORT1 operator must have + both input data types equal to the current operator's left operand type, + and the SORT2 operator must have + both input data types equal to the current operator's right operand type. 
+ (As with COMMUTATOR and NEGATOR, this means that the operator name is sufficient to specify the operator, and the system is able to make dummy operator entries if you happen to define the equality operator before the other ones.) - In practice you should only write SORT clauses for an = operator, + In practice you should only write SORT clauses for an = operator, and the two referenced operators should always be named <. Trying to use merge join with operators named anything else will result in hopeless confusion, for reasons we'll see in a moment. @@ -418,14 +424,14 @@ SELECT (a + b) AS c FROM test_complex; There are additional restrictions on operators that you mark - mergejoinable. These restrictions are not currently checked by - CREATE OPERATOR, but a merge join may fail at runtime if any are + merge-joinable. These restrictions are not currently checked by + CREATE OPERATOR, but a merge join may fail at run time if any are not true: - The mergejoinable equality operator must have a commutator + The merge-joinable equality operator must have a commutator (itself if the two data types are the same, or a related equality operator if they are different). @@ -434,13 +440,13 @@ SELECT (a + b) AS c FROM test_complex; There must be < and > ordering operators having the same left and - right input data types as the mergejoinable operator itself. These + right operand data types as the merge-joinable operator itself. These operators must be named < and >; you do not have any choice in the matter, since there is no provision for specifying them explicitly. Note that if the left and right data types are different, neither of these operators is the same as either - SORT operator. But they had better order the data values compatibly - with the SORT operators, or mergejoin will fail to work. + SORT operator. But they had better order the data values compatibly + with the SORT operators, or the merge join will fail to work. 
diff --git a/doc/src/sgml/xplang.sgml b/doc/src/sgml/xplang.sgml index ce8c0d0ac7..9accf8daa7 100644 --- a/doc/src/sgml/xplang.sgml +++ b/doc/src/sgml/xplang.sgml @@ -1,10 +1,13 @@ Procedural Languages + + Introduction + PostgreSQL allows users to add new programming languages to be available for writing functions and @@ -23,11 +26,12 @@ $Header: /cvsroot/pgsql/doc/src/sgml/xplang.sgml,v 1.16 2001/11/21 06:09:45 thom - Writing a handler for a new procedural language is outside the - scope of this manual, although some information is provided in - the CREATE LANGUAGE reference page. Several procedural languages are - available in the standard PostgreSQL distribution. + Writing a handler for a new procedural language is described in + . Several procedural languages are + available in the standard PostgreSQL + distribution, which can serve as examples. + Installing Procedural Languages @@ -73,7 +77,7 @@ createlang plpgsql template1 - + The handler must be declared with the command @@ -88,43 +92,51 @@ CREATE FUNCTION handler_function_name () - + The PL must be declared with the command CREATE TRUSTED PROCEDURAL LANGUAGE language-name HANDLER handler_function_name; - The optional key word TRUSTED tells - whether ordinary database users that have no superuser - privileges should be allowed to use this language to create functions - and trigger procedures. Since PL functions are - executed inside the database backend, the TRUSTED - flag should only be given for - languages that do not allow access to database backends - internals or the file system. The languages PL/pgSQL, - PL/Tcl, and PL/Perl are known to be trusted; the language PL/TclU - should not be marked trusted. + The optional key word TRUSTED tells whether + ordinary database users that have no superuser privileges should + be allowed to use this language to create functions and trigger + procedures. 
+ Since PL functions are executed inside the database
+ server, the TRUSTED flag should only be given
+ for languages that do not allow access to database server
+ internals or the file system. The languages
+ PL/pgSQL,
+ PL/Tcl,
+ PL/Perl, and
+ PL/Python are known to be trusted;
+ the languages PL/TclU and
+ PL/PerlU are designed to provide
+ unlimited functionality and should not be
+ marked trusted.
- In a default PostgreSQL installation, the
- handler for the PL/pgSQL language is built and installed into the
- library directory. If Tcl/Tk support is configured
- in, the handlers for PL/Tcl and PL/TclU are also built and installed in
- the same location. Likewise, the PL/Perl handler is built and installed
- if Perl support is configured. The createlang
- script automates the two CREATE steps described above.
+ In a default PostgreSQL installation,
+ the handler for the PL/pgSQL language
+ is built and installed into the library
+ directory. If Tcl/Tk support is configured in, the handlers for
+ PL/Tcl and PL/TclU are also built and installed in the same
+ location. Likewise, the PL/Perl and PL/PerlU handlers are built
+ and installed if Perl support is configured, and PL/Python is
+ installed if Python support is configured. The
+ createlang script automates the steps described above.
- - Example + + Manual Installation of <application>PL/pgSQL</application> - - The following command tells the database where to find the + The following command tells the database server where to find the shared object for the PL/pgSQL language's call handler function. @@ -132,9 +144,7 @@ CREATE FUNCTION plpgsql_call_handler () RETURNS OPAQUE AS '$libdir/plpgsql' LANGUAGE C; - - The command @@ -145,8 +155,7 @@ CREATE TRUSTED PROCEDURAL LANGUAGE plpgsql should be invoked for functions and trigger procedures where the language attribute is plpgsql. 
- - + diff --git a/doc/src/sgml/xtypes.sgml b/doc/src/sgml/xtypes.sgml index 3ab3dd7e79..5d3146a96c 100644 --- a/doc/src/sgml/xtypes.sgml +++ b/doc/src/sgml/xtypes.sgml @@ -6,55 +6,73 @@ extending + + This chapter needs to be updated for the version-1 function manager + interface. + + - As previously mentioned, there are two kinds of types - in PostgreSQL: base types (defined in a programming language) - and composite types. - Examples in this section up to interfacing indexes can - be found in complex.sql and complex.c. Composite examples - are in funcs.sql. + As previously mentioned, there are two kinds of types in + PostgreSQL: base types (defined in a + programming language) and composite types. This chapter describes + how to define new base types. - - User-Defined Types + + The examples in this section can be found in + complex.sql and complex.c + in the tutorial directory. Composite examples are in + funcs.sql. + - - Functions Needed for a User-Defined Type - - A user-defined type must always have input and output - functions. These functions determine how the type - appears in strings (for input by the user and output to - the user) and how the type is organized in memory. The - input function takes a null-delimited character string - as its input and returns the internal (in memory) - representation of the type. The output function takes the - internal representation of the type and returns a null - delimited character string. - Suppose we want to define a complex type which represents - complex numbers. Naturally, we choose to represent a - complex in memory as the following C structure: - - + + + input function + + + output function + + A user-defined type must always have input and output functions. + These functions determine how the type appears in strings (for input + by the user and output to the user) and how the type is organized in + memory. 
The input function takes a null-terminated character string + as its input and returns the internal (in memory) representation of + the type. The output function takes the internal representation of + the type and returns a null-terminated character string. + + + + Suppose we want to define a complex type which represents complex + numbers. Naturally, we would choose to represent a complex in memory + as the following C structure: + + typedef struct Complex { double x; double y; } Complex; - - - and a string of the form (x,y) as the external string - representation. - These functions are usually not hard to write, especially - the output function. However, there are a number of points - to remember: - - - - When defining your external (string) representation, - remember that you must eventually write a - complete and robust parser for that representation - as your input function! - - + + + and a string of the form (x,y) as the external string + representation. + + + + The functions are usually not hard to write, especially the output + function. However, there are a number of points to remember: + + + + + When defining your external (string) representation, remember + that you must eventually write a complete and robust parser for + that representation as your input function! + + + + For instance: + + Complex * complex_in(char *str) { @@ -69,11 +87,13 @@ complex_in(char *str) result->y = y; return (result); } - + + - The output function can simply be: + + The output function can simply be: - + char * complex_out(Complex *complex) { @@ -84,30 +104,30 @@ complex_out(Complex *complex) sprintf(result, "(%g,%g)", complex->x, complex->y); return(result); } - - - - - - - You should try to make the input and output - functions inverses of each other. If you do - not, you will have severe problems when you need - to dump your data into a file and then read it - back in (say, into someone else's database on - another computer). 
This is a particularly common - problem when floating-point numbers are - involved. - - - + + + + + - To define the complex type, we need to create the two - user-defined functions complex_in and complex_out - before creating the type: + You should try to make the input and output functions inverses of + each other. If you do not, you will have severe problems when + you need to dump your data into a file and then read it back in + (say, into someone else's database on another computer). This is + a particularly common problem when floating-point numbers are + involved. + + + + - + + To define the complex type, we need to create the two + user-defined functions complex_in and + complex_out before creating the type: + + CREATE FUNCTION complex_in(opaque) RETURNS complex AS 'PGROOT/tutorial/complex' @@ -117,47 +137,57 @@ CREATE FUNCTION complex_out(opaque) RETURNS opaque AS 'PGROOT/tutorial/complex' LANGUAGE C; + + + + Finally, we can declare the data type: + CREATE TYPE complex ( internallength = 16, input = complex_in, output = complex_out ); - - - - - As discussed earlier, PostgreSQL fully supports arrays of - base types. Additionally, PostgreSQL supports arrays of - user-defined types as well. When you define a type, - PostgreSQL automatically provides support for arrays of - that type. For historical reasons, the array type has - the same name as the user-defined type with the - underscore character _ prepended. - Composite types do not need any function defined on - them, since the system already understands what they - look like inside. - - + + - - Large Objects - - - If the values of your datatype might exceed a few hundred bytes in - size (in internal form), you should be careful to mark them TOASTable. - To do this, the internal representation must follow the standard - layout for variable-length data: the first four bytes must be an int32 - containing the total length in bytes of the datum (including itself). 
- Then, all your functions that accept values of the type must be careful - to call pg_detoast_datum() on the supplied values --- after checking - that the value is not NULL, if your function is not strict. Finally, - select the appropriate storage option when giving the CREATE TYPE - command. - - - - + + + arrays + + As discussed earlier, PostgreSQL fully + supports arrays of base types. Additionally, + PostgreSQL supports arrays of + user-defined types as well. When you define a type, + PostgreSQL automatically provides support + for arrays of that type. For historical reasons, the array type has + the same name as the user-defined type with the underscore character + _ prepended. + + + + Composite types do not need any function defined on them, since the + system already understands what they look like inside. + + + + + TOAST + and user-defined types + + If the values of your datatype might exceed a few hundred bytes in + size (in internal form), you should be careful to mark them + TOAST-able. To do this, the internal representation must follow the + standard layout for variable-length data: the first four bytes must + be an int32 containing the total length in bytes of the + datum (including itself). Then, all your functions that accept + values of the type must be careful to call + pg_detoast_datum() on the supplied values --- + after checking that the value is not NULL, if your function is not + strict. Finally, select the appropriate storage option when giving + the CREATE TYPE command. + +