From 45f79cae14998e7f67d39997f241f2ae538ffd81 Mon Sep 17 00:00:00 2001 From: "Thomas G. Lockhart" Date: Tue, 2 May 2000 20:02:03 +0000 Subject: [PATCH] Fixups in content and markup for 7.0 release. --- doc/src/sgml/Makefile | 22 +- doc/src/sgml/admin.sgml | 66 ++-- doc/src/sgml/advanced.sgml | 6 +- doc/src/sgml/biblio.sgml | 16 +- doc/src/sgml/bki.sgml | 54 ++- doc/src/sgml/config.sgml | 524 +++++++++++++------------- doc/src/sgml/cvs.sgml | 6 +- doc/src/sgml/datatype.sgml | 427 +++++++++++++-------- doc/src/sgml/datetime.sgml | 4 +- doc/src/sgml/dfunc.sgml | 232 ++++++------ doc/src/sgml/docguide.sgml | 256 +++++++++---- doc/src/sgml/ecpg.sgml | 16 +- doc/src/sgml/environ.sgml | 4 +- doc/src/sgml/func.sgml | 123 +++---- doc/src/sgml/history.sgml | 4 +- doc/src/sgml/indices.sgml | 3 +- doc/src/sgml/inherit.sgml | 6 +- doc/src/sgml/install.sgml | 15 +- doc/src/sgml/installation.sgml | 114 +++--- doc/src/sgml/libpq++.sgml | 4 +- doc/src/sgml/libpq.sgml | 46 +-- doc/src/sgml/lisp.sgml | 5 +- doc/src/sgml/lobj.sgml | 460 +++++++++++------------ doc/src/sgml/manage-ag.sgml | 6 +- doc/src/sgml/manage.sgml | 16 +- doc/src/sgml/notation.sgml | 22 +- doc/src/sgml/odbc.sgml | 115 +++--- doc/src/sgml/oper.sgml | 30 +- doc/src/sgml/plsql.sgml | 4 +- doc/src/sgml/ports.sgml | 123 ++++--- doc/src/sgml/postgres.sgml | 172 ++++----- doc/src/sgml/problems.sgml | 87 +++-- doc/src/sgml/programmer.sgml | 57 +-- doc/src/sgml/ref/create_database.sgml | 8 +- doc/src/sgml/ref/create_index.sgml | 12 +- doc/src/sgml/ref/create_table.sgml | 66 ++-- doc/src/sgml/ref/initdb.sgml | 12 +- doc/src/sgml/ref/pgctl-ref.sgml | 5 +- doc/src/sgml/ref/postmaster.sgml | 6 +- doc/src/sgml/ref/vacuumdb.sgml | 40 +- doc/src/sgml/reference.sgml | 141 ++++--- doc/src/sgml/regress.sgml | 610 ++++++++++++++++-------------- doc/src/sgml/release.sgml | 176 ++++++--- doc/src/sgml/rules.sgml | 33 +- doc/src/sgml/runtime.sgml | 4 +- doc/src/sgml/signals.sgml | 19 +- doc/src/sgml/spi.sgml | 5 +- doc/src/sgml/sql.sgml | 8 +- doc/src/sgml/start.sgml | 16 +- doc/src/sgml/syntax.sgml | 86 +++-- doc/src/sgml/trigger.sgml | 673 +++++++++++++++++++++------------- doc/src/sgml/tutorial.sgml | 67 ++-- doc/src/sgml/typeconv.sgml | 19 +- doc/src/sgml/user.sgml | 64 ++-- doc/src/sgml/xfunc.sgml | 307 ++++++++-------- doc/src/sgml/xtypes.sgml | 264 +++++++------ doc/src/sgml/y2k.sgml | 6 +- 57 files changed, 3221 insertions(+), 2471 deletions(-) diff --git a/doc/src/sgml/Makefile b/doc/src/sgml/Makefile index 864d4621c2..e99c145723 100644 --- a/doc/src/sgml/Makefile +++ b/doc/src/sgml/Makefile @@ -8,7 +8,7 @@ # # # IDENTIFICATION -# $Header: /cvsroot/pgsql/doc/src/sgml/Makefile,v 1.13 2000/01/14 22:11:31 petere Exp $ +# $Header: /cvsroot/pgsql/doc/src/sgml/Makefile,v 1.14 2000/05/02 20:01:51 thomas Exp $ # #---------------------------------------------------------------------------- @@ -67,17 +67,18 @@ vpath %.sgml ./ref MANSOURCES= $(wildcard ref/*.sgml) -APPLICATIONS= createdb.sgml createuser.sgml \ - createlang.sgml \ - dropdb.sgml dropuser.sgml \ - droplang.sgml \ +APPLICATIONS= createdb.sgml createlang.sgml createuser.sgml \ + dropdb.sgml droplang.sgml dropuser.sgml \ + ecpg-ref.sgml \ initdb.sgml initlocation.sgml \ ipcclean.sgml \ pg_dump.sgml \ pg_dumpall.sgml \ + pg_passwd.sgml \ pg_upgrade.sgml \ pgaccess-ref.sgml \ pgadmin-ref.sgml \ + pgctl-ref.sgml \ pgtclsh.sgml \ pgtksh.sgml \ postgres-ref.sgml \ @@ -87,8 +88,9 @@ APPLICATIONS= createdb.sgml createuser.sgml \ COMMANDS= abort.sgml alter_group.sgml alter_table.sgml alter_user.sgml \ 
begin.sgml \ - close.sgml cluster.sgml commit.sgml copy.sgml \ - create_aggregate.sgml create_database.sgml create_function.sgml create_group.sgml \ + close.sgml cluster.sgml comment.sgml commit.sgml copy.sgml \ + create_aggregate.sgml create_constraint.sgml create_database.sgml \ + create_function.sgml create_group.sgml \ create_index.sgml \ create_language.sgml create_operator.sgml create_rule.sgml create_sequence.sgml \ create_table.sgml create_table_as.sgml create_trigger.sgml create_type.sgml \ @@ -98,12 +100,12 @@ COMMANDS= abort.sgml alter_group.sgml alter_table.sgml alter_user.sgml \ drop_index.sgml \ drop_language.sgml drop_operator.sgml drop_rule.sgml drop_sequence.sgml \ drop_table.sgml drop_trigger.sgml drop_type.sgml drop_user.sgml drop_view.sgml \ - explain.sgml fetch.sgml grant.sgml \ + end.sgml explain.sgml fetch.sgml grant.sgml \ insert.sgml listen.sgml load.sgml lock.sgml move.sgml \ notify.sgml \ - reset.sgml revoke.sgml rollback.sgml \ + reindex.sgml reset.sgml revoke.sgml rollback.sgml \ select.sgml select_into.sgml set.sgml show.sgml \ - unlisten.sgml update.sgml vacuum.sgml + truncate.sgml unlisten.sgml update.sgml vacuum.sgml FUNCTIONS= current_date.sgml current_time.sgml current_timestamp.sgml current_user.sgml diff --git a/doc/src/sgml/admin.sgml b/doc/src/sgml/admin.sgml index a3ce1cc52f..936ce863c6 100644 --- a/doc/src/sgml/admin.sgml +++ b/doc/src/sgml/admin.sgml @@ -1,5 +1,5 @@ - PostgreSQL Administrator's Guide - - Covering v6.5 for general release - - - The PostgreSQL Development Team - + PostgreSQL Administrator's Guide + + Covering v7.0 for general release + + + The PostgreSQL Development Team + - - Thomas - Lockhart - - Caltech/JPL - - + + Thomas + Lockhart + + Caltech/JPL + + @@ -63,17 +63,17 @@ Derived from postgres.sgml. TGL --> - (last updated 1999-06-01) - + (last updated 2000-05-01) + - - - PostgreSQL is Copyright © 1996-9 - by the Postgres Global Development Group. - - + + + PostgreSQL is Copyright © 1996-2000 + by PostgreSQL Inc. + + - + - - Summary + + Summary - - Postgres, + + Postgres, developed originally in the UC Berkeley Computer Science Department, pioneered many of the object-relational concepts now becoming available in some commercial databases. It provides SQL92/SQL3 language support, transaction integrity, and type extensibility. - PostgreSQL is an open-source descendant + PostgreSQL is an open-source descendant of this original Berkeley code. - - + + &intro-ag; @@ -128,7 +128,7 @@ Don't bother with an index until we get some index entries. --> - + @@ -103,12 +103,12 @@ SELECT c.name, c.altitude +----------+----------+ - Here the * after cities indicates that the query should + Here the "*" after cities indicates that the query should be run over cities and all classes below cities in the inheritance hierarchy. Many of the commands that we have already discussed (SELECT, UPDATE and DELETE) - support this * notation, as do others, like + support this inheritance notation using "*" as do other commands like ALTER. 
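As a brief sketch of this notation, assuming the cities example table shown in the query above, the whole hierarchy can be addressed directly:

    SELECT name, altitude
        FROM cities*               -- "*" takes in cities and every class below it
        WHERE altitude > 500;      -- illustrative values only

Leaving off the trailing "*" restricts the query to rows stored in cities itself.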
diff --git a/doc/src/sgml/biblio.sgml b/doc/src/sgml/biblio.sgml index 87124d0091..fc33e9bc8a 100644 --- a/doc/src/sgml/biblio.sgml +++ b/doc/src/sgml/biblio.sgml @@ -1,5 +1,5 @@ @@ -15,7 +15,8 @@ $Header: /cvsroot/pgsql/doc/src/sgml/biblio.sgml,v 1.12 2000/03/31 03:27:40 thom Postgres development team are available at - http://s2k-ftp.CS.Berkeley.EDU:8000/postgres/papers/ + the University of California, Berkeley, Computer Science + Department web site @@ -235,7 +236,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/biblio.sgml,v 1.12 2000/03/31 03:27:40 thom Lockhart - 1998-10-01 + 2000-05-01 The PostgreSQL Global Development Group @@ -261,7 +262,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/biblio.sgml,v 1.12 2000/03/31 03:27:40 thom Lockhart - 1998-10-01 + 2000-05-01 The PostgreSQL Global Development Group @@ -287,7 +288,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/biblio.sgml,v 1.12 2000/03/31 03:27:40 thom Lockhart - 1998-10-01 + 2000-05-01 The PostgreSQL Global Development Group @@ -313,7 +314,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/biblio.sgml,v 1.12 2000/03/31 03:27:40 thom Lockhart - 1998-10-01 + 2000-05-01 The PostgreSQL Global Development Group @@ -339,7 +340,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/biblio.sgml,v 1.12 2000/03/31 03:27:40 thom Lockhart - 1998-10-01 + 2000-05-01 The PostgreSQL Global Development Group @@ -585,6 +586,7 @@ http://simon.cs.cornell.edu/home/praveen/papers/partindex.de95.ps.Z + Seshardri, 1995 diff --git a/doc/src/sgml/bki.sgml b/doc/src/sgml/bki.sgml index 4a61fd6d6b..938a99d462 100644 --- a/doc/src/sgml/bki.sgml +++ b/doc/src/sgml/bki.sgml @@ -1,5 +1,5 @@ diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml index f1fe641231..59eda7a637 100644 --- a/doc/src/sgml/config.sgml +++ b/doc/src/sgml/config.sgml @@ -1,21 +1,23 @@ - -Configuration Options + + Configuration Options - - Parameters for Configuration (<application>configure</application>) + + Parameters for Configuration + (<application>configure</application>) - - The full set of parameters available in configure - can be obtained by typing + + The full set of parameters available in configure + can be obtained by typing - - $ ./configure --help - - - - The following parameters may be of interest to installers: + +$ ./configure --help + + + + + The following parameters may be of interest to installers: - + Directories to install PostgreSQL in: --prefix=PREFIX install architecture-independent files in PREFIX [/usr/local/pgsql] @@ -54,191 +56,198 @@ Features and packages: --with-CXX=compiler use specific C++ compiler --without-CXX prevent building C++ code - - - - Some systems may have trouble building a specific feature of - Postgres. For example, systems with a damaged - C++ compiler may need to specify to instruct - the build procedure to skip construction of libpq++. - - - Use the and - options if you want to build - Postgres using include files or libraries - that are not installed in your system's standard search path. For - example, you might use these to build with an experimental version of - Tcl. If you need to specify more than one nonstandard directory for - include files or libraries, do it like this: - - --with-includes="/opt/tcl/include /opt/perl5/include" - - - - - - - Parameters for Building (<application>make</application>) + + + + + Some systems may have trouble building a specific feature of + Postgres. For example, systems with a damaged + C++ compiler may need to specify to instruct + the build procedure to skip construction of libpq++. 
+ + + + Use the and + options if you want to build + Postgres using include files or libraries + that are not installed in your system's standard search path. For + example, you might use these to build with an experimental version of + Tcl. If you need to specify more than one nonstandard directory for + include files or libraries, do it like this: + + +--with-includes="/opt/tcl/include /opt/perl5/include" + + + + + + Parameters for Building (<application>make</application>) - - Many installation-related parameters can be set in the building - stage of Postgres installation. - - - In most cases, these parameters should be placed in a file, - Makefile.custom, intended just for that purpose. - The default distribution does not contain this optional file, so you - will create it using a text editor of your choice. When upgrading installations, - you can simply copy your old Makefile.custom to the new installation before - doing the build. - - - Alternatively, you can set variables on the make - command line: - - make [ variable=value [...] ] - - - - A few of the many variables that can be specified are: - - - - - POSTGRESDIR - - - - Top of the installation tree. - - - - - - - BINDIR - - - - Location of applications and utilities. - - - - - - - LIBDIR - - - - Location of object libraries, including shared libraries. - - - - - - - HEADERDIR - - - - Location of include files. - - - - - - - ODBCINST - - - - Location of installation-wide psqlODBC - (ODBC) configuration file. - - - - - - - - There are other optional parameters which are not as commonly used. - Many of those listed below are appropriate when doing - Postgres server code development. + + Many installation-related parameters can be set in the building + stage of Postgres installation. + + + + In most cases, these parameters should be placed in a file, + Makefile.custom, intended just for that purpose. + The default distribution does not contain this optional file, so you + will create it using a text editor of your choice. When upgrading installations, + you can simply copy your old Makefile.custom to the new installation before + doing the build. + + + + Alternatively, you can set variables on the make + command line: + + +make [ variable=value [...] ] + + + + + A few of the many variables that can be specified are: - - - - CFLAGS - - - - Set flags for the C compiler. - Should be assigned with "+=" to retain relevant default parameters. - - - - - - - YFLAGS - - - - Set flags for the yacc/bison parser. might be - used to help diagnose problems building a new parser. - Should be assigned with "+=" to retain relevant default parameters. - - - - - - - USE_TCL - - - - Enable Tcl interface building. - - - - - - - HSTYLE - - - - DocBook HTML style sheets for building the - documentation from scratch. - Not used unless you are developing new documentation from the - DocBook-compatible SGML source documents in - doc/src/sgml/. - - - - - - - PSTYLE - - - - DocBook style sheets for building printed documentation from scratch. - Not used unless you are developing new documentation from the - DocBook-compatible SGML source documents in - doc/src/sgml/. - - - - - - - - Here is an example Makefile.custom for a - PentiumPro Linux system: + + + + POSTGRESDIR + + + + Top of the installation tree. + + + + + + + BINDIR + + + + Location of applications and utilities. + + + + + + + LIBDIR + + + + Location of object libraries, including shared libraries. + + + + + + + HEADERDIR + + + + Location of include files. 
+ + + + + + + ODBCINST + + + + Location of installation-wide psqlODBC + (ODBC) configuration file. + + + + + + + + There are other optional parameters which are not as commonly used. + Many of those listed below are appropriate when doing + Postgres server code development. + + + + + CFLAGS + + + + Set flags for the C compiler. + Should be assigned with "+=" to retain relevant default parameters. + + + + + + + YFLAGS + + + + Set flags for the yacc/bison parser. might be + used to help diagnose problems building a new parser. + Should be assigned with "+=" to retain relevant default parameters. + + + + + + + USE_TCL + + + + Enable Tcl interface building. + + + + + + + HSTYLE + + + + DocBook HTML style sheets for building the + documentation from scratch. + Not used unless you are developing new documentation from the + DocBook-compatible SGML source documents in + doc/src/sgml/. + + + + + + + PSTYLE + + + + DocBook style sheets for building printed documentation from scratch. + Not used unless you are developing new documentation from the + DocBook-compatible SGML source documents in + doc/src/sgml/. + + + + + + + + + Here is an example Makefile.custom for a + PentiumPro Linux system: - + # Makefile.custom # Thomas Lockhart 1999-06-01 @@ -249,21 +258,22 @@ CFLAGS+= -m486 -O2 HSTYLE= /home/tgl/SGML/db118.d/docbook/html PSTYLE= /home/tgl/SGML/db118.d/docbook/print - - - - - Locale Support + + + + + + Locale Support - - - - Written by Oleg Bartunov. - See Oleg's web page - for additional information on locale and Russian language support. - - - + + + + Written by Oleg Bartunov. + See Oleg's web page + for additional information on locale and Russian language support. + + + While doing a project for a company in Moscow, Russia, I encountered the problem that postgresql had no support of national alphabets. After looking for possible workarounds @@ -271,7 +281,7 @@ PSTYLE= /home/tgl/SGML/db118.d/docbook/print I'm not a C-programer but already had some experience with locale programming when I work with perl (debugging) and glimpse. After several days of digging through - the Postgres source tree I made very minor corections to + the Postgres source tree I made very minor corections to src/backend/utils/adt/varlena.c and src/backend/main/main.c and got what I needed! I did support only for LC_CTYPE and LC_COLLATE, @@ -280,13 +290,13 @@ PSTYLE= /home/tgl/SGML/db118.d/docbook/print and (to my surprise) it was incorporated into the Postgres distribution. - + People often complain that locale doesn't work for them. There are several common mistakes: - - - + + + Didn't properly configure postgresql before compilation. You must run configure with --enable-locale option to enable locale support. Didn't setup environment correctly when starting postmaster. @@ -297,24 +307,24 @@ PSTYLE= /home/tgl/SGML/db118.d/docbook/print I use following shell script (runpostgres): - + #!/bin/sh export LC_CTYPE=koi8-r export LC_COLLATE=koi8-r postmaster -B 1024 -S -D/usr/local/pgsql/data/ -o '-Fe' - + and run it from rc.local as - + /bin/su - postgres -c "/home/postgres/runpostgres" - + - - - - + + + + Broken locale support in OS (for example, locale support in libc under Linux several times has changed and this caused a lot of problems). Latest perl has also support of @@ -333,10 +343,10 @@ PSTYLE= /home/tgl/SGML/db118.d/docbook/print perl: warning: Falling back to the standard locale ("C"). - - - - + + + + Wrong location of locale files! 
Possible locations include: @@ -348,15 +358,15 @@ PSTYLE= /home/tgl/SGML/db118.d/docbook/print Under Linux I did a symbolic link between /usr/lib/locale and /usr/share/locale to be sure that the next libc will not break my locale. - - - + + + - - What are the Benefits? + + What are the Benefits? - + You can use ~* and order by operators for strings contain characters from national alphabets. Non-english users definitely need that. If you won't use locale stuff just undefine @@ -364,20 +374,20 @@ PSTYLE= /home/tgl/SGML/db118.d/docbook/print - - What are the Drawbacks? + + What are the Drawbacks? - + There is one evident drawback of using locale - its speed! So, use locale only if you really need it. - - Kerberos Authentication + + Kerberos Authentication - + Kerberos is an industry-standard secure authentication system suitable for distributed computing over a public network. @@ -388,7 +398,7 @@ PSTYLE= /home/tgl/SGML/db118.d/docbook/print The Kerberos - authentication system is not distributed with Postgres. Versions of + authentication system is not distributed with Postgres. Versions of Kerberos are typically available as optional software from operating system vendors. In addition, a source code distribution may be obtained through @@ -414,9 +424,9 @@ PSTYLE= /home/tgl/SGML/db118.d/docbook/print MIT Project Athena. Note that FAQLs (Frequently-Asked Questions Lists) are periodically posted to the - Kerberos mailing list + Kerberos mailing list (send - mail to subscribe), + mail to subscribe), and USENET news group. @@ -435,19 +445,19 @@ PSTYLE= /home/tgl/SGML/db118.d/docbook/print is somehow readable by the Postgres account. - Postgres and its clients can be compiled to use + Postgres and its clients can be compiled to use either Version 4 or Version 5 of the MIT Kerberos protocols by setting the KRBVERS variable in the file src/Makefile.global to the appropriate value. You can also change the location where - Postgres + Postgres expects to find the associated libraries, header files and its own server key file. - After compilation is complete, Postgres + After compilation is complete, Postgres must be registered as a Kerberos service. See the Kerberos Operations Notes @@ -459,7 +469,7 @@ PSTYLE= /home/tgl/SGML/db118.d/docbook/print Operation - After initial installation, Postgres + After initial installation, Postgres should operate in all ways as a normal Kerberos service. For details on the use of authentication, see the @@ -477,13 +487,13 @@ PSTYLE= /home/tgl/SGML/db118.d/docbook/print User principal names (anames) are assumed to - contain the actual Unix/Postgres user name + contain the actual Unix/Postgres user name in the first component. - The Postgres service is assumed to be have two components, + The Postgres service is assumed to be have two components, the service name and a hostname, canonicalized as in Version 4 (i.e., with all domain suffixes removed). @@ -491,6 +501,7 @@ PSTYLE= /home/tgl/SGML/db118.d/docbook/print + Kerberos Parameter Examples @@ -543,3 +554,20 @@ PSTYLE= /home/tgl/SGML/db118.d/docbook/print + + diff --git a/doc/src/sgml/cvs.sgml b/doc/src/sgml/cvs.sgml index a158a5c2c0..50f9a0ccbc 100644 --- a/doc/src/sgml/cvs.sgml +++ b/doc/src/sgml/cvs.sgml @@ -1,5 +1,5 @@ @@ -88,7 +88,7 @@ $ cvs checkout -r REL6_4 tc 1.6 - then the tag TAG will reference + then the tag "TAG" will reference file1-1.2, file2-1.3, etc. @@ -606,7 +606,7 @@ $ which cvsup who are actively maintaining the code base originally developed by the DEC Systems Research Center. 
- The PM3 RPM distribution is roughly + The PM3 RPM distribution is roughly 30MB compressed. At the time of writing, the 1.1.10-1 release installed cleanly on RH-5.2, whereas the 1.1.11-1 release is apparently built for another release (RH-6.0?) and does not run on RH-5.2. diff --git a/doc/src/sgml/datatype.sgml b/doc/src/sgml/datatype.sgml index a80674bf62..1900b513be 100644 --- a/doc/src/sgml/datatype.sgml +++ b/doc/src/sgml/datatype.sgml @@ -1,5 +1,5 @@ @@ -262,9 +262,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/datatype.sgml,v 1.29 2000/04/14 15:08:56 th - The original Postgres v4.2 code received from - Berkeley rounded all double precision floating point results to six digits for - output. Starting with v6.1, floating point numbers are allowed to retain + Floating point numbers are allowed to retain most of the intrinsic precision of the type (typically 15 digits for doubles, 6 digits for 4-byte floats). Other types with underlying floating point fields (e.g. geometric @@ -277,8 +275,8 @@ $Header: /cvsroot/pgsql/doc/src/sgml/datatype.sgml,v 1.29 2000/04/14 15:08:56 th Numeric Types - Numeric types consist of two- and four-byte integers and four- and eight-byte - floating point numbers. + Numeric types consist of two- and four-byte integers, four- and eight-byte + floating point numbers and fixed-precision decimals. @@ -299,7 +297,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/datatype.sgml,v 1.29 2000/04/14 15:08:56 th decimal variable User-specified precision - no limit + ~8000 digits float4 @@ -554,13 +552,13 @@ CREATE TABLE tablename (Date/Time Types - PostgreSQL supports the full set of + Postgres supports the full set of SQL date and time types.
- <productname>PostgreSQL</productname> Date/Time Types + <productname>Postgres</productname> Date/Time TypesDate/Time @@ -576,7 +574,7 @@ CREATE TABLE tablename ( timestamp - for data containing both date and time + both date and time 8 bytes 4713 BC AD 1465001 @@ -584,7 +582,7 @@ CREATE TABLE tablename ( timestamp with time zone - date and time including time zone + date and time with time zone 8 bytes 1903 AD 2037 AD @@ -600,7 +598,7 @@ CREATE TABLE tablename ( date - for data containing only dates + dates only 4 bytes 4713 BC 32767 AD @@ -608,7 +606,7 @@ CREATE TABLE tablename ( time - for data containing only times of the day + times of day only 4 bytes 00:00:00.00 23:59:59.99 @@ -616,7 +614,7 @@ CREATE TABLE tablename ( time with time zone - times of the day + times of day only 4 bytes 00:00:00.00+12 23:59:59.99-12 @@ -628,13 +626,17 @@ CREATE TABLE tablename ( - To ensure compatibility to earlier versions of PostgreSQL + To ensure compatibility to earlier versions of Postgres we also continue to provide datetime (equivalent to timestamp) and - timespan (equivalent to interval). The types abstime + timespan (equivalent to interval), + however support for these is now restricted to having an + implicit translation to timestamp and + interval. + The types abstime and reltime are lower precision types which are used internally. You are discouraged from using any of these types in new applications and are encouraged to move any old - ones over when appropriate. Any or all of these types might disappear in a future release. + ones over when appropriate. Any or all of these internal types might disappear in a future release. @@ -648,11 +650,11 @@ CREATE TABLE tablename (ISO-8601, SQL-compatible, traditional Postgres, and others. The ordering of month and day in date input can be ambiguous, therefore a setting - exists to specify how it should be interpreted. The command + exists to specify how it should be interpreted in ambiguous cases. The command SET DateStyle TO 'US' or SET DateStyle TO 'NonEuropean' - specifies the variant month before day, the command + specifies the variant "month before day", the command SET DateStyle TO 'European' sets the variant - day before month. The ISO style + "day before month". The ISO style is the default but this default can be changed at compile time or at run time. @@ -672,7 +674,7 @@ CREATE TABLE tablename (date type.
- <productname>PostgreSQL</productname> Date Input + <productname>Postgres</productname> Date InputDate Inputs @@ -703,10 +705,6 @@ CREATE TABLE tablename (US; read as January 18 in any mode - 1999.008 - Year and day of year - - 19990108 ISO-8601 year, month, day @@ -724,7 +722,7 @@ CREATE TABLE tablename ( January 8, 99 BC - Year 99 before the common era + Year 99 before the Common Era @@ -733,7 +731,7 @@ CREATE TABLE tablename (
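Any of the forms in the table can be used wherever a date value is expected. A minimal sketch (the literals are illustrative; the ambiguous slash form follows the current DateStyle setting):

    SELECT date '1999-01-08';        -- ISO-8601, unambiguous in any mode
    SELECT date 'January 8, 1999';   -- unambiguous in any mode
    SET DateStyle TO 'European';     -- "day before month" for ambiguous input
    SELECT date '1/8/1999';          -- now read as 1 August 1999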
- <productname>PostgreSQL</productname> Month Abbreviations + <productname>Postgres</productname> Month AbbreviationsMonth Abbreviations @@ -800,7 +798,7 @@ CREATE TABLE tablename (
- <productname>PostgreSQL</productname> Day of Week Abbreviations + <productname>Postgres</productname> Day of Week AbbreviationsDay of Week Abbreviations @@ -850,7 +848,7 @@ CREATE TABLE tablename (time inputs.
- <productname>PostgreSQL</productname> Time Input + <productname>Postgres</productname> Time InputTime Inputs @@ -904,13 +902,14 @@ CREATE TABLE tablename ( time with time zone + This type is defined by SQL92, but the definition exhibits - fundamental deficiencies which renders the type near useless. In + fundamental deficiencies which renders the type nearly useless. In most cases, a combination of date, - time, and timestamp with time zone + time, and timestamp should provide a complete range of date/time functionality - required by an application. + required by any application. @@ -919,7 +918,7 @@ CREATE TABLE tablename ( - <productname>PostgreSQL</productname> Time With Time + <title><productname>Postgres</productname> Time With Time Zone Input Time With Time Zone Inputs @@ -959,89 +958,97 @@ CREATE TABLE tablename ( timestamp - - Valid input for the timestamp type consists of a concatenation - of a date and a time, followed by an optional AD or - BC, followed by an optional time zone. (See below.) - Thus - + + + Valid input for the timestamp type consists of a concatenation + of a date and a time, followed by an optional AD or + BC, followed by an optional time zone. (See below.) + Thus + + 1999-01-08 04:05:06 -8:00 - - is a valid timestamp value, which is ISO-compliant. - In addition, the wide-spread format - + + + is a valid timestamp value, which is ISO-compliant. + In addition, the wide-spread format + + January 8 04:05:06 1999 PST - - is supported. - + + is supported. + - -
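As a short sketch, both of the forms above are accepted directly as literals (output formatting then follows the current DateStyle setting):

    SELECT timestamp '1999-01-08 04:05:06 -8:00';     -- ISO-8601 with numeric zone offset
    SELECT timestamp 'January 8 04:05:06 1999 PST';   -- traditional wide-spread format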
- <productname>PostgreSQL</productname> Time Zone Input - Time Zone Inputs - - - - Time Zone - Description - - - - - PST - Pacific Standard Time - - - -8:00 - ISO-8601 offset for PST - - - -800 - ISO-8601 offset for PST - - - -8 - ISO-8601 offset for PST - - - -
-
+ + + <productname>Postgres</productname> Time Zone Input + Time Zone Inputs + + + + Time Zone + Description + + + + + PST + Pacific Standard Time + + + -8:00 + ISO-8601 offset for PST + + + -800 + ISO-8601 offset for PST + + + -8 + ISO-8601 offset for PST + + + +
+
interval + intervals can be specified with the following syntax: - + + Quantity Unit [Quantity Unit...] [Direction] @ Quantity Unit [Direction] - - where: Quantity is ..., -1, - 0, 1, 2, ...; - Unit is second, - minute, hour, day, - week, month, year, - decade, century, millennium, - or abbreviations or plurals of these units; - Direction can be ago or - empty. - - + + + where: Quantity is ..., -1, + 0, 1, 2, ...; + Unit is second, + minute, hour, day, + week, month, year, + decade, century, millennium, + or abbreviations or plurals of these units; + Direction can be ago or + empty. +
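For example, interval literals following this syntax include (a sketch only; the quantities are arbitrary):

    SELECT interval '10 minutes';           -- Quantity Unit
    SELECT interval '1 day 12 hours ago';   -- repeated Quantity Unit, plus a Direction
    SELECT interval '@ 3 weeks';            -- the leading "@" form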
+ - Special values - - The following SQL-compatible functions can be used as date or time - input for the corresponding datatype: CURRENT_DATE, - CURRENT_TIME, CURRENT_TIMESTAMP. - - - PostgreSQL also supports several special constants for - convenience. + Special values + + + The following SQL-compatible functions can be used as date or time + input for the corresponding datatype: CURRENT_DATE, + CURRENT_TIME, CURRENT_TIMESTAMP. + + + Postgres also supports several special constants for + convenience. - <productname>PostgresSQL</productname> Special Date/Time Constants + <productname>Postgres</productname> Special Date/Time ConstantsConstants @@ -1110,7 +1117,7 @@ January 8 04:05:06 1999 PST The default is the ISO format.
- <productname>PostgreSQL</productname> Date/Time Output Styles + <productname>Postgres</productname> Date/Time Output StylesStyles @@ -1148,7 +1155,7 @@ January 8 04:05:06 1999 PST The output of the date and time styles is of course - only the date or time part in accordance with the above examples + only the date or time part in accordance with the above examples. @@ -1157,22 +1164,25 @@ January 8 04:05:06 1999 PST at Date/Time Input, how this setting affects interpretation of input values.)
- <productname>PostgreSQL</productname> Date Order Conventions - Order + <productname>Postgres</productname> Date Order Conventions + Date Order Style Specification + Description Example European + day/month/year 17/12/1997 15:37:16.00 MET US + month/day/year 12/17/1997 07:37:16.00 PST @@ -1181,9 +1191,10 @@ January 8 04:05:06 1999 PST - interval output looks like the input format, expect that units like + interval output looks like the input format, except that units like week or century are converted to years and days. In ISO mode the output looks like + [ Quantity Units [ ... ] ] [ Days ] Hours:Minutes [ ago ] @@ -1219,7 +1230,7 @@ January 8 04:05:06 1999 PST Time Zones - PostgreSQL endeavors to be compatible with + Postgres endeavors to be compatible with SQL92 definitions for typical usage. However, the SQL92 standard has an odd mix of date and time types and capabilities. Two obvious problems are: @@ -1249,7 +1260,7 @@ January 8 04:05:06 1999 PST - To address these difficulties, PostgreSQL + To address these difficulties, Postgres associates time zones only with date and time types which contain both date and time, and assumes local time for any type containing only @@ -1260,7 +1271,7 @@ January 8 04:05:06 1999 PST - PostgreSQL obtains time zone support + Postgres obtains time zone support from the underlying operating system for dates between 1902 and 2038 (near the typical date limits for Unix-style systems). Outside of this range, all dates are assumed to be @@ -1322,7 +1333,7 @@ January 8 04:05:06 1999 PST Internals - PostgreSQL uses Julian dates + Postgres uses Julian dates for all date/time calculations. They have the nice property of correctly predicting/calculating any date more recent than 4713BC to far into the future, using the assumption that the length of the @@ -1476,13 +1487,32 @@ January 8 04:05:06 1999 PST point is specified using the following syntax: - -( x , y ) - x , y -where - x is the x-axis coordinate as a floating point number - y is the y-axis coordinate as a floating point number - + +( x , y ) + x , y + + + where the arguments are + + + + x + + + The x-axis coordinate as a floating point number. + + + + + + y + + + The y-axis coordinate as a floating point number. + + + + @@ -1495,13 +1525,26 @@ where lseg is specified using the following syntax: - -( ( x1 , y1 ) , ( x2 , y2 ) ) - ( x1 , y1 ) , ( x2 , y2 ) - x1 , y1 , x2 , y2 -where - (x1,y1) and (x2,y2) are the endpoints of the segment - + + +( ( x1 , y1 ) , ( x2 , y2 ) ) + ( x1 , y1 ) , ( x2 , y2 ) + x1 , y1 , x2 , y2 + + + where the arguments are + + + + (x1,y1) + (x2,y2) + + + The endpoints of the line segment. + + + + @@ -1516,14 +1559,28 @@ where box is specified using the following syntax: - -( ( x1 , y1 ) , ( x2 , y2 ) ) - ( x1 , y1 ) , ( x2 , y2 ) - x1 , y1 , x2 , y2 -where - (x1,y1) and (x2,y2) are opposite corners - + +( ( x1 , y1 ) , ( x2 , y2 ) ) + ( x1 , y1 ) , ( x2 , y2 ) + x1 , y1 , x2 , y2 + + + where the arguments are + + + + (x1,y1) + (x2,y2) + + + Opposite corners of the box. + + + + + + Boxes are output using the first syntax. The corners are reordered on input to store the lower left corner first and the upper right corner last. @@ -1546,24 +1603,37 @@ where isopen(p) and isclosed(p) - are supplied to select either type in a query. + are supplied to test for either type in a query. path is specified using the following syntax: - -( ( x1 , y1 ) , ... , ( xn , yn ) ) -[ ( x1 , y1 ) , ... , ( xn , yn ) ] - ( x1 , y1 ) , ... , ( xn , yn ) - ( x1 , y1 , ... , xn , yn ) - x1 , y1 , ... 
, xn , yn -where - (x1,y1),...,(xn,yn) are points 1 through n - a leading "[" indicates an open path - a leading "(" indicates a closed path - + +( ( x1 , y1 ) , ... , ( xn , yn ) ) +[ ( x1 , y1 ) , ... , ( xn , yn ) ] + ( x1 , y1 ) , ... , ( xn , yn ) + ( x1 , y1 , ... , xn , yn ) + x1 , y1 , ... , xn , yn + + + where the arguments are + + + + (x,y) + + + Endpoints of the line segments comprising the path. + A leading square bracket ("[") indicates an open path, while + a leading parenthesis ("(") indicates a closed path. + + + + + + Paths are output using the first syntax. Note that Postgres versions prior to v6.1 used a format for paths which had a single leading parenthesis, @@ -1587,19 +1657,33 @@ where polygon is specified using the following syntax: - -( ( x1 , y1 ) , ... , ( xn , yn ) ) - ( x1 , y1 ) , ... , ( xn , yn ) - ( x1 , y1 , ... , xn , yn ) - x1 , y1 , ... , xn , yn -where - (x1,y1),...,(xn,yn) are points 1 through n - + +( ( x1 , y1 ) , ... , ( xn , yn ) ) + ( x1 , y1 ) , ... , ( xn , yn ) + ( x1 , y1 , ... , xn , yn ) + x1 , y1 , ... , xn , yn + + + where the arguments are + + + + (x,y) + + + Endpoints of the line segments comprising the boundary of the + polygon. + + + + + + Polygons are output using the first syntax. Note that Postgres versions prior to v6.1 used a format for polygons which had a single leading parenthesis, the list - of x-axis coordinates, the list of y-axis coordinates, + of x-axis coordinates, the list of y-axis coordinates, followed by a closing parenthesis. The built-in function upgradepoly is supplied to convert polygons dumped and reloaded from pre-v6.1 databases. @@ -1616,16 +1700,37 @@ where circle is specified using the following syntax: - -< ( x , y ) , r > -( ( x , y ) , r ) - ( x , y ) , r - x , y , r -where - (x,y) is the center of the circle - r is the radius of the circle - + +< ( x , y ) , r > +( ( x , y ) , r ) + ( x , y ) , r + x , y , r + + + where the arguments are + + + + (x,y) + + + Center of the circle. + + + + + + r + + + Radius of the circle. + + + + + + Circles are output using the first syntax. diff --git a/doc/src/sgml/datetime.sgml b/doc/src/sgml/datetime.sgml index bfc3666a9f..dcc63d5564 100644 --- a/doc/src/sgml/datetime.sgml +++ b/doc/src/sgml/datetime.sgml @@ -1,5 +1,5 @@ @@ -645,7 +645,7 @@ Date/time details - Julian Day is different from Julian Date. + "Julian Day" is different from "Julian Date". The Julian calendar was introduced by Julius Caesar in 45 BC. It was in common use until the 1582, when countries started changing to the diff --git a/doc/src/sgml/dfunc.sgml b/doc/src/sgml/dfunc.sgml index c7a0bba563..ccdb25e60c 100644 --- a/doc/src/sgml/dfunc.sgml +++ b/doc/src/sgml/dfunc.sgml @@ -1,5 +1,5 @@ @@ -7,105 +7,6 @@ $Header: /cvsroot/pgsql/doc/src/sgml/dfunc.sgml,v 1.9 2000/03/31 03:27:40 thomas - - After you have created and registered a user-defined function, your work is essentially done. Postgres, @@ -120,8 +21,6 @@ procedure. describes how to perform the compilation and link-editing required before you can load your user-defined functions into a running Postgres server. - Note that - this process has changed as of Version 4.2. <acronym>DEC OSF/1</acronym> @@ -327,14 +251,15 @@ procedure. file with special compiler flags and a shared library must be produced. The necessary steps with HP-UX are as follows. 
The +z - flag to the HP-UX C compiler produces so-called - "Position Independent Code" (PIC) and the +u flag - removes + flag to the HP-UX C compiler produces + Position Independent Code (PIC) + and the +u flag removes some alignment restrictions that the PA-RISC architecture normally enforces. The object file must be turned into a shared library using the HP-UX link editor with the -b option. This sounds complicated but is actually very simple, since the commands to do it are just: + # simple HP-UX example % cc +z +u -c foo.c @@ -375,6 +300,95 @@ procedure. command line. + + + @@ -45,7 +23,7 @@ Add a note on sgml-tools that they are now working with jade and so The purpose of documentation is to make Postgres - easier to learn, use, and develop. + easier to learn, use, and extend.. The documentation set should describe the Postgres system, language, and interfaces. It should be able to answer @@ -61,18 +39,26 @@ Add a note on sgml-tools that they are now working with jade and so formats: - + + Plain text for pre-installation information. - - + + + + HTML, for on-line browsing and reference. - - - Hardcopy, for in-depth reading and reference. - - + + + + + Hardcopy (Postscript or PDF), for in-depth reading and reference. + + + + man pages, for quick reference. - + + @@ -983,7 +969,7 @@ $ make man - Hardcopy Generation for v6.5 + Hardcopy Generation for v7.0 The hardcopy Postscript documentation is generated by converting the @@ -1084,14 +1070,14 @@ $ make man - Export the result as ASCII Layout. + Export the result as "ASCII Layout". Using emacs or vi, clean up the tabular information in - INSTALL. Remove the mailto + INSTALL. Remove the "mailto" URLs for the porting contributors to shrink the column heights. @@ -1104,19 +1090,21 @@ $ make man Several areas are addressed while generating Postscript - hardcopy. + hardcopy, including RTF repair, ToC generation, and page break + adjustments. Applixware <acronym>RTF</acronym> Cleanup - Applixware does not seem to do a complete job of importing RTF - generated by jade/MSS. In particular, all text is given the - Header1 style attribute label, although the text - formatting itself is acceptable. Also, the Table of Contents page - numbers do not refer to the section listed in the table, but rather - refer to the page of the ToC itself. + jade, an integral part of the + hardcopy procedure, omits specifying a default style for body + text. In the past, this undiagnosed problem led to a long process + of Table of Contents (ToC) generation. However, with great help + from the ApplixWare folks the symptom was diagnosed and a + workaround is available. + @@ -1128,61 +1116,187 @@ $ make man - - - Open a new document in Applix Words and - then import the RTF file. - - - Print out the existing Table of Contents, to mark up in the following - few steps. + Repair the RTF file to correctly specify all + styles, in particular the default style. The field can be added + using vi or the following small + sed procedure: + + +#!/bin/sh +# fixrtf.sh +# Utility to repair slight damage in RTF files generated by jade +# Thomas Lockhart <lockhart@alumni.caltech.edu> +# +for i in $* ; do + mv $i $i.orig + cat $i.orig | sed 's#\\stylesheet#\\stylesheet{\\s0 Normal;}#' > $i +done + +exit + + + where the script is adding {\s0 Normal;} as + the zero-th style in the document. According to ApplixWare, the + RTF standard would prohibit adding an implicit zero-th style, + though M$Word happens to handle this case. - Insert figures into the document. 
Center each figure on the page using - the centering margins button. - - - Not all documents have figures. - You can grep the SGML source files for - the string graphic to identify those parts of the - documentation which may have figures. A few figures are replicated in - various parts of the documentation. + Open a new document in Applix Words and + then import the RTF file. - Work through the document, adjusting page breaks and table column - widths. + Generate a new ToC using ApplixWare. + + + + + Select the existing ToC lines, from the beginning of the first + character on the first line to the last character of the last + line. + + + + + + Build a new ToC using + Tools.BookBuilding.CreateToC. Select the + first three levels of headers for inclusion in the ToC. + This will + replace the existing lines imported in the RTF with a native + ApplixWare ToC. + + + + + + Adjust the ToC formatting by using + Format.Style, selecting each of the three + ToC styles, and adjusting the indents for First and + Left. Use the following values: + +
+ Indent Formatting for Table of Contents + + + + + Style + + + First Indent (inches) + + + Left Indent (inches) + + + + + + + + TOC-Heading 1 + + + 0.6 + + + 0.6 + + + + + + TOC-Heading 2 + + + 1.0 + + + 1.0 + + + + + + TOC-Heading 3 + + + 1.4 + + + 1.4 + + + + +
+
+ + - If a bibliography is present, Applix Words seems to mark all remaining - text after the first title as having an underlined attribute. Select - all remaining text, turn off underlining using the underlining button, - then explicitly underline each document and book title. + Work through the document to: + + + + + Adjust page breaks. + + + + + + Adjust table column widths. + + + + + + Insert figures into the document. Center each figure on the page using + the centering margins button on the ApplixWare toolbar. + + + + Not all documents have figures. + You can grep the SGML source files for + the string "graphic" to identify those parts of the + documentation which may have figures. A few figures are replicated in + various parts of the documentation. + + + + + - Work through the document, marking up the ToC hardcopy with the actual - page number of each ToC entry. + Replace the right-justified page numbers in the Examples and + Figures portions of the ToC with + correct values. This only takes a few minutes per document. - Replace the right-justified incorrect page numbers in the ToC with - correct values. This only takes a few minutes per document. + If a bibliography is present, remove the short + form reference title from each entry. The + DocBook stylesheets from Norm Walsh + seem to print these out, even though this is a subset of the + information immediately following. @@ -1195,7 +1309,7 @@ $ make man - Print the document + "Print" the document to a file in Postscript format. diff --git a/doc/src/sgml/ecpg.sgml b/doc/src/sgml/ecpg.sgml index 4ac1eba848..917493f2a8 100644 --- a/doc/src/sgml/ecpg.sgml +++ b/doc/src/sgml/ecpg.sgml @@ -1,5 +1,5 @@ @@ -32,16 +32,17 @@ $Header: /cvsroot/pgsql/doc/src/sgml/ecpg.sgml,v 1.13 2000/03/31 03:27:40 thomas This describes an embedded SQL in C package for Postgres. - It is written by Linus Tolke - and Michael Meskes. + It is written by Linus Tolke + and Michael Meskes. Permission is granted to copy and use in the same way as you are allowed - to copy and use the rest of the PostgreSQL. + to copy and use the rest of PostgreSQL.
+ Why Embedded <acronym>SQL</acronym>? @@ -472,8 +473,9 @@ struct sqlca The following list shows all the known incompatibilities. If you find one - not listed please notify Michael - Meskes. Note, however, that we list only incompatibilities from + not listed please notify + Michael Meskes. + Note, however, that we list only incompatibilities from a precompiler of another RDBMS to ecpg and not additional ecpg features that these RDBMS do not have. @@ -977,7 +979,7 @@ ECPGdo(__LINE__, NULL, "select res from mytable where index = ? ", This request is modified by the input variables, i.e. the variables that where not known at compile time but are to be entered in the request. Where the variables - should go the string contains ;. + should go the string contains ";". diff --git a/doc/src/sgml/environ.sgml b/doc/src/sgml/environ.sgml index 0e27901cae..dc5741a95d 100644 --- a/doc/src/sgml/environ.sgml +++ b/doc/src/sgml/environ.sgml @@ -38,8 +38,8 @@ $ export PATH to the .profile file in your home directory. From now on, we will assume that you have added the Postgres bin directory to your path. In addition, we - will make frequent reference to setting a shell - variable or setting an environment variable throughout + will make frequent reference to "setting a shell + variable" or "setting an environment variable" throughout this document. If you did not fully understand the last paragraph on modifying your search path, you should consult the Unix manual pages that describe your diff --git a/doc/src/sgml/func.sgml b/doc/src/sgml/func.sgml index 8110c6325c..55e83fb4b8 100644 --- a/doc/src/sgml/func.sgml +++ b/doc/src/sgml/func.sgml @@ -18,7 +18,7 @@ SQL Functions - SQL functions are constructs + SQL functions are constructs defined by the SQL92 standard which have function-like syntax but which can not be implemented as simple functions. @@ -477,24 +477,6 @@ age('now', timestamp '1957-06-13') - timestamp(abstime) - timestamp - convert to timestamp - timestamp(abstime 'now') - - - timestamp(date) - timestamp - convert to timestamp - timestamp(date 'today') - - - timestamp(date,time) - timestamp - convert to timestamp - timestamp(timestamp '1998-02-24',time '23:07'); - - date_part(text,timestamp) float8 portion of date @@ -513,10 +495,10 @@ date_trunc('month',abstime 'now') - isfinite(abstime) - bool - a finite time? - isfinite(abstime 'now') + interval(reltime) + interval + convert to interval + interval(reltime '4 hours') isfinite(timestamp) @@ -537,10 +519,22 @@ reltime(interval '4 hrs') - interval(reltime) - interval - convert to interval - interval(reltime '4 hours') + timestamp(date) + timestamp + convert to timestamp + timestamp(date 'today') + + + timestamp(date,time) + timestamp + convert to timestamp + timestamp(timestamp '1998-02-24',time '23:07'); + + + to_char(timestamp,text) + text + convert to string + to_char(timestamp '1998-02-24','DD'); @@ -674,6 +668,10 @@ hour of day (01-12) + HH24 + hour of day (00-23) + + MI minute (00-59) @@ -810,7 +808,7 @@ month in Roman Numerals (I-XII; I=JAN) - upper case - rn + rm month in Roman Numerals (I-XII; I=JAN) - lower case @@ -874,29 +872,34 @@ to_timestamp and to_date skip blank space if the FX option is - not use. FX Must be specified as the first item + not used. FX must be specified as the first item in the template. - '\' - must be use as double \\, example '\\HH\\MI\\SS' + Backslash ("\") must be specified with a double backslash + ("\\"); for example '\\HH\\MI\\SS'. - '"' - string between a quotation marks is skipen and not is parsed. 
- If you want write '"' to output you must use \\", example '\\"YYYY Month\\"'. + A double quote ('"') between quotation marks is skipped and is not parsed. + If you want to write a double quote to output you must preceed + it with a double backslash ('\\"), for + example '\\"YYYY Month\\"'. - text - the PostgreSQL's to_char() support text without '"', but string - between a quotation marks is fastly and you have guarantee, that a text - not will interpreted as a keyword (format-picture), exapmle '"Hello Year: "YYYY'. + to_char supports text without a leading + double quote ('"'), but any string + between a quotation marks is rapidly handled and you are + guaranteed that it will not be interpreted as a template + keyword (example: '"Hello Year: "YYYY'). @@ -1213,19 +1216,19 @@ area(object) float8 - area of circle, ... + area of item area(box '((0,0),(1,1))') box(box,box) box - boxes to intersection box + intersection box box(box '((0,0),(1,1))',box '((0.5,0.5),(2,2))') center(object) point - center of circle, ... + center of item center(box '((0,0),(1,2))') @@ -1255,16 +1258,10 @@ length(object) float8 - length of line segment, ... + length of item length(path '((-1,0),(1,0))') - length(path) - float8 - length of path - length(path '((0,0),(1,1),(2,0))') - - pclose(path) path convert path to closed @@ -1324,91 +1321,91 @@ Not defined by this name. Implements the intersection operator '#' box(circle) box - convert circle to box + circle to box box('((0,0),2.0)'::circle) box(point,point) box - convert points to box + points to box box('(0,0)'::point,'(1,1)'::point) box(polygon) box - convert polygon to box + polygon to box box('((0,0),(1,1),(2,0))'::polygon) circle(box) circle - convert to circle + to circle circle('((0,0),(1,1))'::box) circle(point,float8) circle - convert to circle + point to circle circle('(0,0)'::point,2.0) lseg(box) lseg - convert diagonal to lseg + box diagonal to lseg lseg('((-1,0),(1,0))'::box) lseg(point,point) lseg - convert to lseg + points to lseg lseg('(-1,0)'::point,'(1,0)'::point) path(polygon) point - convert to path + polygon to path path('((0,0),(1,1),(2,0))'::polygon) point(circle) point - convert to point (center) + center point('((0,0),2.0)'::circle) point(lseg,lseg) point - convert to point (intersection) + intersection point('((-1,0),(1,0))'::lseg, '((-2,-2),(2,2))'::lseg) point(polygon) point - center of polygon + center point('((0,0),(1,1),(2,0))'::polygon) polygon(box) polygon - convert to polygon with 12 points + 12 point polygon polygon('((0,0),(1,1))'::box) polygon(circle) polygon - convert to 12-point polygon + 12-point polygon polygon('((0,0),2.0)'::circle) polygon(npts,circle) polygon - convert to npts polygon + npts polygon polygon(12,'((0,0),2.0)'::circle) polygon(path) polygon - convert to polygon + path to polygon polygon('((0,0),(1,1),(2,0))'::path) @@ -1438,19 +1435,19 @@ Not defined by this name. 
Implements the intersection operator '#' revertpoly(polygon) polygon - convert pre-v6.1 polygon + to pre-v6.1 revertpoly('((0,0),(1,1),(2,0))'::polygon) upgradepath(path) path - convert pre-v6.1 path + to pre-v6.1 upgradepath('(1,3,0,0,1,1,2,0)'::path) upgradepoly(polygon) polygon - convert pre-v6.1 polygon + to pre-v6.1 upgradepoly('(0,1,2,0,1,0)'::polygon) diff --git a/doc/src/sgml/history.sgml b/doc/src/sgml/history.sgml index 05f45f9931..8fd14b0168 100644 --- a/doc/src/sgml/history.sgml +++ b/doc/src/sgml/history.sgml @@ -1,5 +1,5 @@ @@ -189,7 +189,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/history.sgml,v 1.7 2000/03/31 03:27:40 thom <productname>PostgreSQL</productname> - By 1996, it became clear that the name Postgres95 would + By 1996, it became clear that the name "Postgres95" would not stand the test of time. We chose a new name, PostgreSQL, to reflect the relationship between the original Postgres and the more diff --git a/doc/src/sgml/indices.sgml b/doc/src/sgml/indices.sgml index 2ea3e90879..84cb5036d3 100644 --- a/doc/src/sgml/indices.sgml +++ b/doc/src/sgml/indices.sgml @@ -354,7 +354,8 @@ CREATE MEMSTORE ON <table> COLUMNS <cols> is an index built over a subset of a table; the subset is defined by a predicate. Postgres supported partial indices with arbitrary - predicates. I believe IBM's db2 for as/400 supports partial indices + predicates. I believe IBM's DB2 + for AS/400 supports partial indices using single-clause predicates. diff --git a/doc/src/sgml/inherit.sgml b/doc/src/sgml/inherit.sgml index d71abd163a..f50c4bb34d 100644 --- a/doc/src/sgml/inherit.sgml +++ b/doc/src/sgml/inherit.sgml @@ -1,5 +1,5 @@ @@ -78,12 +78,12 @@ SELECT c.name, c.altitude Madison | 845 - Here the * after cities indicates that the query should + Here the "*" after cities indicates that the query should be run over cities and all classes below cities in the inheritance hierarchy. Many of the commands that we have already discussed -- SELECT, UPDATE and DELETE -- - support this * notation, as do others, like + support this "*" notation, as do others, like ALTER TABLE.
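As a sketch, assuming the cities example table from the query above, the same notation carries over to the other commands:

    UPDATE cities* SET altitude = altitude + 10
        WHERE name = 'Madison';                -- acts on the row wherever it is stored in the hierarchy
    DELETE FROM cities* WHERE altitude < 500;  -- removes matching rows from cities and all classes below it

Without the "*", only rows stored directly in cities would be affected.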
diff --git a/doc/src/sgml/install.sgml b/doc/src/sgml/install.sgml index 2c1c5714cd..4096ae9bd3 100644 --- a/doc/src/sgml/install.sgml +++ b/doc/src/sgml/install.sgml @@ -1,5 +1,5 @@ @@ -33,7 +33,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/install.sgml,v 1.40 2000/04/14 23:04: work with other make programs. On GNU/Linux systems GNU make is the default tool, on other systems you may find that GNU make is installed under the name - gmake. + gmake. We will use that name from now on to indicate GNU make, no matter what name it has on your system. To test for GNU make enter @@ -612,23 +612,28 @@ libpq.so.2.1: cannot open shared object file: No such file or directory Run the regression tests against the installed server (using the sequential test method). If you didn't run the tests before installation, you should definitely do it now. - For detailed instructions see . + For detailed instructions see + . - To start playing around, set up the paths as explained above + To start experimenting with Postgres, + set up the paths as explained above and start the server. To create a database, type + > createdb testdb + Then enter + > psql testdb + to connect to that database. At the prompt you can enter SQL commands and start experimenting. diff --git a/doc/src/sgml/installation.sgml b/doc/src/sgml/installation.sgml index a6e55e3b13..e71b99fa2d 100644 --- a/doc/src/sgml/installation.sgml +++ b/doc/src/sgml/installation.sgml @@ -1,5 +1,5 @@ -PostgreSQL Installation Guide - - Covering v6.5 for general release - - - The PostgreSQL Development Team - + PostgreSQL Installation Guide + + Covering v7.0 for general release + + + The PostgreSQL Development Team + - - Thomas - Lockhart - - Caltech/JPL - - + + Thomas + Lockhart + + Caltech/JPL + + @@ -57,17 +57,17 @@ Postgres quick Installation Guide. TGL --> - (last updated 1999-06-01) - + (last updated 2000-05-01) + - - -PostgreSQL is Copyright © 1996-9 -by the Postgres Global Development Group. - - + + + PostgreSQL is Copyright © 1996-2000 + by PostgreSQL Inc. + + - + - -Summary - - -Postgres, - developed originally in the UC Berkeley Computer Science Department, - pioneered many of the object-relational concepts - now becoming available in some commercial databases. -It provides SQL92/SQL3 language support, - transaction integrity, and type extensibility. - PostgreSQL is an open-source descendant - of this original Berkeley code. - - - - -Introduction - - -This installation procedure makes some assumptions about the desired configuration -and runtime environment for your system. This may be adequate for many installations, -and is almost certainly adequate for a first installation. But you may want to -do an initial installation up to the point of unpacking the source tree -and installing documentation, and then print or browse the -Administrator's Guide. - - - -&ports; -&install; -&config; -&release; + + Summary + + + Postgres, + developed originally in the UC Berkeley Computer Science Department, + pioneered many of the object-relational concepts + now becoming available in some commercial databases. + It provides SQL92/SQL3 language support, + transaction integrity, and type extensibility. + PostgreSQL is an open-source descendant + of this original Berkeley code. + + + + + Introduction + + + This installation procedure makes some assumptions about the desired configuration + and runtime environment for your system. This may be adequate for many installations, + and is almost certainly adequate for a first installation. 
But you may want to + do an initial installation up to the point of unpacking the source tree + and installing documentation, and then print or browse the + Administrator's Guide. + + + + &ports; + &install; + &config; + &release; @@ -719,7 +719,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/libpq++.sgml,v 1.15 2000/04/19 21:21: PgDatabase::PutLine or when the last string has been received from the backend using PgDatabase::GetLine. - It must be issued or the backend may get out of sync with + It must be issued or the backend may get "out of sync" with the frontend. Upon return from this function, the backend is ready to receive the next query. diff --git a/doc/src/sgml/libpq.sgml b/doc/src/sgml/libpq.sgml index 0693d9695f..c14f9ee260 100644 --- a/doc/src/sgml/libpq.sgml +++ b/doc/src/sgml/libpq.sgml @@ -1,5 +1,5 @@ @@ -8,11 +8,11 @@ $Header: /cvsroot/pgsql/doc/src/sgml/libpq.sgml,v 1.37 2000/04/25 16:39:07 momji libpq is the C application programmer's interface to - PostgreSQL. libpq is a set + Postgres. libpq is a set of library routines that allow client programs to pass queries to the Postgres backend server and to receive the results of these queries. libpq is also the - underlying engine for several other PostgreSQL + underlying engine for several other Postgres application interfaces, including libpq++ (C++), libpgtcl (Tcl), Perl, and ecpg. So some aspects of libpq's behavior will be @@ -415,7 +415,7 @@ struct PQconninfoOption is leaked for each call to PQconndefaults(). - In PostgreSQL versions before 7.0, PQconndefaults() returned a pointer + In Postgres versions before 7.0, PQconndefaults() returned a pointer to a static array, rather than a dynamically allocated array. That wasn't thread-safe, so the behavior has been changed. @@ -484,7 +484,7 @@ libpq application programmers should be careful to maintain the PGconn abstraction. Use the accessor functions below to get at the contents of PGconn. Avoid directly referencing the fields of the PGconn structure because they are subject to change in the future. -(Beginning in PostgreSQL release 6.4, the +(Beginning in Postgres release 6.4, the definition of struct PGconn is not even provided in libpq-fe.h. If you have old code that accesses PGconn fields directly, you can keep using it by including libpq-int.h too, but you are encouraged to fix the code @@ -985,7 +985,7 @@ and is not thread-safe. PQprint Prints out all the tuples and, optionally, the attribute names to the specified output stream. - + void PQprint(FILE* fout, /* output stream */ const PGresult *res, const PQprintOpt *po); @@ -998,11 +998,11 @@ struct { pqbool expanded; /* expand tables */ pqbool pager; /* use pager for output if needed */ char *fieldSep; /* field separator */ - char *tableOpt; /* insert to HTML <table ...> */ - char *caption; /* HTML <caption> */ + char *tableOpt; /* insert to HTML table ... */ + char *caption; /* HTML caption */ char **fieldName; /* null terminated array of replacement field names */ } PQprintOpt; - + This function was formerly used by psql to print query results, but this is no longer the case and this function is no longer actively supported. @@ -1342,7 +1342,7 @@ is not currently open or the backend is not currently processing a query. Fast Path -PostgreSQL provides a fast path interface to send +Postgres provides a fast path interface to send function calls to the backend. This is a trapdoor into system internals and can be a potential security hole. Most users will not need this feature. 
@@ -1398,7 +1398,7 @@ typedef struct { Asynchronous Notification -PostgreSQL supports asynchronous notification via the +Postgres supports asynchronous notification via the LISTEN and NOTIFY commands. A backend registers its interest in a particular notification condition with the LISTEN command (and can stop listening with the UNLISTEN command). All backends listening on a @@ -1438,7 +1438,7 @@ be sure to free it with free() to avoid a memory leak. - In PostgreSQL 6.4 and later, + In Postgres 6.4 and later, the be_pid is the notifying backend's, whereas in earlier versions it was always your own backend's PID. @@ -1484,7 +1484,7 @@ if any notifications came in during the processing of the query. Functions Associated with the COPY Command - The COPY command in PostgreSQL has options to read from + The COPY command in Postgres has options to read from or write to the network connection used by libpq. Therefore, functions are necessary to access this network connection directly so applications may take advantage of this capability. @@ -1568,7 +1568,7 @@ The data returned will not extend beyond a newline character. If possible a whole line will be returned at one time. But if the buffer offered by the caller is too small to hold a line sent by the backend, then a partial data line will be returned. This can be detected by testing whether the -last returned byte is \n or not. +last returned byte is "\n" or not. The returned string is not null-terminated. (If you want to add a terminating null, be sure to pass a bufsize one smaller than the room actually available.) @@ -1585,7 +1585,7 @@ int PQputline(PGconn *conn, const char *string); Note the application must explicitly send the two -characters \. on a final line to indicate to +characters "\." on a final line to indicate to the backend that it has finished sending its data. @@ -1615,7 +1615,7 @@ specified directly. sent to the backend using PQputline or when the last string has been received from the backend using PGgetline. It must be issued or the backend - may get out of sync with the frontend. Upon + may get "out of sync" with the frontend. Upon return from this function, the backend is ready to receive the next query. The return value is 0 on successful completion, @@ -1718,7 +1718,7 @@ PQsetNoticeProcessor(PGconn *conn, -By default, libpq prints notice +By default, libpq prints "notice" messages from the backend on stderr, as well as a few error messages that it generates by itself. This behavior can be overridden by supplying a callback function that @@ -1777,14 +1777,14 @@ Without a host name, libpq will connect using a local Unix domain socket. PGPORT sets the default port or local Unix domain socket -file extension for communicating with the PostgreSQL +file extension for communicating with the Postgres backend. PGDATABASE sets the default -PostgreSQL database name. +Postgres database name. @@ -1802,8 +1802,8 @@ sets the password used if the backend demands password authentication. PGREALM sets the Kerberos realm to use with -PostgreSQL, if it is different from the local realm. -If PGREALM is set, PostgreSQL +Postgres, if it is different from the local realm. +If PGREALM is set, Postgres applications will attempt authentication with servers for this realm and use separate ticket files to avoid conflicts with local ticket files. This environment variable is only @@ -1813,7 +1813,7 @@ used if Kerberos authentication is selected by the backend. PGOPTIONS sets additional runtime options for -the PostgreSQL backend. 
+the Postgres backend. @@ -1878,7 +1878,7 @@ for information on correct values for these environment variables. libpq is thread-safe as of -PostgreSQL 7.0, so long as no two threads +Postgres 7.0, so long as no two threads attempt to manipulate the same PGconn object at the same time. In particular, you can't issue concurrent queries from different threads through the same connection object. (If you need to run concurrent queries, start up multiple diff --git a/doc/src/sgml/lisp.sgml b/doc/src/sgml/lisp.sgml index ecd5abed5f..c2682525e1 100644 --- a/doc/src/sgml/lisp.sgml +++ b/doc/src/sgml/lisp.sgml @@ -1,5 +1,5 @@ @@ -31,8 +31,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/lisp.sgml,v 2.2 2000/03/31 03:27:41 t The code (version 0.2) is available under GNU GPL from - - http://www.chez.com/emarsden/downloads/pg.el + Eric Marsden diff --git a/doc/src/sgml/lobj.sgml b/doc/src/sgml/lobj.sgml index 59e0985470..e052e83c0c 100644 --- a/doc/src/sgml/lobj.sgml +++ b/doc/src/sgml/lobj.sgml @@ -1,5 +1,5 @@ @@ -288,235 +288,235 @@ SELECT lo_export(image.raster, '/tmp/motd') from image /*-------------------------------------------------------------- - * - * testlo.c-- - * test using large objects with libpq - * - * Copyright (c) 1994, Regents of the University of California - * - * - * IDENTIFICATION - * /usr/local/devel/pglite/cvs/src/doc/manual.me,v 1.16 1995/09/01 23:55:00 jolly Exp - * - *-------------------------------------------------------------- - */ - #include <stdio.h> - #include "libpq-fe.h" - #include "libpq/libpq-fs.h" - - #define BUFSIZE 1024 - - /* - * importFile * import file "in_filename" into database as large object "lobjOid" - * - */ - Oid importFile(PGconn *conn, char *filename) - { - Oid lobjId; - int lobj_fd; - char buf[BUFSIZE]; - int nbytes, tmp; - int fd; - - /* - * open the file to be read in - */ - fd = open(filename, O_RDONLY, 0666); - if (fd < 0) { /* error */ - fprintf(stderr, "can't open unix file %s\n", filename); - } - - /* - * create the large object - */ - lobjId = lo_creat(conn, INV_READ|INV_WRITE); - if (lobjId == 0) { - fprintf(stderr, "can't create large object\n"); - } - - lobj_fd = lo_open(conn, lobjId, INV_WRITE); - /* - * read in from the Unix file and write to the inversion file - */ - while ((nbytes = read(fd, buf, BUFSIZE)) > 0) { - tmp = lo_write(conn, lobj_fd, buf, nbytes); - if (tmp < nbytes) { - fprintf(stderr, "error while reading large object\n"); - } - } - - (void) close(fd); - (void) lo_close(conn, lobj_fd); - - return lobjId; - } - - void pickout(PGconn *conn, Oid lobjId, int start, int len) - { - int lobj_fd; - char* buf; - int nbytes; - int nread; - - lobj_fd = lo_open(conn, lobjId, INV_READ); - if (lobj_fd < 0) { - fprintf(stderr,"can't open large object %d\n", - lobjId); - } - - lo_lseek(conn, lobj_fd, start, SEEK_SET); - buf = malloc(len+1); - - nread = 0; - while (len - nread > 0) { - nbytes = lo_read(conn, lobj_fd, buf, len - nread); - buf[nbytes] = ' '; - fprintf(stderr,">>> %s", buf); - nread += nbytes; - } - fprintf(stderr,"\n"); - lo_close(conn, lobj_fd); - } - - void overwrite(PGconn *conn, Oid lobjId, int start, int len) - { - int lobj_fd; - char* buf; - int nbytes; - int nwritten; - int i; - - lobj_fd = lo_open(conn, lobjId, INV_READ); - if (lobj_fd < 0) { - fprintf(stderr,"can't open large object %d\n", - lobjId); - } - - lo_lseek(conn, lobj_fd, start, SEEK_SET); - buf = malloc(len+1); - - for (i=0;i<len;i++) - buf[i] = 'X'; - buf[i] = ' '; - - nwritten = 0; - while (len - nwritten > 0) { - nbytes = lo_write(conn, lobj_fd, buf + 
nwritten, len - nwritten); - nwritten += nbytes; - } - fprintf(stderr,"\n"); - lo_close(conn, lobj_fd); - } - - /* - * exportFile * export large object "lobjOid" to file "out_filename" - * - */ - void exportFile(PGconn *conn, Oid lobjId, char *filename) - { - int lobj_fd; - char buf[BUFSIZE]; - int nbytes, tmp; - int fd; - - /* - * create an inversion "object" - */ - lobj_fd = lo_open(conn, lobjId, INV_READ); - if (lobj_fd < 0) { - fprintf(stderr,"can't open large object %d\n", - lobjId); - } - - /* - * open the file to be written to - */ - fd = open(filename, O_CREAT|O_WRONLY, 0666); - if (fd < 0) { /* error */ - fprintf(stderr, "can't open unix file %s\n", - filename); - } - - /* - * read in from the Unix file and write to the inversion file - */ - while ((nbytes = lo_read(conn, lobj_fd, buf, BUFSIZE)) > 0) { - tmp = write(fd, buf, nbytes); - if (tmp < nbytes) { - fprintf(stderr,"error while writing %s\n", - filename); - } - } - - (void) lo_close(conn, lobj_fd); - (void) close(fd); - - return; - } - - void - exit_nicely(PGconn* conn) - { - PQfinish(conn); - exit(1); - } - - int - main(int argc, char **argv) - { - char *in_filename, *out_filename; - char *database; - Oid lobjOid; - PGconn *conn; - PGresult *res; - - if (argc != 4) { - fprintf(stderr, "Usage: %s database_name in_filename out_filename\n", - argv[0]); - exit(1); - } - - database = argv[1]; - in_filename = argv[2]; - out_filename = argv[3]; - - /* - * set up the connection - */ - conn = PQsetdb(NULL, NULL, NULL, NULL, database); - - /* check to see that the backend connection was successfully made */ - if (PQstatus(conn) == CONNECTION_BAD) { - fprintf(stderr,"Connection to database '%s' failed.\n", database); - fprintf(stderr,"%s",PQerrorMessage(conn)); - exit_nicely(conn); - } - - res = PQexec(conn, "begin"); - PQclear(res); - - printf("importing file %s\n", in_filename); - /* lobjOid = importFile(conn, in_filename); */ - lobjOid = lo_import(conn, in_filename); - /* - printf("as large object %d.\n", lobjOid); - - printf("picking out bytes 1000-2000 of the large object\n"); - pickout(conn, lobjOid, 1000, 1000); - - printf("overwriting bytes 1000-2000 of the large object with X's\n"); - overwrite(conn, lobjOid, 1000, 1000); - */ - - printf("exporting large object to file %s\n", out_filename); - /* exportFile(conn, lobjOid, out_filename); */ - lo_export(conn, lobjOid,out_filename); - - res = PQexec(conn, "end"); - PQclear(res); - PQfinish(conn); - exit(0); - } + * + * testlo.c-- + * test using large objects with libpq + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * /usr/local/devel/pglite/cvs/src/doc/manual.me,v 1.16 1995/09/01 23:55:00 jolly Exp + * + *-------------------------------------------------------------- + */ +#include <stdio.h> +#include "libpq-fe.h" +#include "libpq/libpq-fs.h" + +#define BUFSIZE 1024 + +/* + * importFile * import file "in_filename" into database as large object "lobjOid" + * + */ +Oid importFile(PGconn *conn, char *filename) +{ + Oid lobjId; + int lobj_fd; + char buf[BUFSIZE]; + int nbytes, tmp; + int fd; + + /* + * open the file to be read in + */ + fd = open(filename, O_RDONLY, 0666); + if (fd < 0) { /* error */ + fprintf(stderr, "can't open unix file %s\n", filename); + } + + /* + * create the large object + */ + lobjId = lo_creat(conn, INV_READ|INV_WRITE); + if (lobjId == 0) { + fprintf(stderr, "can't create large object\n"); + } + + lobj_fd = lo_open(conn, lobjId, INV_WRITE); + /* + * read in from the Unix file and write to the inversion file 
+ */ + while ((nbytes = read(fd, buf, BUFSIZE)) > 0) { + tmp = lo_write(conn, lobj_fd, buf, nbytes); + if (tmp < nbytes) { + fprintf(stderr, "error while reading large object\n"); + } + } + + (void) close(fd); + (void) lo_close(conn, lobj_fd); + + return lobjId; +} + +void pickout(PGconn *conn, Oid lobjId, int start, int len) +{ + int lobj_fd; + char* buf; + int nbytes; + int nread; + + lobj_fd = lo_open(conn, lobjId, INV_READ); + if (lobj_fd < 0) { + fprintf(stderr,"can't open large object %d\n", + lobjId); + } + + lo_lseek(conn, lobj_fd, start, SEEK_SET); + buf = malloc(len+1); + + nread = 0; + while (len - nread > 0) { + nbytes = lo_read(conn, lobj_fd, buf, len - nread); + buf[nbytes] = ' '; + fprintf(stderr,">>> %s", buf); + nread += nbytes; + } + fprintf(stderr,"\n"); + lo_close(conn, lobj_fd); +} + +void overwrite(PGconn *conn, Oid lobjId, int start, int len) +{ + int lobj_fd; + char* buf; + int nbytes; + int nwritten; + int i; + + lobj_fd = lo_open(conn, lobjId, INV_READ); + if (lobj_fd < 0) { + fprintf(stderr,"can't open large object %d\n", + lobjId); + } + + lo_lseek(conn, lobj_fd, start, SEEK_SET); + buf = malloc(len+1); + + for (i=0;i<len;i++) + buf[i] = 'X'; + buf[i] = ' '; + + nwritten = 0; + while (len - nwritten > 0) { + nbytes = lo_write(conn, lobj_fd, buf + nwritten, len - nwritten); + nwritten += nbytes; + } + fprintf(stderr,"\n"); + lo_close(conn, lobj_fd); +} + +/* + * exportFile * export large object "lobjOid" to file "out_filename" + * + */ +void exportFile(PGconn *conn, Oid lobjId, char *filename) +{ + int lobj_fd; + char buf[BUFSIZE]; + int nbytes, tmp; + int fd; + + /* + * create an inversion "object" + */ + lobj_fd = lo_open(conn, lobjId, INV_READ); + if (lobj_fd < 0) { + fprintf(stderr,"can't open large object %d\n", + lobjId); + } + + /* + * open the file to be written to + */ + fd = open(filename, O_CREAT|O_WRONLY, 0666); + if (fd < 0) { /* error */ + fprintf(stderr, "can't open unix file %s\n", + filename); + } + + /* + * read in from the Unix file and write to the inversion file + */ + while ((nbytes = lo_read(conn, lobj_fd, buf, BUFSIZE)) > 0) { + tmp = write(fd, buf, nbytes); + if (tmp < nbytes) { + fprintf(stderr,"error while writing %s\n", + filename); + } + } + + (void) lo_close(conn, lobj_fd); + (void) close(fd); + + return; +} + +void +exit_nicely(PGconn* conn) +{ + PQfinish(conn); + exit(1); +} + +int +main(int argc, char **argv) +{ + char *in_filename, *out_filename; + char *database; + Oid lobjOid; + PGconn *conn; + PGresult *res; + + if (argc != 4) { + fprintf(stderr, "Usage: %s database_name in_filename out_filename\n", + argv[0]); + exit(1); + } + + database = argv[1]; + in_filename = argv[2]; + out_filename = argv[3]; + + /* + * set up the connection + */ + conn = PQsetdb(NULL, NULL, NULL, NULL, database); + + /* check to see that the backend connection was successfully made */ + if (PQstatus(conn) == CONNECTION_BAD) { + fprintf(stderr,"Connection to database '%s' failed.\n", database); + fprintf(stderr,"%s",PQerrorMessage(conn)); + exit_nicely(conn); + } + + res = PQexec(conn, "begin"); + PQclear(res); + + printf("importing file %s\n", in_filename); +/* lobjOid = importFile(conn, in_filename); */ + lobjOid = lo_import(conn, in_filename); +/* + printf("as large object %d.\n", lobjOid); + + printf("picking out bytes 1000-2000 of the large object\n"); + pickout(conn, lobjOid, 1000, 1000); + + printf("overwriting bytes 1000-2000 of the large object with X's\n"); + overwrite(conn, lobjOid, 1000, 1000); +*/ + + printf("exporting large object to file 
%s\n", out_filename); +/* exportFile(conn, lobjOid, out_filename); */ + lo_export(conn, lobjOid,out_filename); + + res = PQexec(conn, "end"); + PQclear(res); + PQfinish(conn); + exit(0); +} diff --git a/doc/src/sgml/manage-ag.sgml b/doc/src/sgml/manage-ag.sgml index 24e8399199..d27a2094a0 100644 --- a/doc/src/sgml/manage-ag.sgml +++ b/doc/src/sgml/manage-ag.sgml @@ -1,5 +1,5 @@ @@ -133,9 +133,9 @@ Type: \copyright for distribution terms White space (i.e., spaces, tabs and newlines) may be used freely in SQL queries. Single-line comments are denoted by two dashes - (--). Everything after the dashes up to the end of the + ("--"). Everything after the dashes up to the end of the line is ignored. Multiple-line comments, and comments within a line, - are denoted by /* ... */, a convention borrowed + are denoted by "/* ... */", a convention borrowed from Ingres. diff --git a/doc/src/sgml/manage.sgml b/doc/src/sgml/manage.sgml index 4fe540b881..a3c7c166f4 100644 --- a/doc/src/sgml/manage.sgml +++ b/doc/src/sgml/manage.sgml @@ -1,5 +1,5 @@ @@ -46,7 +46,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/manage.sgml,v 1.9 2000/03/31 03:27:41 - To create a new database named mydb from the command line, type + To create a new database named mydb from the command line, type % createdb mydb @@ -93,7 +93,7 @@ ERROR: CREATE DATABASE: Permission denied. Consult with the site administrator regarding preconfigured alternate database locations. Any valid environment variable name may be used to reference an alternate location, - although using variable names with a prefix of PGDATA is recommended + although using variable names with a prefix of PGDATA is recommended to avoid confusion and conflict with other variables. @@ -185,7 +185,7 @@ enter, edit, and execute SQL commands. library. This allows you to submit SQL commands from C and get answers and status messages back to your program. This interface is discussed further - in section ??. + in The PostgreSQL Programmer's Guide. @@ -217,7 +217,7 @@ This prompt indicates that psql is listening to you and that you can type SQL queries into a workspace maintained by the terminal monitor. The psql program responds to escape codes that begin - with the backslash character, \ For example, you + with the backslash character, "\". For example, you can get help on the syntax of various PostgreSQL SQL commands by typing: @@ -232,7 +232,7 @@ mydb=> \g This tells the server to process the query. If you - terminate your query with a semicolon, the \g is not + terminate your query with a semicolon, the "\g" is not necessary. psql will automatically process semicolon terminated queries. To read queries from a file, say myFile, instead of @@ -251,9 +251,9 @@ mydb=> \q prompt.) White space (i.e., spaces, tabs and newlines) may be used freely in SQL queries. Single-line comments are denoted by - --. Everything after the dashes up to the end of the + "--". Everything after the dashes up to the end of the line is ignored. Multiple-line comments, and comments within a line, - are denoted by /* ... */ + are denoted by "/* ... */". diff --git a/doc/src/sgml/notation.sgml b/doc/src/sgml/notation.sgml index 31153c8a1f..4d978b5131 100644 --- a/doc/src/sgml/notation.sgml +++ b/doc/src/sgml/notation.sgml @@ -1,5 +1,5 @@ @@ -75,35 +75,35 @@ $Header: /cvsroot/pgsql/doc/src/sgml/notation.sgml,v 1.9 2000/04/07 13:30:58 tho Notation - ... or /usr/local/pgsql/ + "..." or /usr/local/pgsql/ at the front of a file name is used to represent the path to the Postgres superuser's home directory. 
In a command synopsis, brackets - ([ and ]) indicate an optional phrase or keyword. + ("[" and "]") indicate an optional phrase or keyword. Anything in braces - ({ and }) and containing vertical bars - (|) + ("{" and "}") and containing vertical bars + ("|") indicates that you must choose one. - In examples, parentheses (( and )) are + In examples, parentheses ("(" and ")") are used to group boolean - expressions. | is the boolean operator OR. + expressions. "|" is the boolean operator OR. Examples will show commands executed from various accounts and programs. Commands executed from the root account will be preceeded with - >. + ">". Commands executed from the Postgres - superuser account will be preceeded with %, while commands + superuser account will be preceeded with "%", while commands executed from an unprivileged user's account will be preceeded with - $. - SQL commands will be preceeded with => + "$". + SQL commands will be preceeded with "=>" or will have no leading prompt, depending on the context. diff --git a/doc/src/sgml/odbc.sgml b/doc/src/sgml/odbc.sgml index 5008830ae3..e2182adbef 100644 --- a/doc/src/sgml/odbc.sgml +++ b/doc/src/sgml/odbc.sgml @@ -1,5 +1,5 @@ @@ -125,7 +125,8 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/odbc.sgml,v 1.14 2000/03/31 03:27:41 can I write it using ODBC calls to the Postgres server, or is that only when another database program - like MS SQL Server or Access needs to access the data? + like MS SQL Server or Access needs to access the data? + The ODBC API @@ -171,8 +172,8 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/odbc.sgml,v 1.14 2000/03/31 03:27:41 ApplixWare has an ODBC database interface supported on at least some platforms. - ApplixWare v4.4.1 has been - demonstrated under Linux with Postgres v6.4 + ApplixWare v4.4.2 has been + demonstrated under Linux with Postgres v7.0 using the psqlODBC driver contained in the Postgres distribution. @@ -253,20 +254,36 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/odbc.sgml,v 1.14 2000/03/31 03:27:41 command-line argument for src/configure: - % ./configure --with-odbc - % make +% ./configure --with-odbc +% make + Rebuild the Postgres distribution: - % make install +% make install + + + + Install the ODBC catalog extensions available in + PGROOT/contrib/odbc/odbc.sql: + + +% psql -e template1 < $PGROOT/contrib/odbc/odbc.sql + + + where specifying template1 as the target + database will ensure that all subsequent new databases will + have these same definitions. + + @@ -278,7 +295,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/odbc.sgml,v 1.14 2000/03/31 03:27:41 This can be overridden from the make command-line as - % make ODBCINST=filename install +% make ODBCINST=filename install @@ -304,9 +321,9 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/odbc.sgml,v 1.14 2000/03/31 03:27:41 sources, type: - % ./configure - % make - % make POSTGRESDIR=PostgresTopDir install +% ./configure +% make +% make POSTGRESDIR=PostgresTopDir install @@ -317,7 +334,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/odbc.sgml,v 1.14 2000/03/31 03:27:41 then you can specify various destinations explicitly: - % make BINDIR=bindir LIBDIR=libdir HEADERDIR=headerdir ODBCINST=instfile install +% make BINDIR=bindir LIBDIR=libdir HEADERDIR=headerdir ODBCINST=instfile install @@ -368,7 +385,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/odbc.sgml,v 1.14 2000/03/31 03:27:41 or gzipped tarfile to an empty directory. 
If using the zip package unzip it with the command - % unzip -a packagename +% unzip -a packagename The option @@ -380,7 +397,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/odbc.sgml,v 1.14 2000/03/31 03:27:41 If you have the gzipped tar package than simply run - tar -xzf packagename +% tar -xzf packagename @@ -404,8 +421,8 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/odbc.sgml,v 1.14 2000/03/31 03:27:41 Create the tar file: - % cd interfaces/odbc - % make standalone +% cd interfaces/odbc +% make standalone @@ -429,7 +446,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/odbc.sgml,v 1.14 2000/03/31 03:27:41 Configure the standalone installation: - % ./configure +% ./configure @@ -437,8 +454,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/odbc.sgml,v 1.14 2000/03/31 03:27:41 The configuration can be done with options: - % ./configure --prefix=rootdir - --with-odbc=inidir +% ./configure --prefix=rootdir --with-odbc=inidir where installs the libraries and headers in @@ -463,7 +479,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/odbc.sgml,v 1.14 2000/03/31 03:27:41 Compile and link the source code: - % make ODBCINST=instdir +% make ODBCINST=instdir @@ -493,7 +509,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/odbc.sgml,v 1.14 2000/03/31 03:27:41 Install the source code: - % make POSTGRESDIR=targettree install +% make POSTGRESDIR=targettree install @@ -513,9 +529,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/odbc.sgml,v 1.14 2000/03/31 03:27:41 Here is how you would specify the various destinations explicitly: - % make BINDIR=bindir - LIBDIR>libdi> - HEADERDIR=headerdir install +% make BINDIR=bindir LIBDIR=libdir HEADERDIR=headerdir install @@ -523,7 +537,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/odbc.sgml,v 1.14 2000/03/31 03:27:41 For example, typing - % make POSTGRESDIR=/opt/psqlodbc install +% make POSTGRESDIR=/opt/psqlodbc install (after you've used @@ -537,7 +551,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/odbc.sgml,v 1.14 2000/03/31 03:27:41 The command - % make POSTGRESDIR=/opt/psqlodbc HEADERDIR=/usr/local install +% make POSTGRESDIR=/opt/psqlodbc HEADERDIR=/usr/local install should cause the libraries to be installed in /opt/psqlodbc/lib and @@ -570,10 +584,10 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/odbc.sgml,v 1.14 2000/03/31 03:27:41 [ODBC Data Sources] and must contain the following entries: - Driver = POSTGRESDIR/lib/libpsqlodbc.so - Database=DatabaseName - Servername=localhost - Port=5432 +Driver = POSTGRESDIR/lib/libpsqlodbc.so +Database=DatabaseName +Servername=localhost +Port=5432 @@ -632,6 +646,7 @@ InstallDir = /opt/applix/axdata/axshlib + ApplixWare @@ -679,7 +694,7 @@ InstallDir = /opt/applix/axdata/axshlib find the line that starts with - #libFor elfodbc /ax/... +#libFor elfodbc /ax/... @@ -688,7 +703,7 @@ InstallDir = /opt/applix/axdata/axshlib Change line to read - libFor elfodbc applixroot/applix/axdata/axshlib/lib +libFor elfodbc applixroot/applix/axdata/axshlib/lib which will tell elfodbc to look in this directory @@ -709,7 +724,7 @@ InstallDir = /opt/applix/axdata/axshlib described above. You may also want to add the flag - TextAsLongVarchar=0 +TextAsLongVarchar=0 to the database-specific portion of .odbc.ini @@ -763,7 +778,7 @@ InstallDir = /opt/applix/axdata/axshlib - You should see Starting elfodbc server + You should see "Starting elfodbc server" in the lower left corner of the data window. If you get an error dialog box, see the debugging section below. @@ -895,14 +910,14 @@ InstallDir = /opt/applix/axdata/axshlib the axnet process. 
For example, if - ps -aucx | grep ax +% ps -aucx | grep ax shows - cary 10432 0.0 2.6 1740 392 ? S Oct 9 0:00 axnet - cary 27883 0.9 31.0 12692 4596 ? S 10:24 0:04 axmain +cary 10432 0.0 2.6 1740 392 ? S Oct 9 0:00 axnet +cary 27883 0.9 31.0 12692 4596 ? S 10:24 0:04 axmain @@ -910,7 +925,7 @@ InstallDir = /opt/applix/axdata/axshlib Then run - strace -f -s 1024 -p 10432 +% strace -f -s 1024 -p 10432 @@ -934,16 +949,16 @@ InstallDir = /opt/applix/axdata/axshlib For example, after getting - a Cannot launch gateway on server, + a "Cannot launch gateway on server", I ran strace on axnet and got - [pid 27947] open("/usr/lib/libodbc.so", O_RDONLY) = -1 ENOENT - (No such file or directory) - [pid 27947] open("/lib/libodbc.so", O_RDONLY) = -1 ENOENT - (No such file or directory) - [pid 27947] write(2, "/usr2/applix/axdata/elfodbc: - can't load library 'libodbc.so'\n", 61) = -1 EIO (I/O error) +[pid 27947] open("/usr/lib/libodbc.so", O_RDONLY) = -1 ENOENT +(No such file or directory) +[pid 27947] open("/lib/libodbc.so", O_RDONLY) = -1 ENOENT +(No such file or directory) +[pid 27947] write(2, "/usr2/applix/axdata/elfodbc: +can't load library 'libodbc.so'\n", 61) = -1 EIO (I/O error) So what is happening is that applix elfodbc is searching for libodbc.so, but it can't find it. That is why axnet.cnf needed to be changed. @@ -1034,7 +1049,7 @@ InstallDir = /opt/applix/axdata/axshlib - Enter the value sqldemo, then click OK. + Enter the value "sqldemo", then click OK. @@ -1060,10 +1075,10 @@ InstallDir = /opt/applix/axdata/axshlib ~/axhome/macros/login.am file: - macro login - set_set_system_var@("sql_username@","tgl") - set_system_var@("sql_passwd@","no$way") - endmacro +macro login +set_set_system_var@("sql_username@","tgl") +set_system_var@("sql_passwd@","no$way") +endmacro diff --git a/doc/src/sgml/oper.sgml b/doc/src/sgml/oper.sgml index 84979a548b..aa42407bbd 100644 --- a/doc/src/sgml/oper.sgml +++ b/doc/src/sgml/oper.sgml @@ -1,5 +1,5 @@ @@ -22,7 +22,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/oper.sgml,v 1.15 2000/03/31 03:27:41 - To view all variations of the || string concatenation operator, + To view all variations of the "||" string concatenation operator, try SELECT oprleft, oprright, oprresult, oprcode @@ -74,15 +74,9 @@ Operator Ordering (decreasing precedence) - -Element - - -Precedence - - -Description - +Element +Precedence +Description @@ -141,6 +135,8 @@ right unary minus + : @@ -283,7 +280,7 @@ string pattern matching -boolean inequality +inequality @@ -305,7 +302,7 @@ NOT right -negation +logical negation @@ -494,11 +491,14 @@ logical union Natural Exponentiation : 3.0 + @ Absolute value @@ -527,8 +527,8 @@ logical union - The operators ":" and ";" are deprecated, and will be removed in - the near future. Use the equivalent functions exp() and ln() + Two operators, ":" and ";", are now deprecated and will be removed in + the next release. Use the equivalent functions exp() and ln() instead. diff --git a/doc/src/sgml/plsql.sgml b/doc/src/sgml/plsql.sgml index 0a3d33c486..95adb1fafb 100644 --- a/doc/src/sgml/plsql.sgml +++ b/doc/src/sgml/plsql.sgml @@ -1,5 +1,5 @@ @@ -512,7 +512,7 @@ RETURN expression RAISE level format'' [, identifier [...]]; - Inside the format, % is used as a placeholder for the + Inside the format, "%" is used as a placeholder for the subsequent comma-separated identifiers. 
Possible levels are DEBUG (silently suppressed in production running databases), NOTICE (written into the database log and forwarded to the client application) diff --git a/doc/src/sgml/ports.sgml b/doc/src/sgml/ports.sgml index a5a71a960e..fb85957124 100644 --- a/doc/src/sgml/ports.sgml +++ b/doc/src/sgml/ports.sgml @@ -2,7 +2,7 @@ Ports - This manual describes version 6.5 of Postgres. + This manual describes version 7.0 of Postgres. The Postgres developer community has compiled and tested Postgres on a number of platforms. Check @@ -34,180 +34,171 @@ RS6000 v7.0 2000-04-05 - (Andreas Zeugswetter) + Andreas Zeugswetter BSDI 4.01 x86 v7.0 2000-04-04 - (Bruce Momjian + Bruce Momjian Compaq Tru64 5.0 Alpha v7.0 2000-04-11 - (Andrew McMurry) + Andrew McMurry FreeBSD 4.0 x86 v7.0 2000-04-04 - (Marc Fournier) + Marc Fournier HPUX PA-RISC v7.0 2000-04-12 - Both 9.0x and 10.20 - (Tom Lane) + Both 9.0x and 10.20. + Tom Lane IRIX 6.5.6f MIPS v6.5.3 2000-02-18 - MIPSPro 7.3.1.1m; full N32 build - (Kevin Wheatley) + MIPSPro 7.3.1.1m N32 build. + Kevin Wheatley Linux 2.0.x Alpha v7.0 2000-04-05 - With patches - (Ryan Kirkpatrick) + With published patches. + Ryan Kirkpatrick Linux 2.2.x armv4l v7.0 2000-04-17 - Regression test needs work - (Mark Knox) + Regression test needs work. + Mark Knox Linux 2.2.x x86 v7.0 2000-03-26 - (Lamar Owens) + Lamar Owens Linux 2.0.x MIPS v7.0 2000-04-13 - Cobalt Qube - (Tatsuo Ishii) + Cobalt Qube. + Tatsuo Ishii Linux 2.2.5 Sparc v7.0 2000-04-02 - (Tom Szybist) + Tom Szybist LinuxPPC R4 PPC603e v7.0 2000-04-13 - (Tatsuo Ishii) + Tatsuo Ishii mklinux PPC750 v7.0 2000-04-13 - (Tatsuo Ishii) + Tatsuo Ishii NetBSD 1.4 arm32 v7.0 2000-04-08 - (Patrick - Welche) + Patrick + Welche NetBSD 1.4U x86 v7.0 2000-03-26 - (Patrick - Welche) + Patrick + Welche NetBSD m68k v7.0 2000-04-10 - Mac 8xx - (Henry B. Hotz) + Mac 8xx. + Henry B. Hotz NetBSD/sparc Sparc v7.0 2000-04-13 - (Tom I Helbekkmo) + Tom I Helbekkmo QNX 4.25 x86 v7.0 2000-04-01 - - (Dr. Andreas Kardos) + Dr. Andreas Kardos SCO OpenServer 5 x86 v6.5 1999-05-25 - (Andrew Merrill) + Andrew Merrill SCO UnixWare 7 x86 v7.0 2000-04-18 - See FAQ; needs patch for compiler bug - (Billy G. Allie) + See FAQ. + Billy G. Allie Solaris x86 v7.0 2000-04-12 - (Marc Fournier) + Marc Fournier Solaris 2.5.1-2.7 Sparc v7.0 2000-04-12 - (Peter Eisentraut, - Marc Fournier) + Peter Eisentraut, + Marc Fournier SunOS 4.1.4 Sparc v7.0 2000-04-13 - (Tatsuo Ishii) - - - SVR4 - MIPS - v6.4 - 1998-10-28 - No 64-bit int compiler support - (Frank Ridderbusch) + Tatsuo Ishii Windows/Win32 @@ -215,15 +206,15 @@ v7.0 2000-04-02 Client-side libraries or ODBC/JDBC. No server-side. - (Magnus Hagander + Magnus Hagander WinNT/Cygwin x86 v7.0 2000-03-30 - Working with the Cygwin library. - (Daniel Horak) + Uses Cygwin library. + Daniel Horak) @@ -235,7 +226,8 @@ For Windows NT, the server-side port of Postgres uses the RedHat/Cygnus Cygwin library and - toolset. + toolset. For Windows 9x, no + server-side port is available due to OS limitations. @@ -256,7 +248,7 @@ tested for v7.0 or v6.5.x: - Obsolete Platforms + Unsupported Platforms @@ -269,39 +261,56 @@ + BeOS + x86 + v7.0 + 2000-05-01 + Client-side coming soon? + Adam Haberlach + + DGUX 5.4R4.11 m88k v6.3 1998-03-01 v6.4 probably OK. Needs new maintainer. - (Brian E Gallew) + Brian E Gallew NetBSD-current NS32532 v6.4 1998-10-27 - small problems in date/time math - (Jon Buller) + Date math annoyances. + Jon Buller NetBSD 1.3 VAX v6.3 1998-03-01 - (Tom I Helbekkmo) + v7.0 should work. 
+ Tom I Helbekkmo SVR4 4.4 m88k v6.2.1 1998-03-01 - Confirmed with patching; v6.4.x will need TAS spinlock code - (Doug Winterburn) + v6.4.x will need TAS spinlock code. + Doug Winterburn + + + SVR4 + MIPS + v6.4 + 1998-10-28 + No 64-bit int. + Frank Ridderbusch Ultrix - MIPS,VAX? + MIPS, VAX v6.x 1998-03-01 No recent reports; obsolete? @@ -342,8 +351,8 @@ x86 v6.x 1998-03-01 - Client-only support; v1.0.9 worked with patches (David Wetzel) + Client-only support; v1.0.9 worked with patches + David Wetzel diff --git a/doc/src/sgml/postgres.sgml b/doc/src/sgml/postgres.sgml index e59289c631..2f7d150264 100644 --- a/doc/src/sgml/postgres.sgml +++ b/doc/src/sgml/postgres.sgml @@ -1,5 +1,5 @@ + @@ -106,27 +107,28 @@ $Header: /cvsroot/pgsql/doc/src/sgml/postgres.sgml,v 1.35 2000/03/31 03:26:21 th ]> - + + - PostgreSQL - - Covering v6.5 for general release - - - The PostgreSQL Development Team - + PostgreSQL + + Covering v7.0 for general release + + + The PostgreSQL Development Team + - - Thomas - Lockhart - - Caltech/JPL - - + + Thomas + Lockhart + + Caltech/JPL + + @@ -135,17 +137,17 @@ $Header: /cvsroot/pgsql/doc/src/sgml/postgres.sgml,v 1.35 2000/03/31 03:26:21 th TGL --> - (last updated 1999-06-01) - + (last updated 2000-05-01) + - - - PostgreSQL is Copyright © 1996-9 - by the Postgres Global Development Group. - - + + + PostgreSQL is Copyright © 1996-2000 + by PostgreSQL Inc. + + - + - Summary + Summary - - Postgres, + + Postgres, developed originally in the UC Berkeley Computer Science Department, pioneered many of the object-relational concepts now becoming available in some commercial databases. It provides SQL92/SQL3 language support, transaction integrity, and type extensibility. - PostgreSQL is an + PostgreSQL is an open-source descendant of this original Berkeley code. - - + + - - User's Guide - - + + User's Guide + + Information for users. - - + + &intro; &syntax; @@ -202,15 +204,15 @@ Your name here... &plan; &populate; &commands; - + - - Administrator's Guide - - + + Administrator's Guide + + Installation and maintenance information. - - + + &protocol; &signals; &compiler; &bki; &page; - + - - Tutorial - - + + Tutorial + + Introduction for new users. - - + + &sql; &arch; &start; &query; &advanced; - + - - Appendices - - + + Appendices + + Additional related information. - - + + &datetime; &cvs; @@ -328,7 +336,7 @@ Your name here... &contacts; --> &biblio; - + - + @@ -44,6 +44,7 @@ Postgres Programmer's Guide. + @@ -62,7 +63,7 @@ Postgres Programmer's Guide. PostgreSQL Programmer's Guide - Covering v6.5 for general release + Covering v7.0 for general release The PostgreSQL Development Team @@ -85,13 +86,13 @@ Postgres Programmer's Guide. TGL --> - (last updated 1999-06-19) + (last updated 2000-05-01) - PostgreSQL is Copyright © 1996-9 - by the Postgres Global Development Group. + PostgreSQL is Copyright © 1996-2000 + by PostgreSQL Inc. @@ -148,29 +149,35 @@ Disable it until we put in some info. 
&func-ref; --> - &trigger; - &spi; - &lobj; - &libpq; - &libpqpp; - &libpgtcl; - &libpgeasy; - &ecpg; - &odbc; - &jdbc; - &lisp; + &trigger; + &spi; + &lobj; + &libpq; + &libpqpp; + &libpgtcl; + &libpgeasy; + &ecpg; + &odbc; + &jdbc; + &lisp; - &sources; - &arch-dev; - &options; - &geqo; - &protocol; - &signals; - &compiler; - &bki; - &page; + &sources; + &arch-dev; + &options; + &geqo; + + &protocol; + &signals; + &compiler; + &bki; + &page; diff --git a/doc/src/sgml/ref/create_database.sgml b/doc/src/sgml/ref/create_database.sgml index 00dae61b2b..1c5daa6263 100644 --- a/doc/src/sgml/ref/create_database.sgml +++ b/doc/src/sgml/ref/create_database.sgml @@ -1,5 +1,5 @@ @@ -132,7 +132,7 @@ CREATE DATABASE name [ WITH LOCATIO - ERROR: Unable to create database directory 'xxx'. + ERROR: Unable to create database directory 'path'. ERROR: Could not initialize database directory. @@ -164,8 +164,8 @@ CREATE DATABASE name [ WITH LOCATIO An alternate location can be specified in order to, for example, store the database on a different disk. - The path must have been prepared with the + The path must have been prepared with the + command. diff --git a/doc/src/sgml/ref/create_index.sgml b/doc/src/sgml/ref/create_index.sgml index 2029a27a4e..43d6ffe38c 100644 --- a/doc/src/sgml/ref/create_index.sgml +++ b/doc/src/sgml/ref/create_index.sgml @@ -1,5 +1,5 @@ @@ -227,6 +227,11 @@ ERROR: Cannot create index: 'index_name' already exists. access methods). + + Use + to remove an index. + + 1998-09-09 @@ -339,11 +344,6 @@ SELECT am.amname AS acc_name, - - Use - to remove an index. - - Usage diff --git a/doc/src/sgml/ref/create_table.sgml b/doc/src/sgml/ref/create_table.sgml index 7208242c26..0e77618799 100644 --- a/doc/src/sgml/ref/create_table.sgml +++ b/doc/src/sgml/ref/create_table.sgml @@ -1,5 +1,5 @@ <!-- -$Header: /cvsroot/pgsql/doc/src/sgml/ref/create_table.sgml,v 1.28 2000/04/15 23:29:58 momjian Exp $ +$Header: /cvsroot/pgsql/doc/src/sgml/ref/create_table.sgml,v 1.29 2000/05/02 20:02:03 thomas Exp $ Postgres documentation --> @@ -905,13 +905,12 @@ ERROR: Cannot insert a duplicate key into a unique index. REFERENCES Constraint -[ CONSTRAINT name ] REFERENCES -reftable [ ( refcolumn ) ] -[ MATCH matchtype ] -[ ON DELETE action ] -[ ON UPDATE action ] -[ [ NOT ] DEFERRABLE ] -[ INITIALLY checktime ] +[ CONSTRAINT name ] REFERENCES reftable [ ( refcolumn ) ] + [ MATCH matchtype ] + [ ON DELETE action ] + [ ON UPDATE action ] + [ [ NOT ] DEFERRABLE ] + [ INITIALLY checktime ] The REFERENCES constraint specifies a rule that a column @@ -1448,14 +1447,13 @@ CREATE TABLE distributors ( REFERENCES Constraint -[ CONSTRAINT name ] -FOREIGN KEY ( column [, ...] ) REFERENCES -reftable [ ( refcolumn [, ...] ) ] -[ MATCH matchtype ] -[ ON DELETE action ] -[ ON UPDATE action ] -[ [ NOT ] DEFERRABLE ] -[ INITIALLY checktime ] +[ CONSTRAINT name ] FOREIGN KEY ( column [, ...] ) + REFERENCES reftable [ ( refcolumn [, ...] ) ] + [ MATCH matchtype ] + [ ON DELETE action ] + [ ON UPDATE action ] + [ [ NOT ] DEFERRABLE ] + [ INITIALLY checktime ] The REFERENCES constraint specifies a rule that a column value is @@ -1901,7 +1899,7 @@ CREATE TEMPORARY TABLE actors ( Table Constraint definition: -[ CONSTRAINT name ] UNIQUE ( column [, ...] ) +[ CONSTRAINT name ] UNIQUE ( column [, ...] 
) [ { INITIALLY DEFERRED | INITIALLY IMMEDIATE } ] [ [ NOT ] DEFERRABLE ] @@ -1911,7 +1909,7 @@ CREATE TEMPORARY TABLE actors ( Column Constraint definition: -[ CONSTRAINT name ] UNIQUE +[ CONSTRAINT name ] UNIQUE [ {INITIALLY DEFERRED | INITIALLY IMMEDIATE} ] [ [ NOT ] DEFERRABLE ] @@ -1928,7 +1926,7 @@ CREATE TEMPORARY TABLE actors ( included for symmetry with the NOT NULL clause. Since it is the default for any column, its presence is simply noise. -[ CONSTRAINT name ] NULL +[ CONSTRAINT name ] NULL @@ -1941,7 +1939,7 @@ CREATE TEMPORARY TABLE actors ( SQL92 specifies some additional capabilities for NOT NULL: -[ CONSTRAINT name ] NOT NULL +[ CONSTRAINT name ] NOT NULL [ {INITIALLY DEFERRED | INITIALLY IMMEDIATE} ] [ [ NOT ] DEFERRABLE ] @@ -1965,9 +1963,7 @@ the column. Not our problem... or a domain. - DEFAULT niladic USER function | - niladic datetime function | - NULL +DEFAULT niladic_user_function | niladic_datetime_function | NULL --> @@ -1994,7 +1990,7 @@ the column. Not our problem... as an alternate method for defining a constraint: -CREATE ASSERTION name CHECK ( condition ) +CREATE ASSERTION name CHECK ( condition ) @@ -2005,7 +2001,7 @@ CREATE ASSERTION name CHECK ( condition ) Domain constraint: -[ CONSTRAINT name ] CHECK constraint +[ CONSTRAINT name ] CHECK constraint [ {INITIALLY DEFERRED | INITIALLY IMMEDIATE} ] [ [ NOT ] DEFERRABLE ] @@ -2015,7 +2011,7 @@ CREATE ASSERTION name CHECK ( condition ) Table constraint definition: -[ CONSTRAINT name ] { PRIMARY KEY ( column, ... ) | FOREIGN KEY constraint | UNIQUE constraint | CHECK constraint } +[ CONSTRAINT name ] { PRIMARY KEY ( column, ... ) | FOREIGN KEY constraint | UNIQUE constraint | CHECK constraint } [ {INITIALLY DEFERRED | INITIALLY IMMEDIATE} ] [ [ NOT ] DEFERRABLE ] @@ -2025,7 +2021,7 @@ CREATE ASSERTION name CHECK ( condition ) Column constraint definition: -[ CONSTRAINT name ] { NOT NULL | PRIMARY KEY | FOREIGN KEY constraint | UNIQUE | CHECK constraint } +[ CONSTRAINT name ] { NOT NULL | PRIMARY KEY | FOREIGN KEY constraint | UNIQUE | CHECK constraint } [ {INITIALLY DEFERRED | INITIALLY IMMEDIATE} ] [ [ NOT ] DEFERRABLE ] @@ -2067,8 +2063,8 @@ CREATE ASSERTION name CHECK ( condition ) INITIALLY IMMEDIATE - Check constraint only at the end of the transaction. This - is the default + Check constraint only at the end of the transaction. This + is the default @@ -2076,7 +2072,7 @@ CREATE ASSERTION name CHECK ( condition ) INITIALLY DEFERRED - Check constraint after each statement. + Check constraint after each statement. @@ -2106,7 +2102,7 @@ affect a column or a table. table constraint definition: -[ CONSTRAINT name ] CHECK ( VALUE condition ) +[ CONSTRAINT name ] CHECK ( VALUE condition ) [ {INITIALLY DEFERRED | INITIALLY IMMEDIATE} ] [ [ NOT ] DEFERRABLE ] @@ -2115,7 +2111,7 @@ affect a column or a table. column constraint definition: -[ CONSTRAINT name ] CHECK ( VALUE condition ) +[ CONSTRAINT name ] CHECK ( VALUE condition ) [ {INITIALLY DEFERRED | INITIALLY IMMEDIATE} ] [ [ NOT ] DEFERRABLE ] @@ -2125,7 +2121,7 @@ affect a column or a table. domain constraint definition: - [ CONSTRAINT name ] + [ CONSTRAINT name] CHECK ( VALUE condition ) [ {INITIALLY DEFERRED | INITIALLY IMMEDIATE} ] [ [ NOT ] DEFERRABLE ] @@ -2154,7 +2150,7 @@ ALTER DOMAIN cities Table Constraint definition: -[ CONSTRAINT name ] PRIMARY KEY ( column [, ...] ) +[ CONSTRAINT name ] PRIMARY KEY ( column [, ...] 
) [ {INITIALLY DEFERRED | INITIALLY IMMEDIATE} ] [ [ NOT ] DEFERRABLE ] @@ -2162,7 +2158,7 @@ ALTER DOMAIN cities Column Constraint definition: -[ CONSTRAINT name ] PRIMARY KEY +[ CONSTRAINT name ] PRIMARY KEY [ {INITIALLY DEFERRED | INITIALLY IMMEDIATE} ] [ [ NOT ] DEFERRABLE ] diff --git a/doc/src/sgml/ref/initdb.sgml b/doc/src/sgml/ref/initdb.sgml index 48040c9824..2ee434511b 100644 --- a/doc/src/sgml/ref/initdb.sgml +++ b/doc/src/sgml/ref/initdb.sgml @@ -1,5 +1,5 @@ @@ -24,11 +24,11 @@ Postgres documentation initdb [ --pgdata|-D dbdir ] - [ --sysid|-i sysid ] - [ --pwprompt|-W ] - [ --encoding|-E encoding ] - [ --pglib|-L libdir ] - [ --noclean | -n ] [ --debug | -d ] [ --template | -t ] + [ --sysid|-i sysid ] + [ --pwprompt|-W ] + [ --encoding|-E encoding ] + [ --pglib|-L libdir ] + [ --noclean | -n ] [ --debug | -d ] [ --template | -t ] diff --git a/doc/src/sgml/ref/pgctl-ref.sgml b/doc/src/sgml/ref/pgctl-ref.sgml index 897d42179c..e6a0979b3f 100644 --- a/doc/src/sgml/ref/pgctl-ref.sgml +++ b/doc/src/sgml/ref/pgctl-ref.sgml @@ -1,5 +1,5 @@ @@ -28,7 +28,8 @@ Postgres documentation pg_ctl [-w] [-D datadir][-p path] [-o "options"] start pg_ctl [-w] [-D datadir] [-m [s[mart]|f[ast]|i[mmediate]]] stop -pg_ctl [-w] [-D datadir] [-m [s[mart]|f[ast]|i[mmediate]] [-o "options"] restart +pg_ctl [-w] [-D datadir] [-m [s[mart]|f[ast]|i[mmediate]] + [-o "options"] restart pg_ctl [-D datadir] status diff --git a/doc/src/sgml/ref/postmaster.sgml b/doc/src/sgml/ref/postmaster.sgml index 958bcd3b3d..beb3fb046c 100644 --- a/doc/src/sgml/ref/postmaster.sgml +++ b/doc/src/sgml/ref/postmaster.sgml @@ -1,5 +1,5 @@ @@ -24,8 +24,8 @@ Postgres documentation postmaster [ -B nBuffers ] [ -D DataDir ] [ -N maxBackends ] [ -S ] - [ -d DebugLevel ] [ -i ] [ -l ] - [ -o BackendOptions ] [ -p port ] [ -n | -s ] + [ -d DebugLevel ] [ -i ] [ -l ] + [ -o BackendOptions ] [ -p port ] [ -n | -s ] diff --git a/doc/src/sgml/ref/vacuumdb.sgml b/doc/src/sgml/ref/vacuumdb.sgml index 1b4a1eed29..f6aae0d949 100644 --- a/doc/src/sgml/ref/vacuumdb.sgml +++ b/doc/src/sgml/ref/vacuumdb.sgml @@ -1,5 +1,5 @@ @@ -23,8 +23,9 @@ Postgres documentation 1999-12-04 -vacuumdb [ connection options ] [ --analyze | -z ] [ --alldb | -a ] [ --verbose | -v ] - [ --table 'table [ ( column [,...] ) ]' ] [ [-d] dbname ] +vacuumdb [ options ] [ --analyze | -z ] + [ --alldb | -a ] [ --verbose | -v ] + [ --table 'table [ ( column [,...] ) ]' ] [ [-d] dbname ] @@ -39,7 +40,8 @@ vacuumdb [ connection options ] [ - - [-d, --dbname] dbname + -d dbname + --dbname dbname Specifies the name of the database to be cleaned or analyzed. @@ -48,7 +50,8 @@ vacuumdb [ connection options ] [ - - -z, --analyze + -z + --analyze Calculate statistics on the database for use by the optimizer. @@ -57,7 +60,8 @@ vacuumdb [ connection options ] [ - - -a, --alldb + -a + --alldb Vacuum all databases. @@ -66,7 +70,8 @@ vacuumdb [ connection options ] [ - - -v, --verbose + -v + --verbose Print detailed information during processing. @@ -75,7 +80,8 @@ vacuumdb [ connection options ] [ - - -t, --table table [ (column [,...]) ] + -t table [ (column [,...]) ] + --table table [ (column [,...]) ] Clean or analyze table only. 
@@ -100,7 +106,8 @@ vacuumdb [ connection options ] [ - - -h, --host host + -h host + --host host Specifies the hostname of the machine on which the @@ -111,7 +118,8 @@ vacuumdb [ connection options ] [ - - -p, --port port + -p port + --port port Specifies the Internet TCP/IP port or local Unix domain socket file @@ -122,7 +130,8 @@ vacuumdb [ connection options ] [ - - -U, --username username + -U username + --username username Username to connect as. @@ -131,7 +140,8 @@ vacuumdb [ connection options ] [ - - -W, --password + -W + --password Force password prompt. @@ -140,7 +150,8 @@ vacuumdb [ connection options ] [ - - -e, --echo + -e + --echo Echo the commands that vacuumdb generates @@ -150,7 +161,8 @@ vacuumdb [ connection options ] [ - - -q, --quiet + -q + --quiet Do not display a response. diff --git a/doc/src/sgml/reference.sgml b/doc/src/sgml/reference.sgml index 7dd2eeaafb..1bf3544a1a 100644 --- a/doc/src/sgml/reference.sgml +++ b/doc/src/sgml/reference.sgml @@ -1,34 +1,8 @@ ]> - - - -PostgreSQL Reference Manual - - Covering v6.5 for general release - - - - Jose - Soares Da Silva - - - Oliver - Elphick - - + + + + + PostgreSQL Reference Manual + + Covering v6.5 for general release + + + + Jose + Soares Da Silva + + + Oliver + Elphick + + - - Oliver - Elphick - + + Oliver + Elphick + - (last updated 1999-06-01) - + (last updated 2000-05-01) + - - -PostgreSQL is © 1998-9 -by the Postgres Global Development Group. - - + + + PostgreSQL is © 1998-2000 + by PostgreSQL Inc. + + - + - -Summary - - -Postgres, - developed originally in the UC Berkeley Computer Science Department, - pioneered many of the object-relational concepts - now becoming available in some commercial databases. -It provides SQL92/SQL3 language support, - transaction integrity, and type extensibility. - PostgreSQL is a public-domain, open source descendant - of this original Berkeley code. - - + + Summary + + + Postgres, + developed originally in the UC Berkeley Computer Science Department, + pioneered many of the object-relational concepts + now becoming available in some commercial databases. + It provides SQL92/SQL3 language support, + transaction integrity, and type extensibility. + PostgreSQL is a public-domain, open source descendant + of this original Berkeley code. + + -&commands; + &commands; -&biblio; + &biblio; - - + + + diff --git a/doc/src/sgml/regress.sgml b/doc/src/sgml/regress.sgml index a4ed601ce4..56e1d61051 100644 --- a/doc/src/sgml/regress.sgml +++ b/doc/src/sgml/regress.sgml @@ -1,267 +1,314 @@ - -Regression Test - - - -Regression test instructions and analysis. - - - - - The PostgreSQL regression tests are a comprehensive set of tests for the - SQL implementation embedded in PostgreSQL. They test standard SQL - operations as well as the extended capabilities of PostgreSQL. - - - - There are two different ways in which the regression tests can be run: - the "sequential" method and the "parallel" method. The sequential method - runs each test script in turn, whereas the parallel method starts up - multiple server processes to run groups of tests in parallel. Parallel - testing gives confidence that interprocess communication and locking - are working correctly. Another key difference is that the sequential - test procedure uses an already-installed postmaster, whereas the - parallel test procedure tests a system that has been built but not yet - installed. (The parallel test script actually does an installation into - a temporary directory and fires up a private postmaster therein.) 
- - - - Some properly installed and fully functional PostgreSQL installations - can "fail" some of these regression tests due to artifacts of floating point - representation and time zone support. The tests are currently evaluated - using a simple diff comparison against the - outputs generated on a reference system, so the results are sensitive to - small system differences. - When a test is reported as "failed", always examine the differences - between expected and actual results; you may well find that the differences - are not significant. - - - - The regression tests were originally developed by Jolly Chen and Andrew Yu, - and were extensively revised/repackaged by Marc Fournier and Thomas Lockhart. - From PostgreSQL v6.1 onward - the regression tests are current for every official release. - - - -Regression Environment - - -The regression testing notes below assume the following (except where noted): - - - -Commands are Unix-compatible. See note below. - - - - -Defaults are used except where noted. - - - - -User postgres is the Postgres superuser. - - - - -The source path is /usr/src/pgsql (other paths are possible). - - - - -The runtime path is /usr/local/pgsql (other paths are possible). - - - - - - - Normally, the regression tests should be run as the postgres user since - the 'src/test/regress' directory and sub-directories are owned by the - postgres user. If you run the regression test as another user the - 'src/test/regress' directory tree must be writeable by that user. - - - - It was formerly necessary to run the postmaster with system time zone - set to PST, but this is no longer required. You can run the regression - tests under your normal postmaster configuration. The test script will - set the PGTZ environment variable to ensure that timezone-dependent tests - produce the expected results. However, your system must provide - library support for the PST8PDT time zone, or the timezone-dependent - tests will fail. - To verify that your machine does have this support, type - the following: - - setenv TZ PST8PDT - date - - - - - The "date" command above should have returned the current system time - in the PST8PDT time zone. If the PST8PDT database is not available, then - your system may have returned the time in GMT. If the PST8PDT time zone - is not available, you can set the time zone rules explicitly: - - setenv PGTZ PST8PDT7,M04.01.0,M10.05.03 - - - - - - Directory Layout - - - - - This should become a table in the previous section. - - - - - - - input/ .... .source files that are converted using 'make all' into - some of the .sql files in the 'sql' subdirectory + + Regression Test + + + + Regression test instructions and analysis. + + + + + The PostgreSQL regression tests are a comprehensive set of tests for the + SQL implementation embedded in PostgreSQL. They test standard SQL + operations as well as the extended capabilities of PostgreSQL. + + + + There are two different ways in which the regression tests can be run: + the "sequential" method and the "parallel" method. The sequential method + runs each test script in turn, whereas the parallel method starts up + multiple server processes to run groups of tests in parallel. Parallel + testing gives confidence that interprocess communication and locking + are working correctly. Another key difference is that the sequential + test procedure uses an already-installed postmaster, whereas the + parallel test procedure tests a system that has been built but not yet + installed. 
(The parallel test script actually does an installation into + a temporary directory and fires up a private postmaster therein.) + + + + Some properly installed and fully functional PostgreSQL installations + can "fail" some of these regression tests due to artifacts of floating point + representation and time zone support. The tests are currently evaluated + using a simple diff comparison against the + outputs generated on a reference system, so the results are sensitive to + small system differences. + When a test is reported as "failed", always examine the differences + between expected and actual results; you may well find that the differences + are not significant. + + + + The regression tests were originally developed by Jolly Chen and Andrew Yu, + and were extensively revised/repackaged by Marc Fournier and Thomas Lockhart. + From PostgreSQL v6.1 onward + the regression tests are current for every official release. + + + + Regression Environment + + + The regression testing notes below assume the following (except where noted): + + + + Commands are Unix-compatible. See note below. + + + + + Defaults are used except where noted. + + + + + User postgres is the Postgres superuser. + + + + + The source path is /usr/src/pgsql (other paths are possible). + + + + + The runtime path is /usr/local/pgsql (other paths are possible). + + + + + + + Normally, the regression tests should be run as the postgres user since + the 'src/test/regress' directory and sub-directories are owned by the + postgres user. If you run the regression test as another user the + 'src/test/regress' directory tree must be writeable by that user. + + + + It was formerly necessary to run the postmaster with system time zone + set to PST, but this is no longer required. You can run the regression + tests under your normal postmaster configuration. The test script will + set the PGTZ environment variable to ensure that timezone-dependent tests + produce the expected results. However, your system must provide + library support for the PST8PDT time zone, or the timezone-dependent + tests will fail. + To verify that your machine does have this support, type + the following: + + +setenv TZ PST8PDT +date + + + + + The "date" command above should have returned the current system time + in the PST8PDT time zone. If the PST8PDT database is not available, then + your system may have returned the time in GMT. If the PST8PDT time zone + is not available, you can set the time zone rules explicitly: + +setenv PGTZ PST8PDT7,M04.01.0,M10.05.03 + + + + + The directory layout for the regression test area is: + +
+ Directory Layout - output/ ... .source files that are converted using 'make all' into - .out files in the 'expected' subdirectory + Kerberos - sql/ ...... .sql files used to perform the regression tests + + + + Directory + Description + + + + + Directory + Description + + + input + + Source files that are converted using + make all into + some of the .sql files in the + sql subdirectory. + + - expected/ . .out files that represent what we *expect* the results to - look like + + output + + Source files that are converted using + make all into + .out files in the + expected subdirectory. + + - results/ .. .out files that contain what the results *actually* look - like. Also used as temporary storage for table copy testing. + + sql + + .sql files used to perform the + regression tests. + + - tmp_check/ temporary installation created by parallel testing script. - - - + + expected + + .out files that represent what we + expect the results to + look like. + + + + + results + + .out files that contain what the results + actually look + like. Also used as temporary storage for table copy testing. + + + + + tmp_check + + Temporary installation created by parallel testing script. + + + + +
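 + The PST8PDT timezone check shown above with setenv TZ PST8PDT
 + and date can also be made, if preferred, with a small
 + stand-alone C program. The sketch below is only illustrative (the
 + file name tzcheck.c is hypothetical); it forces the PST8PDT rules
 + and prints the local time, and a GMT or UTC abbreviation in the
 + output suggests that the zone database lacks PST8PDT support, just
 + as with the date command:
 +
 + /*
 +  * tzcheck.c -- illustrative check for PST8PDT timezone support,
 +  * equivalent to "setenv TZ PST8PDT; date".
 +  */
 + #include <stdio.h>
 + #include <stdlib.h>
 + #include <time.h>
 +
 + int
 + main(void)
 + {
 +     time_t      now;
 +     char        buf[64];
 +
 +     if (putenv("TZ=PST8PDT") != 0)      /* putenv for portability */
 +     {
 +         perror("putenv");
 +         return 1;
 +     }
 +     tzset();
 +
 +     time(&now);
 +     strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S %Z", localtime(&now));
 +     printf("%s\n", buf);                /* expect PST or PDT, not GMT */
 +     return 0;
 + }
 +
 + Either form of the check is sufficient; the regression driver itself
 + relies on the PGTZ setting described above.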
+ + - - Regression Test Procedure + + Regression Test Procedure - + Commands were tested on RedHat Linux version 4.2 using the bash shell. Except where noted, they will probably work on most systems. Commands - like ps and tar vary wildly on what options you should use on each - platform. Use common sense before typing in these commands. - + like ps and tar vary + wildly on what options you should use on each + platform. Use common sense before typing in these commands. + - - <ProductName>Postgres</ProductName> Regression Test + + <productname>Postgres</productname> Regression Test - - + + Prepare the files needed for the regression test with: - + cd /usr/src/pgsql/src/test/regress gmake clean gmake all - + You can skip "gmake clean" if this is the first time you are running the tests. - - This step compiles a C + + This step compiles a C program with PostgreSQL extension functions into a shared library. Localized SQL scripts and output-comparison files are also created for the tests that need them. The localization replaces macros in the source files with absolute pathnames and user names. - + - - + + If you intend to use the "sequential" test procedure, which tests an already-installed postmaster, be sure that the postmaster is running. If it isn't already running, start the postmaster in an available window by typing - + postmaster - + or start the postmaster daemon running in the background by typing - + cd nohup postmaster > regress.log 2>&1 & - + The latter is probably preferable, since the regression test log will be quite lengthy (60K or so, in - Postgres 7.0) and you might want to + Postgres 7.0) and you might want to review it for clues if things go wrong. - - - Do not run postmaster from the root account. - - - - + + + Do not run postmaster from the root account. + + + + - - + + Run the regression tests. For a sequential test, type - + cd /usr/src/pgsql/src/test/regress gmake runtest - + For a parallel test, type - + cd /usr/src/pgsql/src/test/regress gmake runcheck - + The sequential test just runs the test scripts using your already-running postmaster. The parallel test will perform a complete installation of - Postgres into a temporary directory, + Postgres into a temporary directory, start a private postmaster therein, and then run the test scripts. Finally it will kill the private postmaster (but the temporary directory isn't removed automatically). - - + + - - + + You should get on the screen (and also written to file ./regress.out) a series of statements stating which tests passed and which tests failed. Please note that it can be normal for some of the tests to "fail" due to platform-specific variations. See the next section for details on determining whether a "failure" is significant. - - + + Some of the tests, notably "numeric", can take a while, especially on slower platforms. Have patience. - - + + - - + + After running the tests and examining the results, type - + cd /usr/src/pgsql/src/test/regress gmake clean - + to recover the temporary disk space used by the tests. If you ran a sequential test, also type - + dropdb regression - - - + + + - + - - Regression Analysis + + Regression Analysis - + The actual outputs of the regression tests are in files in the ./results directory. The test script uses diff to compare each output file @@ -270,101 +317,101 @@ The runtime path is /usr/local/pgsql (other paths are possible). saved for your inspection in ./regression.diffs. (Or you can run diff yourself, if you prefer.) - + - + The files might not compare exactly. 
The test script will report any difference as a "failure", but the difference might be due to small cross-system differences in error message wording, math library behavior, etc. "Failures" of this type do not indicate a problem with - Postgres. - + Postgres. + - + Thus, it is necessary to examine the actual differences for each "failed" test to determine whether there is really a problem. The following paragraphs attempt to provide some guidance in determining whether a difference is significant or not. - + - - Error message differences + + Error message differences - + Some of the regression tests involve intentional invalid input values. Error messages can come from either the Postgres code or from the host platform system routines. In the latter case, the messages may vary between platforms, but should reflect similar information. These differences in messages will result in a "failed" regression test which can be validated by inspection. - + - + - - Date and time differences + + Date and time differences - + Most of the date and time results are dependent on timezone environment. The reference files are generated for timezone PST8PDT (Berkeley, California) and there will be apparent failures if the tests are not run with that timezone setting. The regression test driver sets environment variable PGTZ to PST8PDT to ensure proper results. - + - + Some of the queries in the "timestamp" test will fail if you run the test on the day of a daylight-savings time changeover, or the day before or after one. These queries assume that the intervals between midnight yesterday, midnight today and midnight tomorrow are exactly twenty-four hours ... which is wrong if daylight-savings time went into or out of effect meanwhile. - + - + There appear to be some systems which do not accept the recommended syntax for explicitly setting the local time zone rules; you may need to use a different PGTZ setting on such machines. - + - + Some systems using older timezone libraries fail to apply daylight-savings corrections to pre-1970 dates, causing pre-1970 PDT times to be displayed in PST instead. This will result in localized differences in the test results. - + - + - - Floating point differences + + Floating point differences - - Some of the tests involve computing 64-bit (float8) numbers from table + + Some of the tests involve computing 64-bit (float8) numbers from table columns. Differences in results involving mathematical functions of - float8 columns have been observed. The float8 + float8 columns have been observed. The float8 and geometry tests are particularly prone to small differences across platforms. Human eyeball comparison is needed to determine the real significance of these differences which are usually 10 places to the right of the decimal point. - + - + Some systems signal errors from pow() and exp() differently from the mechanism expected by the current Postgres code. - + - + - - Polygon differences + + Polygon differences - + Several of the tests involve operations on geographic date about the Oakland/Berkley CA street map. The map data is expressed as polygons - whose vertices are represented as pairs of float8 numbers (decimal + whose vertices are represented as pairs of float8 numbers (decimal latitude and longitude). Initially, some tables are created and loaded with geographic data, then some views are created which join two tables using the polygon intersection operator (##), then a select @@ -374,65 +421,65 @@ The runtime path is /usr/local/pgsql (other paths are possible). 
in the 2nd or 3rd place to the right of the decimal point. The SQL statements where these problems occur are the following: - + QUERY: SELECT * from street; QUERY: SELECT * from iexit; - - + + - + - - Random differences + + Random differences - + There is at least one case in the "random" test script that is intended to produce random results. This causes random to fail the regression test once in a while (perhaps once in every five to ten trials). Typing - + diff results/random.out expected/random.out - + should produce only one or a few lines of differences. You need not worry unless the random test always fails in repeated attempts. (On the other hand, if the random test is never reported to fail even in many trials of the regress tests, you probably should worry.) - + - + - - The <Quote>expected</Quote> files + + The "expected" files - - The ./expected/*.out files were adapted from the original monolithic - expected.input file provided by Jolly Chen et al. Newer versions of these + + The ./expected/*.out files were adapted from the original monolithic + expected.input file provided by Jolly Chen et al. Newer versions of these files generated on various development machines have been substituted after careful (?) inspection. Many of the development machines are running a Unix OS variant (FreeBSD, Linux, etc) on Ix86 hardware. - The original expected.input file was created on a SPARC Solaris 2.4 - system using the postgres5-1.02a5.tar.gz source tree. It was compared + The original expected.input file was created on a SPARC Solaris 2.4 + system using the postgres5-1.02a5.tar.gz source tree. It was compared with a file created on an I386 Solaris 2.4 system and the differences were only in the floating point polygons in the 3rd digit to the right of the decimal point. - The original sample.regress.out file was from the postgres-1.01 release + The original sample.regress.out file was from the postgres-1.01 release constructed by Jolly Chen. It may - have been created on a DEC ALPHA machine as the Makefile.global + have been created on a DEC ALPHA machine as the Makefile.global in the postgres-1.01 release has PORTNAME=alpha. - + - + - + - - Platform-specific comparison files + + Platform-specific comparison files - + Since some of the tests inherently produce platform-specific results, we have provided a way to supply platform-specific result comparison files. Frequently, the same variation applies to multiple platforms; @@ -441,42 +488,59 @@ The runtime path is /usr/local/pgsql (other paths are possible). So, to eliminate bogus test "failures" for a particular platform, you must choose or make a variant result file, and then add a line to the mapping file, which is "resultmap". - + - + Each line in the mapping file is of the form - + testname/platformnamepattern=comparisonfilename - + The test name is just the name of the particular regression test module. The platform name pattern is a pattern in the style of expr(1) (that is, a regular expression with an implicit ^ anchor at the start). It is matched against the platform name as printed by config.guess. The comparison file name is the name of the substitute result comparison file. - + - + For example: the int2 regress test includes a deliberate entry of a value that is too large to fit in int2. 
The specific error message that is produced is platform-dependent; our reference platform emits - + ERROR: pg_atoi: error reading "100000": Numerical result out of range - + but a fair number of other Unix platforms emit - + ERROR: pg_atoi: error reading "100000": Result too large - + Therefore, we provide a variant comparison file, int2-too-large.out, that includes this spelling of the error message. To silence the bogus "failure" message on HPPA platforms, resultmap includes - + int2/hppa=int2-too-large - + which will trigger on any machine for which config.guess's output begins with 'hppa'. Other lines in resultmap select the variant comparison file for other platforms where it's appropriate. - + - + -
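   As an editor's illustration of the sort of statement involved, the following minimal SQL sketch reproduces the kind of deliberate int2 overflow described above (the table and column names here are illustrative only, not necessarily those used by the actual test script); the wording of the resulting ERROR message is what varies from platform to platform:

      CREATE TABLE int2_check (f1 int2);
      -- 100000 is deliberately outside the int2 range (-32768..32767),
      -- so this INSERT is expected to fail with a platform-dependent message
      INSERT INTO int2_check (f1) VALUES ('100000');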
+
+ + diff --git a/doc/src/sgml/release.sgml b/doc/src/sgml/release.sgml index 7b93b645c0..0df2c3ba83 100644 --- a/doc/src/sgml/release.sgml +++ b/doc/src/sgml/release.sgml @@ -1,5 +1,5 @@ @@ -19,9 +19,11 @@ $Header: /cvsroot/pgsql/doc/src/sgml/release.sgml,v 1.46 2000/05/02 17:06:10 mom --> - This release shows the continued growth of PostgreSQL. There are more - changes in 7.0 than in any previous release. Don't be concerned this is - a dot-zero release. We do our best to put out only solid releases, and + This release contains improvements in many areas, demonstrating + the continued growth of PostgreSQL. + There are more improvements and fixes in 7.0 than in any previous + release. The developers have confidence that this is the best + release yet; we do our best to put out only solid releases, and this one is no exception. @@ -49,7 +51,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/release.sgml,v 1.46 2000/05/02 17:06:10 mom Continuing on work started a year ago, the optimizer has been - overhauled, allowing improved query execution and better performance + improved, allowing better query plan selection and faster performance with less memory usage. @@ -80,7 +82,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/release.sgml,v 1.46 2000/05/02 17:06:10 mom - + @@ -102,10 +105,75 @@ $Header: /cvsroot/pgsql/doc/src/sgml/release.sgml,v 1.46 2000/05/02 17:06:10 mom A dump/restore using pg_dump is required for those wishing to migrate data from any previous release of Postgres. - For those upgrading from 6.5.*, you can use + For those upgrading from 6.5.*, you may instead use pg_upgrade to upgrade to this - release. + release; however, a full dump/reload installation is always the + most robust method for upgrades. + + + Interface and compatibility issues to consider for the new + release include: + + + + + The date/time types datetime and + timespan have been superceded by the + SQL92-defined types timestamp and + interval. Although there has been some effort to + ease the transition by allowing + Postgres to recognize + the deprecated type names and translate them to the new type + names, this mechanism may not be completely transparent to + your existing application. + + + + + + + + The optimizer has been substantially improved in the area of + query cost estimation. In some cases, this will result in + decreased query times as the optimizer makes a better choice + for the preferred plan. However, in a small number of cases, + usually involving pathological distributions of data, your + query times may go up. If you are dealing with large amounts + of data, you may want to check your queries to verify + performance. + + + + + + The JDBC and ODBC + interfaces have been upgraded and extended. + + + + + + The string function CHAR_LENGTH is now a + native function. Previous versions translated this into a call + to LENGTH, which could result in + ambiguity with other types implementing + LENGTH such as the geometric types. + + + + + @@ -114,7 +182,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/release.sgml,v 1.46 2000/05/02 17:06:10 mom Bug Fixes --------- -Prevent function calls with more than maximum number of arguments (Tom) +Prevent function calls exceeding maximum number of arguments (Tom) Improve CASE construct (Tom) Fix SELECT coalesce(f1,0) FROM int4_tbl GROUP BY f1 (Tom) Fix SELECT sentence.words[0] FROM sentence GROUP BY sentence.words[0] (Tom) @@ -125,15 +193,15 @@ Fix for SELECT a/2, a/2 FROM test_missing_target GROUP BY a/2 (Tom) Fix for subselects in INSERT ... SELECT (Tom) Prevent INSERT ... 
SELECT ... ORDER BY (Tom) Fixes for relations greater than 2GB, including vacuum -Improve communication of system table changes to other running backends (Tom) -Improve communication of user table modifications to other running backends (Tom) +Improve propagating system table changes to other backends (Tom) +Improve propagating user table changes to other backends (Tom) Fix handling of temp tables in complex situations (Bruce, Tom) -Allow table locking when tables opened, improving concurrent reliability (Tom) +Allow table locking at table open, improving concurrent reliability (Tom) Properly quote sequence names in pg_dump (Ross J. Reedstrom) Prevent DROP DATABASE while others accessing Prevent any rows from being returned by GROUP BY if no rows processed (Tom) Fix SELECT COUNT(1) FROM table WHERE ...' if no rows matching WHERE (Tom) -Fix pg_upgrade so it works for MVCC(Tom) +Fix pg_upgrade so it works for MVCC (Tom) Fix for SELECT ... WHERE x IN (SELECT ... HAVING SUM(x) > 1) (Tom) Fix for "f1 datetime DEFAULT 'now'" (Tom) Fix problems with CURRENT_DATE used in DEFAULT (Tom) @@ -141,8 +209,8 @@ Allow comment-only lines, and ;;; lines too. (Tom) Improve recovery after failed disk writes, disk full (Hiroshi) Fix cases where table is mentioned in FROM but not joined (Tom) Allow HAVING clause without aggregate functions (Tom) -Fix for "--" comment and no trailing newline, as seen in Perl -Improve pg_dump failure error reports (Bruce) +Fix for "--" comment and no trailing newline, as seen in perl interface +Improve pg_dump failure error reports (Bruce) Allow sorts and hashes to exceed 2GB file sizes (Tom) Fix for pg_dump dumping of inherited rules (Tom) Fix for NULL handling comparisons (Tom) @@ -197,8 +265,7 @@ Update jdbc protocol to 2.0 (Jens GlaserMike Mascari) libpq's PQsetNoticeProcessor function now returns previous hook(Peter E) @@ -227,8 +293,7 @@ Change backend-side COPY to write files with permissions 644 not 666 (Tom) Force permissions on PGDATA directory to be secure, even if it exists (Tom) Added psql LASTOID variable to return last inserted oid (Peter E) Allow concurrent vacuum and remove pg_vlock vacuum lock file (Tom) -Add permissions check so only Postgres superuser or table owner can -vacuum (Peter E) +Add permissions check for vacuum (Peter E) New libpq functions to allow asynchronous connections: PQconnectStart(), PQconnectPoll(), PQresetStart(), PQresetPoll(), PQsetenvStart(), PQsetenvPoll(), PQsetenvAbort (Ewan Mellor) @@ -236,8 +301,8 @@ New libpq PQsetenv() function (Ewan Mellor) create/alter user extension (Peter E) New postmaster.pid and postmaster.opts under $PGDATA (Tatsuo) New scripts for create/drop user/db (Peter E) -Major psql overhaul(Peter E) -Add const to libpq interface(Peter E) +Major psql overhaul (Peter E) +Add const to libpq interface (Peter E) New libpq function PQoidValue (Peter E) Show specific non-aggregate causing problem with GROUP BY (Tom) Make changes to pg_shadow recreate pg_pwd file (Peter E) @@ -281,12 +346,11 @@ Allow SELECT .. FOR UPDATE in PL/pgSQL (Hiroshi) Enable backward sequential scan even after reaching EOF (Hiroshi) Add btree indexing of boolean values, >= and <= (Don Baccus) Print current line number when COPY FROM fails (Massimo) -Recognize special case of POSIX time zone: "GMT+8" and "GMT-8" (Thomas) -Add DEC as synonym for "DECIMAL" (Thomas) +Recognize POSIX time zone e.g. 
"PST+8" and "GMT-8" (Thomas) +Add DEC as synonym for DECIMAL (Thomas) Add SESSION_USER as SQL92 keyword, same as CURRENT_USER (Thomas) -Implement column aliases (aka correlation names) and join syntax (Thomas) -Allow queries like SELECT a FROM t1 tx (a) (Thomas) -Allow queries like SELECT * FROM t1 NATURAL JOIN t2 (Thomas) +Implement SQL92 column aliases (aka correlation names) (Thomas) +Implement SQL92 join syntax (Thomas) Make INTERVAL reserved word allowed as a column identifier (Thomas) Implement REINDEX command (Hiroshi) Accept ALL in aggregate function SUM(ALL col) (Tom) @@ -322,9 +386,8 @@ Allow bare column names to be subscripted as arrays (Tom) Improve type casting of int and float constants (Tom) Cleanups for int8 inputs, range checking, and type conversion (Tom) Fix for SELECT timespan('21:11:26'::time) (Tom) -Fix for netmask('x.x.x.x/0') is 255.255.255.255 instead of 0.0.0.0 - (Oleg Sharoiko) -Add btree index on NUMERIC(Jan) +netmask('x.x.x.x/0') is 255.255.255.255 instead of 0.0.0.0 (Oleg Sharoiko) +Add btree index on NUMERIC (Jan) Perl fix for large objects containing NUL characters (Douglas Thomson) ODBC fix for for large objects (free) Fix indexing of cidr data type @@ -338,26 +401,25 @@ Make char_length()/octet_length including trailing blanks (Tom) Made abstime/reltime use int4 instead of time_t (Peter E) New lztext data type for compressed text fields Revise code to handle coercion of int and float constants (Tom) -New C-routines to implement a BIT and BIT VARYING type in /contrib - (Adriaan Joubert) +Start at new code to implement a BIT and BIT VARYING type (Adriaan Joubert) NUMERIC now accepts scientific notation (Tom) NUMERIC to int4 rounds (Tom) Convert float4/8 to NUMERIC properly (Tom) Allow type conversion with NUMERIC (Thomas) Make ISO date style (2000-02-16 09:33) the default (Thomas) -Add NATIONAL CHAR [ VARYING ] +Add NATIONAL CHAR [ VARYING ] (Thomas) Allow NUMERIC round and trunc to accept negative scales (Tom) New TIME WITH TIME ZONE type (Thomas) Add MAX()/MIN() on time type (Thomas) Add abs(), mod(), fac() for int8 (Thomas) -Add round(), sqrt(), cbrt(), pow() -Rename NUMERIC power() to pow() -Improved TRANSLATE() function +Rename functions to round(), sqrt(), cbrt(), pow() for float8 (Thomas) +Add transcendental math functions (e.g. 
sin(), acos()) for float8 (Thomas) +Add exp() and ln() for NUMERIC type +Rename NUMERIC power() to pow() (Thomas) +Improved TRANSLATE() function (Edwin Ramirez, Tom) Allow X=-Y operators (Tom) -Add exp() and ln() as NUMERIC types -Allow SELECT float8(COUNT(*)) / (SELECT COUNT(*) FROM int4_tbl) FROM int4_tbl - GROUP BY f1; (Tom) -Allow LOCALE to use indexes in regular expression searches(Tom) +Allow SELECT float8(COUNT(*))/(SELECT COUNT(*) FROM t) FROM t GROUP BY f1; (Tom) +Allow LOCALE to use indexes in regular expression searches (Tom) Allow creation of functional indexes to use default types Performance @@ -378,13 +440,12 @@ Prefer index scans in cases where ORDER BY/GROUP BY is required (Tom) Allocate large memory requests in fix-sized chunks for performance (Tom) Fix vacuum's performance by reducing memory allocation requests (Tom) Implement constant-expression simplification (Bernard Frankpitt, Tom) -Allow more than first column to be used to determine start of index scan - (Hiroshi) +Use secondary columns to be used to determine start of index scan (Hiroshi) Prevent quadruple use of disk space when doing internal sorting (Tom) Faster sorting by calling fewer functions (Tom) Create system indexes to match all system caches (Bruce, Hiroshi) -Make system caches use system indexes(Bruce) -Make all system indexes unique(Bruce) +Make system caches use system indexes (Bruce) +Make all system indexes unique (Bruce) Improve pg_statistics management for VACUUM speed improvement (Tom) Flush backend cache less frequently (Tom, Hiroshi) COPY now reuses previous memory allocation, improving performance (Tom) @@ -398,17 +459,17 @@ New SET variable to control optimizer costs (Tom) Optimizer queries based on LIMIT, OFFSET, and EXISTS qualifications (Tom) Reduce optimizer internal housekeeping of join paths for speedup (Tom) Major subquery speedup (Tom) -Fewer fsync writes when fsync is not disabled(Tom) -Improved LIKE optimizer estimates(Tom) -Prevent fsync in SELECT-only queries(Vadim) -Make index creation use psort code, because it is now faster(Tom) +Fewer fsync writes when fsync is not disabled (Tom) +Improved LIKE optimizer estimates (Tom) +Prevent fsync in SELECT-only queries (Vadim) +Make index creation use psort code, because it is now faster (Tom) Allow creation of sort temp tables > 1 Gig Source Tree Changes ------------------- Fix for linux PPC compile New generic expression-tree-walker subroutine (Tom) -Change form() to varargform() to prevent portability problems. +Change form() to varargform() to prevent portability problems Improved range checking for large integers on Alphas Clean up #include in /include directory (Bruce) Add scripts for checking includes (Bruce) @@ -418,9 +479,9 @@ Enable WIN32 compilation of libpq Alpha spinlock fix from Uncle George Overhaul of optimizer data structures (Tom) Fix to cygipc library (Yutaka Tanida) -Allow pgsql to work on newer Cygwin snapshots(Dan) +Allow pgsql to work on newer Cygwin snapshots (Dan) New catalog version number (Tom) -Add Linux ARM. +Add Linux ARM Rename heap_replace to heap_update Update for QNX (Dr. Andreas Kardos) New platform-specific regression handling (Tom) @@ -1636,7 +1697,7 @@ Support for client-side environment variables to specify time zone and date styl Socket interface for client/server connection. This is the default now so you may need to start postmaster with the --i flag. + flag. @@ -1646,11 +1707,12 @@ Better password authorization mechanisms. 
Default table permissions have changed - - -Old-style time travel has been removed. Performance has been improved. - - + + + Old-style time travel + has been removed. Performance has been improved. + + diff --git a/doc/src/sgml/rules.sgml b/doc/src/sgml/rules.sgml index 2431ffd697..f17cfd77b0 100644 --- a/doc/src/sgml/rules.sgml +++ b/doc/src/sgml/rules.sgml @@ -854,18 +854,18 @@ There was a long time where the Postgres rule system was considered broken. The use of rules was not - recommended and the only part working where view rules. And also - these view rules made problems because the rule system wasn't able - to apply them properly on other statements than a SELECT (for + recommended and the only part working was view rules. And also + these view rules gave problems because the rule system wasn't able + to apply them properly on statements other than a SELECT (for example an UPDATE that used data from a view didn't work). - During that time, development moved on and many features where + During that time, development moved on and many features were added to the parser and optimizer. The rule system got more and more out of sync with their capabilities and it became harder and harder - to start fixing it. Thus, noone did. + to start fixing it. Thus, no one did. @@ -2088,7 +2088,7 @@ Merge Join - Another situation are cases on UPDATE where it depends on the + Another situation is cases on UPDATE where it depends on the change of an attribute if an action should be performed or not. In Postgres version 6.4, the attribute specification for rule events is disabled (it will have @@ -2096,7 +2096,7 @@ Merge Join - stay tuned). So for now the only way to create a rule as in the shoelace_log example is to do it with a rule qualification. That results in an extra query that is - performed allways, even if the attribute of interest cannot + performed always, even if the attribute of interest cannot change at all because it does not appear in the targetlist of the initial query. When this is enabled again, it will be one more advantage of rules over triggers. Optimization of @@ -2108,7 +2108,7 @@ Merge Join decision. The rule system will know it by looking up the targetlist and will suppress the additional query completely if the attribute isn't touched. So the rule, qualified or not, - will only do it's scan's if there ever could be something to do. + will only do its scans if there ever could be something to do. @@ -2121,3 +2121,20 @@ Merge Join + + diff --git a/doc/src/sgml/runtime.sgml b/doc/src/sgml/runtime.sgml index c4989e92cf..dbe984c7b3 100644 --- a/doc/src/sgml/runtime.sgml +++ b/doc/src/sgml/runtime.sgml @@ -1,5 +1,5 @@ @@ -16,7 +16,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/runtime.sgml,v 1.9 2000/04/23 00:25:06 tgl All Postgres commands that are executed directly from a Unix shell are - found in the directory .../bin. Including this directory in + found in the directory .../bin. Including this directory in your search path will make executing the commands easier. diff --git a/doc/src/sgml/signals.sgml b/doc/src/sgml/signals.sgml index 23625aed41..7f7e597e0b 100644 --- a/doc/src/sgml/signals.sgml +++ b/doc/src/sgml/signals.sgml @@ -191,7 +191,7 @@ FloatExceptionHandler -kill(*,signal) means sending a signal to all backends. +"kill(*,signal)" means sending a signal to all backends. 
@@ -247,3 +247,20 @@ cat old_pg_options > $DATA_DIR/pg_options + + diff --git a/doc/src/sgml/spi.sgml b/doc/src/sgml/spi.sgml index ada3d321ed..d453b1ed70 100644 --- a/doc/src/sgml/spi.sgml +++ b/doc/src/sgml/spi.sgml @@ -157,7 +157,10 @@ Return status Usage -XXX thomas 1997-12-24 + + diff --git a/doc/src/sgml/sql.sgml b/doc/src/sgml/sql.sgml index e030d4dbf8..f61b085c2f 100644 --- a/doc/src/sgml/sql.sgml +++ b/doc/src/sgml/sql.sgml @@ -1,5 +1,5 @@ @@ -24,7 +24,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/sql.sgml,v 1.8 2000/04/07 13:30:58 thomas E SQL has become the most popular relational query language. - The name SQL is an abbreviation for + The name "SQL" is an abbreviation for Structured Query Language. In 1974 Donald Chamberlin and others defined the language SEQUEL (Structured English Query @@ -759,8 +759,8 @@ tr(A,B)=t∧tr(C,D)=t can be formulated using relational algebra can also be formulated using the relational calculus and vice versa. This was first proved by E. F. Codd in - 1972. This proof is based on an algorithm (Codd's reduction - algorithm) by which an arbitrary expression of the relational + 1972. This proof is based on an algorithm ("Codd's reduction + algorithm") by which an arbitrary expression of the relational calculus can be reduced to a semantically equivalent expression of relational algebra. For a more detailed discussion on that refer to diff --git a/doc/src/sgml/start.sgml b/doc/src/sgml/start.sgml index ccb43fbb0f..d895fe52fb 100644 --- a/doc/src/sgml/start.sgml +++ b/doc/src/sgml/start.sgml @@ -1,5 +1,5 @@ @@ -19,7 +19,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/start.sgml,v 1.10 2000/04/07 13:30:58 thoma the database directories and started the postmaster process. This person does not have to be the Unix - superuser (root) + superuser ("root") or the computer system administrator; a person can install and use Postgres without any special accounts or privileges. @@ -34,9 +34,9 @@ $Header: /cvsroot/pgsql/doc/src/sgml/start.sgml,v 1.10 2000/04/07 13:30:58 thoma Throughout this manual, any examples that begin with - the character % are commands that should be typed + the character "%" are commands that should be typed at the Unix shell prompt. Examples that begin with the - character * are commands in the Postgres query + character "*" are commands in the Postgres query language, Postgres SQL. @@ -346,7 +346,7 @@ mydb=> workspace maintained by the terminal monitor. The psql program responds to escape codes that begin - with the backslash character, \ For example, you + with the backslash character, "\" For example, you can get help on the syntax of various Postgres SQL commands by typing: @@ -364,7 +364,7 @@ mydb=> \g This tells the server to process the query. If you - terminate your query with a semicolon, the \g is not + terminate your query with a semicolon, the "\g" is not necessary. psql will automatically process semicolon terminated queries. @@ -386,9 +386,9 @@ mydb=> \q White space (i.e., spaces, tabs and newlines) may be used freely in SQL queries. Single-line comments are denoted by - --. Everything after the dashes up to the end of the + "--". Everything after the dashes up to the end of the line is ignored. Multiple-line comments, and comments within a line, - are denoted by /* ... */ + are denoted by "/* ... */". 
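   A brief, self-contained sketch showing both comment styles in a single query (nothing here depends on any particular database objects):

      -- everything from the double dash to the end of the line is ignored
      SELECT 1 /* comments like this one
                  may span several lines */ + 1 AS answer;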
diff --git a/doc/src/sgml/syntax.sgml b/doc/src/sgml/syntax.sgml index 457e46f035..7a7f75a875 100644 --- a/doc/src/sgml/syntax.sgml +++ b/doc/src/sgml/syntax.sgml @@ -1,5 +1,5 @@ @@ -65,7 +65,7 @@ $Header: /cvsroot/pgsql/doc/src/sgml/syntax.sgml,v 1.19 2000/04/08 23:12:00 momj Any string can be specified as an identifier if surrounded by double quotes (like this!). Some care is required since such an identifier will be case sensitive - and will retain embedded whitespace other special characters. + and will retain embedded whitespace and most other special characters. @@ -84,6 +84,7 @@ EXPLAIN EXTEND LISTEN LOAD LOCK MOVE NEW NONE NOTIFY +OFFSET RESET SETOF SHOW UNLISTEN UNTIL @@ -98,19 +99,27 @@ VACUUM VERBOSE are allowed to be present as column labels, but not as identifiers: -CASE COALESCE CROSS CURRENT CURRENT_USER CURRENT_SESSION -DEC DECIMAL -ELSE END -FALSE FOREIGN +ALL ANY ASC BETWEEN BIT BOTH +CASE CAST CHAR CHARACTER CHECK COALESCE COLLATE COLUMN + CONSTRAINT CROSS CURRENT CURRENT_DATE CURRENT_TIME + CURRENT_TIMESTAMP CURRENT_USER +DEC DECIMAL DEFAULT DESC DISTINCT +ELSE END EXCEPT EXISTS EXTRACT +FALSE FLOAT FOR FOREIGN FROM FULL GLOBAL GROUP -LOCAL -NULLIF NUMERIC -ORDER -POSITION PRECISION -SESSION_USER -TABLE THEN TRANSACTION TRUE -USER -WHEN +HAVING +IN INNER INTERSECT INTO IS +JOIN +LEADING LEFT LIKE LOCAL +NATURAL NCHAR NOT NULL NULLIF NUMERIC +ON OR ORDER OUTER OVERLAPS +POSITION PRECISION PRIMARY PUBLIC +REFERENCES RIGHT +SELECT SESSION_USER SOME SUBSTRING +TABLE THEN TO TRANSACTION TRIM TRUE +UNION UNIQUE USER +VARCHAR +WHEN WHERE The following are Postgres @@ -118,12 +127,9 @@ WHEN or SQL3 reserved words: -ADD ALL ALTER AND ANY AS ASC -BEGIN BETWEEN BOTH BY -CASCADE CAST CHAR CHARACTER CHECK CLOSE - COLLATE COLUMN COMMIT CONSTRAINT CREATE - CURRENT_DATE CURRENT_TIME CURRENT_TIMESTAMP - CURSOR +ADD ALTER AND AS +BEGIN BY +CASCADE CLOSE COMMIT CREATE CURSOR DECLARE DEFAULT DELETE DESC DISTINCT DROP EXECUTE EXISTS EXTRACT FETCH FLOAT FOR FROM FULL @@ -148,10 +154,10 @@ WHERE WITH WORK The following are SQL92 reserved key words which are not Postgres reserved key words, but which if used as function names are always translated into the function - length: + CHAR_LENGTH: -CHAR_LENGTH CHARACTER_LENGTH +CHARACTER_LENGTH @@ -167,11 +173,27 @@ BOOLEAN DOUBLE FLOAT INT INTEGER INTERVAL REAL SMALLINT + The following are not keywords of any kind, but when used in the + context of a type name are translated into a native + Postgres type, and when used in the + context of a function name are translated into a native function: + + +DATETIME TIMESPAN + + + (translated to TIMESTAMP and INTERVAL, + respectively). This feature is intended to help with + transitioning to v7.0, and will be removed in the next full + release (likely v7.1). + + + The following are either SQL92 or SQL3 reserved key words which are not key words in Postgres. 
These have no proscribed usage in Postgres - at the time of writing (v6.5) but may become reserved key words in the + at the time of writing (v7.0) but may become reserved key words in the future: @@ -185,9 +207,10 @@ BOOLEAN DOUBLE FLOAT INT INTEGER INTERVAL REAL SMALLINT ALLOCATE ARE ASSERTION AT AUTHORIZATION AVG -BIT BIT_LENGTH -CASCADED CATALOG COLLATION CONNECT CONNECTION - CONTINUE CONVERT CORRESPONDING COUNT +BIT_LENGTH +CASCADED CATALOG CHAR_LENGTH CHARACTER_LENGTH COLLATION + CONNECT CONNECTION CONTINUE CONVERT CORRESPONDING COUNT + CURRENT_SESSION DATE DEALLOCATE DEC DESCRIBE DESCRIPTOR DIAGNOSTICS DISCONNECT DOMAIN ESCAPE EXCEPT EXCEPTION EXEC EXTERNAL @@ -231,20 +254,21 @@ WHENEVER WRITE ACCESS AFTER AGGREGATE BACKWARD BEFORE -CACHE CREATEDB CREATEUSER CYCLE +CACHE COMMENT CREATEDB CREATEUSER CYCLE DATABASE DELIMITERS EACH ENCODING EXCLUSIVE -FORWARD FUNCTION +FORCE FORWARD FUNCTION HANDLER INCREMENT INDEX INHERITS INSENSITIVE INSTEAD ISNULL LANCOMPILER LOCATION MAXVALUE MINVALUE MODE -NOCREATEDB NOCREATEUSER NOTHING NOTNULL +NOCREATEDB NOCREATEUSER NOTHING NOTIFY NOTNULL OIDS OPERATOR PASSWORD PROCEDURAL -RECIPE RENAME RETURNS ROW RULE +RECIPE REINDEX RENAME RETURNS ROW RULE SEQUENCE SERIAL SHARE START STATEMENT STDIN STDOUT -TRUSTED +TEMP TRUSTED +UNLISTEN UNTIL VALID VERSION diff --git a/doc/src/sgml/trigger.sgml b/doc/src/sgml/trigger.sgml index e9e6d9bc44..1e76524bae 100644 --- a/doc/src/sgml/trigger.sgml +++ b/doc/src/sgml/trigger.sgml @@ -1,187 +1,332 @@ - -Triggers - - -Postgres has various client interfaces -such as Perl, Tcl, Python and C, as well as two -Procedural Languages -(PL). It is also possible -to call C functions as trigger actions. Note that STATEMENT-level trigger -events are not supported in the current version. You can currently specify -BEFORE or AFTER on INSERT, DELETE or UPDATE of a tuple as a trigger event. - - - -Trigger Creation - - - If a trigger event occurs, the trigger manager (called by the Executor) -initializes the global structure TriggerData *CurrentTriggerData (described -below) and calls the trigger function to handle the event. - - - - The trigger function must be created before the trigger is created as a -function taking no arguments and returns opaque. - - - - The syntax for creating triggers is as follows: - - - CREATE TRIGGER <trigger name> <BEFORE|AFTER> <INSERT|DELETE|UPDATE> - ON <relation name> FOR EACH <ROW|STATEMENT> - EXECUTE PROCEDURE <procedure name> (<function args>); - - - - - The name of the trigger is used if you ever have to delete the trigger. -It is used as an argument to the DROP TRIGGER command. - - - - The next word determines whether the function is called before or after -the event. - - - - The next element of the command determines on what event(s) will trigger -the function. Multiple events can be specified separated by OR. - - - - The relation name determines which table the event applies to. - - - - The FOR EACH statement determines whether the trigger is fired for each -affected row or before (or after) the entire statement has completed. - - - - The procedure name is the C function called. - - - - The args are passed to the function in the CurrentTriggerData structure. -The purpose of passing arguments to the function is to allow different -triggers with similar requirements to call the same function. - - - - Also, function may be used for triggering different relations (these -functions are named as "general trigger functions"). 
- - - - As example of using both features above, there could be a general -function that takes as its arguments two field names and puts the current -user in one and the current timestamp in the other. This allows triggers to -be written on INSERT events to automatically track creation of records in a -transaction table for example. It could also be used as a "last updated" -function if used in an UPDATE event. - - - - Trigger functions return HeapTuple to the calling Executor. This -is ignored for triggers fired after an INSERT, DELETE or UPDATE operation -but it allows BEFORE triggers to: - - - return NULL to skip the operation for the current tuple (and so the - tuple will not be inserted/updated/deleted); - - return a pointer to another tuple (INSERT and UPDATE only) which will - be inserted (as the new version of the updated tuple if UPDATE) instead - of original tuple. - - - - Note, that there is no initialization performed by the CREATE TRIGGER -handler. This will be changed in the future. Also, if more than one trigger -is defined for the same event on the same relation, the order of trigger -firing is unpredictable. This may be changed in the future. - - - - If a trigger function executes SQL-queries (using SPI) then these queries -may fire triggers again. This is known as cascading triggers. There is no -explicit limitation on the number of cascade levels. - - - - If a trigger is fired by INSERT and inserts a new tuple in the same -relation then this trigger will be fired again. Currently, there is nothing -provided for synchronization (etc) of these cases but this may change. At -the moment, there is function funny_dup17() in the regress tests which uses -some techniques to stop recursion (cascading) on itself... - - - - - -Interaction with the Trigger Manager - - - As mentioned above, when function is called by the trigger manager, -structure TriggerData *CurrentTriggerData is NOT NULL and initialized. So -it is better to check CurrentTriggerData against being NULL at the start -and set it to NULL just after fetching the information to prevent calls to -a trigger function not from the trigger manager. - - - - struct TriggerData is defined in src/include/commands/trigger.h: - - + + Triggers + + + Postgres has various client interfaces + such as Perl, Tcl, Python and C, as well as three + Procedural Languages + (PL). It is also possible + to call C functions as trigger actions. Note that STATEMENT-level trigger + events are not supported in the current version. You can currently specify + BEFORE or AFTER on INSERT, DELETE or UPDATE of a tuple as a trigger event. + + + + Trigger Creation + + + If a trigger event occurs, the trigger manager (called by the Executor) + initializes the global structure TriggerData *CurrentTriggerData (described + below) and calls the trigger function to handle the event. + + + + The trigger function must be created before the trigger is created as a + function taking no arguments and returns opaque. + + + + The syntax for creating triggers is as follows: + + +CREATE TRIGGER trigger [ BEFORE | AFTER ] [ INSERT | DELETE | UPDATE [ OR ... ] ] + ON relation FOR EACH [ ROW | STATEMENT ] + EXECUTE PROCEDURE procedure + (args); + + + where the arguments are: + + + + + trigger + + + + The name of the trigger is + used if you ever have to delete the trigger. + It is used as an argument to the DROP TRIGGER command. + + + + + + BEFORE + AFTER + + + Determines whether the function is called before or after + the event. 
+ + + + + + INSERT + DELETE + UPDATE + + + The next element of the command determines on what event(s) will trigger + the function. Multiple events can be specified separated by OR. + + + + + + relation + + + The relation name determines which table the event applies to. + + + + + + ROW + STATEMENT + + + The FOR EACH clause determines whether the trigger is fired for each + affected row or before (or after) the entire statement has completed. + + + + + + procedure + + + The procedure name is the C function called. + + + + + + args + + + The arguments passed to the function in the CurrentTriggerData structure. + The purpose of passing arguments to the function is to allow different + triggers with similar requirements to call the same function. + + + + Also, procedure + may be used for triggering different relations (these + functions are named as "general trigger functions"). + + + + As example of using both features above, there could be a general + function that takes as its arguments two field names and puts the current + user in one and the current timestamp in the other. This allows triggers to + be written on INSERT events to automatically track creation of records in a + transaction table for example. It could also be used as a "last updated" + function if used in an UPDATE event. + + + + + + + + Trigger functions return HeapTuple to the calling Executor. This + is ignored for triggers fired after an INSERT, DELETE or UPDATE operation + but it allows BEFORE triggers to: + + + + + Return NULL to skip the operation for the current tuple (and so the + tuple will not be inserted/updated/deleted). + + + + + + Return a pointer to another tuple (INSERT and UPDATE only) which will + be inserted (as the new version of the updated tuple if UPDATE) instead + of original tuple. + + + + + + + Note that there is no initialization performed by the CREATE TRIGGER + handler. This will be changed in the future. Also, if more than one trigger + is defined for the same event on the same relation, the order of trigger + firing is unpredictable. This may be changed in the future. + + + + If a trigger function executes SQL-queries (using SPI) then these queries + may fire triggers again. This is known as cascading triggers. There is no + explicit limitation on the number of cascade levels. + + + + If a trigger is fired by INSERT and inserts a new tuple in the same + relation then this trigger will be fired again. Currently, there is nothing + provided for synchronization (etc) of these cases but this may change. At + the moment, there is function funny_dup17() in the regress tests which uses + some techniques to stop recursion (cascading) on itself... + + + + + Interaction with the Trigger Manager + + + As mentioned above, when function is called by the trigger manager, + structure TriggerData *CurrentTriggerData is NOT NULL and initialized. So + it is better to check CurrentTriggerData against being NULL at the start + and set it to NULL just after fetching the information to prevent calls to + a trigger function not from the trigger manager. + + + + struct TriggerData is defined in src/include/commands/trigger.h: + + typedef struct TriggerData { - TriggerEvent tg_event; - Relation tg_relation; - HeapTuple tg_trigtuple; - HeapTuple tg_newtuple; - Trigger *tg_trigger; + TriggerEvent tg_event; + Relation tg_relation; + HeapTuple tg_trigtuple; + HeapTuple tg_newtuple; + Trigger *tg_trigger; } TriggerData; - - - -tg_event - describes event for which the function is called. 
You may use the - following macros to examine tg_event: - - TRIGGER_FIRED_BEFORE(event) returns TRUE if trigger fired BEFORE; - TRIGGER_FIRED_AFTER(event) returns TRUE if trigger fired AFTER; - TRIGGER_FIRED_FOR_ROW(event) returns TRUE if trigger fired for - ROW-level event; - TRIGGER_FIRED_FOR_STATEMENT(event) returns TRUE if trigger fired for - STATEMENT-level event; - TRIGGER_FIRED_BY_INSERT(event) returns TRUE if trigger fired by INSERT; - TRIGGER_FIRED_BY_DELETE(event) returns TRUE if trigger fired by DELETE; - TRIGGER_FIRED_BY_UPDATE(event) returns TRUE if trigger fired by UPDATE. - -tg_relation - is pointer to structure describing the triggered relation. Look at - src/include/utils/rel.h for details about this structure. The most - interest things are tg_relation->rd_att (descriptor of the relation - tuples) and tg_relation->rd_rel->relname (relation's name. This is not - char*, but NameData. Use SPI_getrelname(tg_relation) to get char* if - you need a copy of name). - -tg_trigtuple - is a pointer to the tuple for which the trigger is fired. This is the tuple - being inserted (if INSERT), deleted (if DELETE) or updated (if UPDATE). - If INSERT/DELETE then this is what you are to return to Executor if - you don't want to replace tuple with another one (INSERT) or skip the - operation. - -tg_newtuple - is a pointer to the new version of tuple if UPDATE and NULL if this is - for an INSERT or a DELETE. This is what you are to return to Executor if - UPDATE and you don't want to replace this tuple with another one or skip - the operation. - -tg_trigger - is pointer to structure Trigger defined in src/include/utils/rel.h: - + + + where the members are defined as follows: + + + + tg_event + + + describes the event for which the function is called. You may use the + following macros to examine tg_event: + + + + TRIGGER_FIRED_BEFORE(tg_event) + + + returns TRUE if trigger fired BEFORE. + + + + + + TRIGGER_FIRED_AFTER(tg_event) + + + Returns TRUE if trigger fired AFTER. + + + + + + TRIGGER_FIRED_FOR_ROW(event) + + + Returns TRUE if trigger fired for + a ROW-level event. + + + + + + TRIGGER_FIRED_FOR_STATEMENT(event) + + + Returns TRUE if trigger fired for + STATEMENT-level event. + + + + + + TRIGGER_FIRED_BY_INSERT(event) + + + Returns TRUE if trigger fired by INSERT. + + + + + + TRIGGER_FIRED_BY_DELETE(event) + + + Returns TRUE if trigger fired by DELETE. + + + + + + TRIGGER_FIRED_BY_UPDATE(event) + + + Returns TRUE if trigger fired by UPDATE. + + + + + + + + + + tg_relation + + + is a pointer to structure describing the triggered relation. Look at + src/include/utils/rel.h for details about this structure. The most + interest things are tg_relation->rd_att (descriptor of the relation + tuples) and tg_relation->rd_rel->relname (relation's name. This is not + char*, but NameData. Use SPI_getrelname(tg_relation) to get char* if + you need a copy of name). + + + + + + tg_trigtuple + + + is a pointer to the tuple for which the trigger is fired. This is the tuple + being inserted (if INSERT), deleted (if DELETE) or updated (if UPDATE). + If INSERT/DELETE then this is what you are to return to Executor if + you don't want to replace tuple with another one (INSERT) or skip the + operation. + + + + + + tg_newtuple + + + is a pointer to the new version of tuple if UPDATE and NULL if this is + for an INSERT or a DELETE. This is what you are to return to Executor if + UPDATE and you don't want to replace this tuple with another one or skip + the operation. 
+ + + + + + tg_trigger + + + is pointer to structure Trigger defined in src/include/utils/rel.h: + + typedef struct Trigger { Oid tgoid; @@ -197,64 +342,72 @@ typedef struct Trigger int16 tgattr[FUNC_MAX_ARGS]; char **tgargs; } Trigger; - - tgname is the trigger's name, tgnargs is number of arguments in tgargs, - tgargs is an array of pointers to the arguments specified in the CREATE - TRIGGER statement. Other members are for internal use only. - - - - - -Visibility of Data Changes - - - Postgres data changes visibility rule: during a query execution, data -changes made by the query itself (via SQL-function, SPI-function, triggers) -are invisible to the query scan. For example, in query - - - INSERT INTO a SELECT * FROM a - - - tuples inserted are invisible for SELECT' scan. In effect, this -duplicates the database table within itself (subject to unique index -rules, of course) without recursing. - - - - But keep in mind this notice about visibility in the SPI documentation: - - - Changes made by query Q are visible by queries which are started after - query Q, no matter whether they are started inside Q (during the - execution of Q) or after Q is done. - - - - - This is true for triggers as well so, though a tuple being inserted -(tg_trigtuple) is not visible to queries in a BEFORE trigger, this tuple -(just inserted) is visible to queries in an AFTER trigger, and to queries -in BEFORE/AFTER triggers fired after this! - - - - -Examples - - - There are more complex examples in in src/test/regress/regress.c and -in contrib/spi. - - - - Here is a very simple example of trigger usage. Function trigf reports -the number of tuples in the triggered relation ttest and skips the -operation if the query attempts to insert NULL into x (i.e - it acts as a -NOT NULL constraint but doesn't abort the transaction). - - + + + where + tgname is the trigger's name, tgnargs is number of arguments in tgargs, + tgargs is an array of pointers to the arguments specified in the CREATE + TRIGGER statement. Other members are for internal use only. + + + + + + + + + Visibility of Data Changes + + + Postgres data changes visibility rule: during a query execution, data + changes made by the query itself (via SQL-function, SPI-function, triggers) + are invisible to the query scan. For example, in query + + +INSERT INTO a SELECT * FROM a; + + + tuples inserted are invisible for SELECT scan. In effect, this + duplicates the database table within itself (subject to unique index + rules, of course) without recursing. + + + + But keep in mind this notice about visibility in the SPI documentation: + +
+ +Changes made by query Q are visible to queries which are started after +query Q, no matter whether they are started inside Q (during the +execution of Q) or after Q is done. +
+
+ + + This is true for triggers as well: although a tuple being inserted + (tg_trigtuple) is not visible to queries in a BEFORE trigger, the + just-inserted tuple is visible to queries in an AFTER trigger, and to + queries in BEFORE/AFTER triggers fired after it. +
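   A short SQL sketch of the visibility rule above, using the same table a as in the example earlier in this section (assuming it starts out with two rows):

      CREATE TABLE a (x int4);
      INSERT INTO a VALUES (1);
      INSERT INTO a VALUES (2);
      -- the SELECT scan does not see the rows inserted by this same query,
      -- so the table is doubled exactly once rather than recursing
      INSERT INTO a SELECT * FROM a;
      SELECT count(*) FROM a;    -- returns 4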
+ + + Examples + + + There are more complex examples in + src/test/regress/regress.c and + in contrib/spi. + + + + Here is a very simple example of trigger usage. Function trigf reports + the number of tuples in the triggered relation ttest and skips the + operation if the query attempts to insert NULL into x (i.e - it acts as a + NOT NULL constraint but doesn't abort the transaction). + + #include "executor/spi.h" /* this is what you need to work with SPI */ #include "commands/trigger.h" /* -"- and triggers */ @@ -317,16 +470,19 @@ trigf() return (rettuple); } - - +
+ - - Now, compile and -create table ttest (x int4); + + Now, compile and + create table ttest (x int4): + + create function trigf () returns opaque as '...path_to_so' language 'c'; + - + vac=> create trigger tbefore before insert or update or delete on ttest for each row execute procedure trigf(); CREATE @@ -395,8 +551,25 @@ vac=> select * from ttest; x - (0 rows) - - - - -
+ + + + +
+ + diff --git a/doc/src/sgml/tutorial.sgml b/doc/src/sgml/tutorial.sgml index cbd6ec6a32..81f860ab4c 100644 --- a/doc/src/sgml/tutorial.sgml +++ b/doc/src/sgml/tutorial.sgml @@ -1,5 +1,5 @@ - PostgreSQL Tutorial - - Covering v6.5 for general release - - - The PostgreSQL Development Team - + PostgreSQL Tutorial + + Covering v7.0 for general release + + + The PostgreSQL Development Team + - - Thomas - Lockhart - - Caltech/JPL - - + + Thomas + Lockhart + + Caltech/JPL + + @@ -52,17 +53,17 @@ thomas 1998-02-23 TGL --> - (last updated 1999-05-19) - + (last updated 2000-05-01) + - - - PostgreSQL is Copyright © 1996-9 - by the Postgres Global Development Group. - - + + + PostgreSQL is Copyright © 1996-2000 + by PostgreSQL Inc. + + - + - - Summary + + Summary - - Postgres, + + Postgres, developed originally in the UC Berkeley Computer Science Department, pioneered many of the object-relational concepts now becoming available in some commercial databases. It provides SQL92/SQL3 language support, transaction integrity, and type extensibility. - PostgreSQL is an open-source descendant + PostgreSQL is an open-source descendant of this original Berkeley code. - - + + &intro; &sql; @@ -105,7 +106,7 @@ Your name here... --> - + diff --git a/doc/src/sgml/user.sgml b/doc/src/sgml/user.sgml index 674cf67f9c..80a29554a1 100644 --- a/doc/src/sgml/user.sgml +++ b/doc/src/sgml/user.sgml @@ -1,5 +1,5 @@ - + - PostgreSQL User's Guide - - Covering v6.5 for general release - - - The PostgreSQL Development Team - + PostgreSQL User's Guide + + Covering v7.0 for general release + + + The PostgreSQL Development Team + - - Thomas - Lockhart - - Caltech/JPL - - + + Thomas + Lockhart + + Caltech/JPL + + @@ -68,17 +68,17 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/user.sgml,v 1.18 2000/03/31 03:27:41 TGL --> - (last updated 1999-06-01) - + (last updated 2000-05-01) + - - - PostgreSQL is Copyright © 1996-9 - by the Postgres Global Development Group. - - + + + PostgreSQL is Copyright © 1996-2000 + by PostgreSQL Inc. + + - + - Summary + Summary - - Postgres, + + Postgres, developed originally in the UC Berkeley Computer Science Department, pioneered many of the object-relational concepts now becoming available in some commercial databases. It provides SQL92/SQL3 language support, transaction integrity, and type extensibility. - PostgreSQL is an open-source descendant + PostgreSQL is an open-source descendant of this original Berkeley code. - - + + &intro; &syntax; @@ -142,7 +142,7 @@ Your name here... 
--> - + @@ -123,16 +123,16 @@ select function hobbies (EMP) returns set of HOBBIES simply returns a base type, such as int4: - CREATE FUNCTION one() RETURNS int4 - AS 'SELECT 1 as RESULT' LANGUAGE 'sql'; +CREATE FUNCTION one() RETURNS int4 + AS 'SELECT 1 as RESULT' LANGUAGE 'sql'; - SELECT one() AS answer; +SELECT one() AS answer; - +-------+ - |answer | - +-------+ - |1 | - +-------+ + +-------+ + |answer | + +-------+ + |1 | + +-------+ @@ -149,16 +149,16 @@ select function hobbies (EMP) returns set of HOBBIES and $2: - CREATE FUNCTION add_em(int4, int4) RETURNS int4 - AS 'SELECT $1 + $2;' LANGUAGE 'sql'; +CREATE FUNCTION add_em(int4, int4) RETURNS int4 + AS 'SELECT $1 + $2;' LANGUAGE 'sql'; - SELECT add_em(1, 2) AS answer; +SELECT add_em(1, 2) AS answer; - +-------+ - |answer | - +-------+ - |3 | - +-------+ + +-------+ + |answer | + +-------+ + |3 | + +-------+ @@ -175,19 +175,19 @@ select function hobbies (EMP) returns set of HOBBIES salary would be if it were doubled: - CREATE FUNCTION double_salary(EMP) RETURNS int4 - AS 'SELECT $1.salary * 2 AS salary;' LANGUAGE 'sql'; - - SELECT name, double_salary(EMP) AS dream - FROM EMP - WHERE EMP.cubicle ~= '(2,1)'::point; - - - +-----+-------+ - |name | dream | - +-----+-------+ - |Sam | 2400 | - +-----+-------+ +CREATE FUNCTION double_salary(EMP) RETURNS int4 + AS 'SELECT $1.salary * 2 AS salary;' LANGUAGE 'sql'; + +SELECT name, double_salary(EMP) AS dream + FROM EMP + WHERE EMP.cubicle ~= '(2,1)'::point; + + + +-----+-------+ + |name | dream | + +-----+-------+ + |Sam | 2400 | + +-----+-------+ @@ -199,19 +199,19 @@ select function hobbies (EMP) returns set of HOBBIES notation attribute(class) and class.attribute interchangably: - -- - -- this is the same as: - -- SELECT EMP.name AS youngster FROM EMP WHERE EMP.age < 30 - -- - SELECT name(EMP) AS youngster - FROM EMP - WHERE age(EMP) < 30; - - +----------+ - |youngster | - +----------+ - |Sam | - +----------+ +-- +-- this is the same as: +-- SELECT EMP.name AS youngster FROM EMP WHERE EMP.age < 30 +-- +SELECT name(EMP) AS youngster + FROM EMP + WHERE age(EMP) < 30; + + +----------+ + |youngster | + +----------+ + |Sam | + +----------+ @@ -223,12 +223,12 @@ select function hobbies (EMP) returns set of HOBBIES that returns a single EMP instance: - CREATE FUNCTION new_emp() RETURNS EMP - AS 'SELECT \'None\'::text AS name, - 1000 AS salary, - 25 AS age, - \'(2,2)\'::point AS cubicle' - LANGUAGE 'sql'; +CREATE FUNCTION new_emp() RETURNS EMP + AS 'SELECT \'None\'::text AS name, + 1000 AS salary, + 25 AS age, + \'(2,2)\'::point AS cubicle' + LANGUAGE 'sql'; @@ -266,13 +266,13 @@ WARN::function declared to return type EMP does not retrieve (EMP.*) entire instance into another function. - SELECT name(new_emp()) AS nobody; +SELECT name(new_emp()) AS nobody; - +-------+ - |nobody | - +-------+ - |None | - +-------+ + +-------+ + |nobody | + +-------+ + |None | + +-------+ @@ -285,8 +285,8 @@ WARN::function declared to return type EMP does not retrieve (EMP.*) with function calls. - SELECT new_emp().name AS nobody; - WARN:parser: syntax error at or near "." +SELECT new_emp().name AS nobody; +WARN:parser: syntax error at or near "." @@ -303,19 +303,18 @@ WARN::function declared to return type EMP does not retrieve (EMP.*) specified as the function's returntype. 
- CREATE FUNCTION clean_EMP () RETURNS int4 - AS 'DELETE FROM EMP WHERE EMP.salary <= 0; - SELECT 1 AS ignore_this' - LANGUAGE 'sql'; - - SELECT clean_EMP(); - - +--+ - |x | - +--+ - |1 | - +--+ - +CREATE FUNCTION clean_EMP () RETURNS int4 + AS 'DELETE FROM EMP WHERE EMP.salary <= 0; +SELECT 1 AS ignore_this' + LANGUAGE 'sql'; + +SELECT clean_EMP(); + + +--+ + |x | + +--+ + |1 | + +--+ @@ -688,62 +687,62 @@ memmove(destination->data, buffer, 40); Suppose funcs.c look like: - #include <string.h> - #include "postgres.h" +#include <string.h> +#include "postgres.h" - /* By Value */ - - int - add_one(int arg) - { - return(arg + 1); - } - - /* By Reference, Fixed Length */ +/* By Value */ - Point * - makepoint(Point *pointx, Point *pointy ) - { - Point *new_point = (Point *) palloc(sizeof(Point)); - - new_point->x = pointx->x; - new_point->y = pointy->y; - - return new_point; - } - - /* By Reference, Variable Length */ - - text * - copytext(text *t) - { - /* - * VARSIZE is the total size of the struct in bytes. - */ - text *new_t = (text *) palloc(VARSIZE(t)); - memset(new_t, 0, VARSIZE(t)); - VARSIZE(new_t) = VARSIZE(t); - /* - * VARDATA is a pointer to the data region of the struct. - */ - memcpy((void *) VARDATA(new_t), /* destination */ - (void *) VARDATA(t), /* source */ - VARSIZE(t)-VARHDRSZ); /* how many bytes */ - return(new_t); - } - - text * - concat_text(text *arg1, text *arg2) - { - int32 new_text_size = VARSIZE(arg1) + VARSIZE(arg2) - VARHDRSZ; - text *new_text = (text *) palloc(new_text_size); - - memset((void *) new_text, 0, new_text_size); - VARSIZE(new_text) = new_text_size; - strncpy(VARDATA(new_text), VARDATA(arg1), VARSIZE(arg1)-VARHDRSZ); - strncat(VARDATA(new_text), VARDATA(arg2), VARSIZE(arg2)-VARHDRSZ); - return (new_text); - } +int +add_one(int arg) +{ + return(arg + 1); +} + +/* By Reference, Fixed Length */ + +Point * +makepoint(Point *pointx, Point *pointy ) +{ + Point *new_point = (Point *) palloc(sizeof(Point)); + + new_point->x = pointx->x; + new_point->y = pointy->y; + + return new_point; +} + +/* By Reference, Variable Length */ + +text * +copytext(text *t) +{ + /* + * VARSIZE is the total size of the struct in bytes. + */ + text *new_t = (text *) palloc(VARSIZE(t)); + memset(new_t, 0, VARSIZE(t)); + VARSIZE(new_t) = VARSIZE(t); + /* + * VARDATA is a pointer to the data region of the struct. 
+ */ + memcpy((void *) VARDATA(new_t), /* destination */ + (void *) VARDATA(t), /* source */ + VARSIZE(t)-VARHDRSZ); /* how many bytes */ + return(new_t); +} + +text * +concat_text(text *arg1, text *arg2) +{ + int32 new_text_size = VARSIZE(arg1) + VARSIZE(arg2) - VARHDRSZ; + text *new_text = (text *) palloc(new_text_size); + + memset((void *) new_text, 0, new_text_size); + VARSIZE(new_text) = new_text_size; + strncpy(VARDATA(new_text), VARDATA(arg1), VARSIZE(arg1)-VARHDRSZ); + strncat(VARDATA(new_text), VARDATA(arg2), VARSIZE(arg2)-VARHDRSZ); + return (new_text); +} @@ -751,17 +750,17 @@ memmove(destination->data, buffer, 40); On OSF/1 we would type: - CREATE FUNCTION add_one(int4) RETURNS int4 - AS 'PGROOT/tutorial/funcs.so' LANGUAGE 'c'; - - CREATE FUNCTION makepoint(point, point) RETURNS point - AS 'PGROOT/tutorial/funcs.so' LANGUAGE 'c'; - - CREATE FUNCTION concat_text(text, text) RETURNS text - AS 'PGROOT/tutorial/funcs.so' LANGUAGE 'c'; - - CREATE FUNCTION copytext(text) RETURNS text - AS 'PGROOT/tutorial/funcs.so' LANGUAGE 'c'; +CREATE FUNCTION add_one(int4) RETURNS int4 + AS 'PGROOT/tutorial/funcs.so' LANGUAGE 'c'; + +CREATE FUNCTION makepoint(point, point) RETURNS point + AS 'PGROOT/tutorial/funcs.so' LANGUAGE 'c'; + +CREATE FUNCTION concat_text(text, text) RETURNS text + AS 'PGROOT/tutorial/funcs.so' LANGUAGE 'c'; + +CREATE FUNCTION copytext(text) RETURNS text + AS 'PGROOT/tutorial/funcs.so' LANGUAGE 'c'; @@ -796,20 +795,20 @@ memmove(destination->data, buffer, 40); In the query above, we can define c_overpaid as: - #include "postgres.h" - #include "executor/executor.h" /* for GetAttributeByName() */ - - bool - c_overpaid(TupleTableSlot *t, /* the current instance of EMP */ - int4 limit) - { - bool isnull = false; - int4 salary; - salary = (int4) GetAttributeByName(t, "salary", &isnull); - if (isnull) - return (false); - return(salary > limit); - } +#include "postgres.h" +#include "executor/executor.h" /* for GetAttributeByName() */ + +bool +c_overpaid(TupleTableSlot *t, /* the current instance of EMP */ + int4 limit) +{ + bool isnull = false; + int4 salary; + salary = (int4) GetAttributeByName(t, "salary", &isnull); + if (isnull) + return (false); + return(salary > limit); +} @@ -827,9 +826,9 @@ memmove(destination->data, buffer, 40); call would look like: - char *str; - ... - str = (char *) GetAttributeByName(t, "name", &isnull) +char *str; +... +str = (char *) GetAttributeByName(t, "name", &isnull) @@ -838,8 +837,8 @@ memmove(destination->data, buffer, 40); know about the c_overpaid function: - * CREATE FUNCTION c_overpaid(EMP, int4) RETURNS bool - AS 'PGROOT/tutorial/obj/funcs.so' LANGUAGE 'c'; +* CREATE FUNCTION c_overpaid(EMP, int4) RETURNS bool + AS 'PGROOT/tutorial/obj/funcs.so' LANGUAGE 'c'; diff --git a/doc/src/sgml/xtypes.sgml b/doc/src/sgml/xtypes.sgml index 4e691ca319..62644ad6e6 100644 --- a/doc/src/sgml/xtypes.sgml +++ b/doc/src/sgml/xtypes.sgml @@ -1,20 +1,20 @@ - -Extending <Acronym>SQL</Acronym>: Types - - As previously mentioned, there are two kinds of types - in Postgres: base types (defined in a programming language) - and composite types (instances). - Examples in this section up to interfacing indices can - be found in complex.sql and complex.c. Composite examples - are in funcs.sql. - - - -User-Defined Types - - -Functions Needed for a User-Defined Type - + + Extending <acronym>SQL</acronym>: Types + + As previously mentioned, there are two kinds of types + in Postgres: base types (defined in a programming language) + and composite types (instances). 
+ Examples in this section up to interfacing indices can + be found in complex.sql and complex.c. Composite examples + are in funcs.sql. + + + + User-Defined Types + + + Functions Needed for a User-Defined Type + A user-defined type must always have input and output functions. These functions determine how the type appears in strings (for input by the user and output to @@ -26,124 +26,152 @@ delimited character string. Suppose we want to define a complex type which represents complex numbers. Naturally, we choose to represent a - complex in memory as the following C structure: - - typedef struct Complex { - double x; - double y; - } Complex; - + complex in memory as the following C structure: + + +typedef struct Complex { + double x; + double y; +} Complex; + + and a string of the form (x,y) as the external string representation. These functions are usually not hard to write, especially the output function. However, there are a number of points to remember: - - - When defining your external (string) representation, - remember that you must eventually write a - complete and robust parser for that representation - as your input function! - - Complex * - complex_in(char *str) - { - double x, y; - Complex *result; - if (sscanf(str, " ( %lf , %lf )", &x, &y) != 2) { - elog(WARN, "complex_in: error in parsing - return NULL; - } - result = (Complex *)palloc(sizeof(Complex)); - result->x = x; - result->y = y; - return (result); - } - - - The output function can simply be: - - char * - complex_out(Complex *complex) - { - char *result; - if (complex == NULL) - return(NULL); - result = (char *) palloc(60); - sprintf(result, "(%g,%g)", complex->x, complex->y); - return(result); - } - - - - - You should try to make the input and output - functions inverses of each other. If you do - not, you will have severe problems when you need - to dump your data into a file and then read it - back in (say, into someone else's database on - another computer). This is a particularly common - problem when floating-point numbers are - involved. - - - - - - To define the complex type, we need to create the two + + + When defining your external (string) representation, + remember that you must eventually write a + complete and robust parser for that representation + as your input function! + + +Complex * +complex_in(char *str) +{ + double x, y; + Complex *result; + if (sscanf(str, " ( %lf , %lf )", &x, &y) != 2) { + elog(WARN, "complex_in: error in parsing + return NULL; + } + result = (Complex *)palloc(sizeof(Complex)); + result->x = x; + result->y = y; + return (result); +} + + + The output function can simply be: + + +char * +complex_out(Complex *complex) +{ + char *result; + if (complex == NULL) + return(NULL); + result = (char *) palloc(60); + sprintf(result, "(%g,%g)", complex->x, complex->y); + return(result); +} + + + + + + + You should try to make the input and output + functions inverses of each other. If you do + not, you will have severe problems when you need + to dump your data into a file and then read it + back in (say, into someone else's database on + another computer). This is a particularly common + problem when floating-point numbers are + involved. 
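
Once the input and output functions exist, ordinary C functions over the new type follow the same by-reference, fixed-length conventions shown earlier in this chapter. The sketch below is illustrative only and is not part of the patch above; complex_add is an invented name, and the only assumption is the Complex structure defined in this section.

<programlisting>
#include "postgres.h"

/* Same structure as defined above for the complex type. */
typedef struct Complex {
    double  x;
    double  y;
} Complex;

/*
 * Illustrative sketch only; complex_add is a made-up name and is not part
 * of the patch above.  The result is palloc'd, following the same
 * by-reference conventions as makepoint().
 */
Complex *
complex_add(Complex *a, Complex *b)
{
    Complex *result = (Complex *) palloc(sizeof(Complex));

    result->x = a->x + b->x;
    result->y = a->y + b->y;
    return (result);
}
</programlisting>

After the type itself has been created, a function like this could be declared with CREATE FUNCTION complex_add(complex, complex) RETURNS complex, in the same style as the declarations shown for complex_in and complex_out.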
+ + + + + + To define the complex type, we need to create the two user-defined functions complex_in and complex_out before creating the type: - - CREATE FUNCTION complex_in(opaque) - RETURNS complex - AS 'PGROOT/tutorial/obj/complex.so' - LANGUAGE 'c'; - - CREATE FUNCTION complex_out(opaque) - RETURNS opaque - AS 'PGROOT/tutorial/obj/complex.so' - LANGUAGE 'c'; - - CREATE TYPE complex ( - internallength = 16, - input = complex_in, - output = complex_out - ); - - - - - As discussed earlier, Postgres fully supports arrays of - base types. Additionally, Postgres supports arrays of + + +CREATE FUNCTION complex_in(opaque) + RETURNS complex + AS 'PGROOT/tutorial/obj/complex.so' + LANGUAGE 'c'; + +CREATE FUNCTION complex_out(opaque) + RETURNS opaque + AS 'PGROOT/tutorial/obj/complex.so' + LANGUAGE 'c'; + +CREATE TYPE complex ( + internallength = 16, + input = complex_in, + output = complex_out +); + + + + + As discussed earlier, Postgres fully supports arrays of + base types. Additionally, Postgres supports arrays of user-defined types as well. When you define a type, - Postgres automatically provides support for arrays of + Postgres automatically provides support for arrays of that type. For historical reasons, the array type has the same name as the user-defined type with the underscore character _ prepended. Composite types do not need any function defined on them, since the system already understands what they look like inside. - - - -Large Objects + + + + + Large Objects - + The types discussed to this point are all "small" objects -- that is, they are smaller than 8KB in size. - - - 1024 longwords == 8192 bytes. In fact, the type must be considerably smaller than 8192 bytes, - since the Postgres tuple -and page overhead must also fit into this 8KB limitation. -The actual value that fits depends on the machine architecture. - - + + + 1024 longwords == 8192 bytes. In fact, the type must be considerably smaller than 8192 bytes, + since the Postgres tuple + and page overhead must also fit into this 8KB limitation. + The actual value that fits depends on the machine architecture. + + If you require a larger type for something like a document retrieval system or for storing bitmaps, you will - need to use the Postgres large object interface. - - - - + need to use the Postgres large object + interface, or will need to recompile the + Postgres backend to use internal + storage blocks greater than 8kbytes.. + + + + + + diff --git a/doc/src/sgml/y2k.sgml b/doc/src/sgml/y2k.sgml index 75257cbc23..a0028668a3 100644 --- a/doc/src/sgml/y2k.sgml +++ b/doc/src/sgml/y2k.sgml @@ -1,5 +1,5 @@ @@ -52,8 +52,8 @@ $Header: /cvsroot/pgsql/doc/src/sgml/Attic/y2k.sgml,v 1.6 2000/04/07 13:30:58 th User's Guide in the chapter on data types. For two-digit years, the significant transition year is 1970, not 2000; - e.g. 70-01-01 is interpreted as 1970-01-01, - whereas 69-01-01 is interpreted as 2069-01-01. + e.g. "70-01-01" is interpreted as 1970-01-01, + whereas "69-01-01" is interpreted as 2069-01-01. -- 2.11.0
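
The two-digit-year rule quoted in the y2k.sgml hunk above (a 1970 pivot, so "70-01-01" is read as 1970-01-01 and "69-01-01" as 2069-01-01) can be summarized by a small standalone C sketch. This is illustrative only; it is not the date parser used by Postgres, and expand_two_digit_year is an invented name.

<programlisting>
#include <stdio.h>

/*
 * Illustrative sketch only, not the Postgres date parser: the 1970 pivot
 * for two-digit years described in y2k.sgml.  Years 70..99 are taken as
 * 19xx, years 00..69 as 20xx.
 */
static int
expand_two_digit_year(int yy)
{
    return (yy >= 70) ? (1900 + yy) : (2000 + yy);
}

int
main(void)
{
    printf("70 -> %d\n", expand_two_digit_year(70));   /* 1970 */
    printf("69 -> %d\n", expand_two_digit_year(69));   /* 2069 */
    return 0;
}
</programlisting>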