*** ./src/interfaces/jdbc/org/postgresql/Connection.java.orig Sat Sep 8 23:26:04 2001
--- ./src/interfaces/jdbc/org/postgresql/Connection.java Sat Sep 8 23:21:48 2001
***************
*** 22,36 ****
// This is the network stream associated with this connection
public PG_Stream pg_stream;
- // This is set by org.postgresql.Statement.setMaxRows()
- //public int maxrows = 0; // maximum no. of rows; 0 = unlimited
-
private String PG_HOST;
private int PG_PORT;
private String PG_USER;
private String PG_PASSWORD;
private String PG_DATABASE;
private boolean PG_STATUS;
/**
* The encoding to use for this connection.
--- 22,34 ----
// This is the network stream associated with this connection
public PG_Stream pg_stream;
private String PG_HOST;
private int PG_PORT;
private String PG_USER;
private String PG_PASSWORD;
private String PG_DATABASE;
private boolean PG_STATUS;
+ private String compatible;
/**
* The encoding to use for this connection.
***************
*** 123,128 ****
--- 121,131 ----
PG_PORT = port;
PG_HOST = host;
PG_STATUS = CONNECTION_BAD;
+ if(info.getProperty("compatible")==null) {
+ compatible = d.getMajorVersion() + "." + d.getMinorVersion();
+ } else {
+ compatible = info.getProperty("compatible");
+ }
// Now make the initial connection
try
***************
*** 966,971 ****
--- 969,991 ----
public boolean haveMinimumServerVersion(String ver) throws SQLException
{
return (getDBVersionNumber().compareTo(ver) >= 0);
+ }
+
+ /**
+ * This method returns true if the compatible level set in the connection
+ * (which can be passed into the connection or specified in the URL)
+ * is at least the value passed to this method. This is used to toggle
+ * between different functionality as it changes across different releases
+ * of the jdbc driver code. The values here are versions of the jdbc client
+ * and not server versions. For example in 7.1 get/setBytes worked on
+ * LargeObject values, in 7.2 these methods were changed to work on bytea
+ * values. This change in functionality could be disabled by setting the
+ * "compatible" level to be 7.1, in which case the driver will revert to
+ * the 7.1 functionality.
+ */
+ public boolean haveMinimumCompatibleVersion(String ver) throws SQLException
+ {
+ return (compatible.compareTo(ver) >= 0);
}
*** ./src/interfaces/jdbc/org/postgresql/Driver.java.in.orig Sat Sep 8 23:09:58 2001
--- ./src/interfaces/jdbc/org/postgresql/Driver.java.in Sat Sep 8 23:08:04 2001
***************
*** 85,96 ****
* database.
*
*
The java.util.Properties argument can be used to pass arbitrary
! * string tag/value pairs as connection arguments. Normally, at least
* "user" and "password" properties should be included in the
! * properties. In addition, the "charSet" property can be used to
! * set a character set encoding (e.g. "utf-8") other than the platform
! * default (typically Latin1). This is necessary in particular if storing
! * multibyte characters in the database. For a list of supported
* character encoding , see
* http://java.sun.com/products/jdk/1.2/docs/guide/internat/encoding.doc.html
* Note that you will probably want to have set up the Postgres database
--- 85,110 ----
* database.
*
*
The java.util.Properties argument can be used to pass arbitrary
! * string tag/value pairs as connection arguments.
! *
! * user - (optional) The user to connect as
! * password - (optional) The password for the user
! * charSet - (optional) The character set to be used for converting
! * to/from the database to unicode. If multibyte is enabled on the
! * server then the character set of the database is used as the default,
! * otherwise the jvm character encoding is used as the default.
! * compatible - This is used to toggle
! * between different functionality as it changes across different releases
! * of the jdbc driver code. The values here are versions of the jdbc
! * client and not server versions. For example in 7.1 get/setBytes
! * worked on LargeObject values, in 7.2 these methods were changed
! * to work on bytea values. This change in functionality could
! * be disabled by setting the compatible level to be "7.1", in
! * which case the driver will revert to the 7.1 functionality.
! *
! *
Normally, at least
* "user" and "password" properties should be included in the
! * properties. For a list of supported
* character encoding, see
* http://java.sun.com/products/jdk/1.2/docs/guide/internat/encoding.doc.html
* Note that you will probably want to have set up the Postgres database
*** ./src/interfaces/jdbc/org/postgresql/jdbc1/Connection.java.orig Sat Sep 8 23:55:32 2001
--- ./src/interfaces/jdbc/org/postgresql/jdbc1/Connection.java Sat Sep 8 23:57:32 2001
***************
*** 174,179 ****
--- 174,180 ----
"float8",
"bpchar","char","char2","char4","char8","char16",
"varchar","text","name","filename",
+ "bytea",
"bool",
"date",
"time",
***************
*** 197,202 ****
--- 198,204 ----
Types.DOUBLE,
Types.CHAR,Types.CHAR,Types.CHAR,Types.CHAR,Types.CHAR,Types.CHAR,
Types.VARCHAR,Types.VARCHAR,Types.VARCHAR,Types.VARCHAR,
+ Types.BINARY,
Types.BIT,
Types.DATE,
Types.TIME,
*** ./src/interfaces/jdbc/org/postgresql/jdbc1/PreparedStatement.java.orig Sun Sep 9 00:01:02 2001
--- ./src/interfaces/jdbc/org/postgresql/jdbc1/PreparedStatement.java Sat Sep 8 14:37:14 2001
***************
*** 82,88 ****
* A Prepared SQL query is executed and its ResultSet is returned
*
* @return a ResultSet that contains the data produced by the
! * query - never null
* @exception SQLException if a database access error occurs
*/
public java.sql.ResultSet executeQuery() throws SQLException
--- 82,88 ----
* A Prepared SQL query is executed and its ResultSet is returned
*
* @return a ResultSet that contains the data produced by the
! * query - never null
* @exception SQLException if a database access error occurs
*/
public java.sql.ResultSet executeQuery() throws SQLException
***************
*** 107,113 ****
* be executed.
*
* @return either the row count for INSERT, UPDATE or DELETE; or
! * 0 for SQL statements that return nothing.
* @exception SQLException if a database access error occurs
*/
public int executeUpdate() throws SQLException
--- 107,113 ----
* be executed.
*
* @return either the row count for INSERT, UPDATE or DELETE; or
! * 0 for SQL statements that return nothing.
* @exception SQLException if a database access error occurs
*/
public int executeUpdate() throws SQLException
***************
*** 294,299 ****
--- 294,308 ----
*/
public void setBytes(int parameterIndex, byte x[]) throws SQLException
{
+ if (connection.haveMinimumCompatibleVersion("7.2")) {
+ //Version 7.2 supports the bytea datatype for byte arrays
+ if(null == x){
+ setNull(parameterIndex,Types.OTHER);
+ } else {
+ setString(parameterIndex, PGbytea.toPGString(x));
+ }
+ } else {
+ //Version 7.1 and earlier support done as LargeObjects
LargeObjectManager lom = connection.getLargeObjectAPI();
int oid = lom.create();
LargeObject lob = lom.open(oid);
***************
*** 301,306 ****
--- 310,316 ----
lob.close();
setInt(parameterIndex,oid);
}
+ }
/**
* Set a parameter to a java.sql.Date value. The driver converts this
***************
*** 386,393 ****
--- 396,424 ----
*/
public void setAsciiStream(int parameterIndex, InputStream x, int length) throws SQLException
{
+ if (connection.haveMinimumCompatibleVersion("7.2")) {
+ //Version 7.2 supports AsciiStream for all PG text types (char, varchar, text)
+ //As the spec/javadoc for this method indicate this is to be used for
+ //large String values (i.e. LONGVARCHAR) PG doesn't have a separate
+ //long varchar datatype, but with toast all text datatypes are capable of
+ //handling very large values. Thus the implementation ends up calling
+ //setString() since there is no current way to stream the value to the server
+ try {
+ InputStreamReader l_inStream = new InputStreamReader(x, "ASCII");
+ char[] l_chars = new char[length];
+ int l_charsRead = l_inStream.read(l_chars,0,length);
+ setString(parameterIndex, new String(l_chars,0,l_charsRead));
+ } catch (UnsupportedEncodingException l_uee) {
+ throw new PSQLException("postgresql.unusual",l_uee);
+ } catch (IOException l_ioe) {
+ throw new PSQLException("postgresql.unusual",l_ioe);
+ }
+ } else {
+ //Version 7.1 supported only LargeObjects by treating everything
+ //as binary data
setBinaryStream(parameterIndex, x, length);
}
+ }
/**
* When a very large Unicode value is input to a LONGVARCHAR parameter,
***************
*** 406,413 ****
--- 437,465 ----
*/
public void setUnicodeStream(int parameterIndex, InputStream x, int length) throws SQLException
{
+ if (connection.haveMinimumCompatibleVersion("7.2")) {
+ //Version 7.2 supports AsciiStream for all PG text types (char, varchar, text)
+ //As the spec/javadoc for this method indicate this is to be used for
+ //large String values (i.e. LONGVARCHAR) PG doesn't have a separate
+ //long varchar datatype, but with toast all text datatypes are capable of
+ //handling very large values. Thus the implementation ends up calling
+ //setString() since there is no current way to stream the value to the server
+ try {
+ InputStreamReader l_inStream = new InputStreamReader(x, "UTF-8");
+ char[] l_chars = new char[length];
+ int l_charsRead = l_inStream.read(l_chars,0,length);
+ setString(parameterIndex, new String(l_chars,0,l_charsRead));
+ } catch (UnsupportedEncodingException l_uee) {
+ throw new PSQLException("postgresql.unusual",l_uee);
+ } catch (IOException l_ioe) {
+ throw new PSQLException("postgresql.unusual",l_ioe);
+ }
+ } else {
+ //Version 7.1 supported only LargeObjects by treating everything
+ //as binary data
setBinaryStream(parameterIndex, x, length);
}
+ }
/**
* When a very large binary value is input to a LONGVARBINARY parameter,
***************
*** 425,431 ****
*/
public void setBinaryStream(int parameterIndex, InputStream x, int length) throws SQLException
{
! throw org.postgresql.Driver.notImplemented();
}
/**
--- 477,530 ----
*/
public void setBinaryStream(int parameterIndex, InputStream x, int length) throws SQLException
{
! if (connection.haveMinimumCompatibleVersion("7.2")) {
! //Version 7.2 supports BinaryStream for for the PG bytea type
! //As the spec/javadoc for this method indicate this is to be used for
! //large binary values (i.e. LONGVARBINARY) PG doesn't have a separate
! //long binary datatype, but with toast the bytea datatype is capable of
! //handling very large values. Thus the implementation ends up calling
! //setBytes() since there is no current way to stream the value to the server
! byte[] l_bytes = new byte[length];
! int l_bytesRead;
! try {
! l_bytesRead = x.read(l_bytes,0,length);
! } catch (IOException l_ioe) {
! throw new PSQLException("postgresql.unusual",l_ioe);
! }
! if (l_bytesRead == length) {
! setBytes(parameterIndex, l_bytes);
! } else {
! //the stream contained less data than they said
! byte[] l_bytes2 = new byte[l_bytesRead];
! System.arraycopy(l_bytes,0,l_bytes2,0,l_bytesRead);
! setBytes(parameterIndex, l_bytes2);
! }
! } else {
! //Version 7.1 only supported streams for LargeObjects
! //but the jdbc spec indicates that streams should be
! //available for LONGVARBINARY instead
! LargeObjectManager lom = connection.getLargeObjectAPI();
! int oid = lom.create();
! LargeObject lob = lom.open(oid);
! OutputStream los = lob.getOutputStream();
! try {
! // could be buffered, but then the OutputStream returned by LargeObject
! // is buffered internally anyhow, so there would be no performance
! // boost gained, if anything it would be worse!
! int c=x.read();
! int p=0;
! while(c>-1 && p fields.length)
throw new PSQLException("postgresql.res.colrange");
- wasNullFlag = (this_row[columnIndex - 1] == null);
// Handle OID's as BLOBS
! if(!wasNullFlag)
if( fields[columnIndex - 1].getOID() == 26) {
LargeObjectManager lom = connection.getLargeObjectAPI();
LargeObject lob = lom.open(getInt(columnIndex));
--- 374,388 ----
{
if (columnIndex < 1 || columnIndex > fields.length)
throw new PSQLException("postgresql.res.colrange");
+ if (connection.haveMinimumCompatibleVersion("7.2")) {
+ //Version 7.2 supports the bytea datatype for byte arrays
+ return PGbytea.toBytes(getString(columnIndex));
+ } else {
+ //Version 7.1 and earlier supports LargeObjects for byte arrays
+ wasNullFlag = (this_row[columnIndex - 1] == null);
// Handle OID's as BLOBS
! if(!wasNullFlag) {
if( fields[columnIndex - 1].getOID() == 26) {
LargeObjectManager lom = connection.getLargeObjectAPI();
LargeObject lob = lom.open(getInt(columnIndex));
***************
*** 385,392 ****
lob.close();
return buf;
}
!
! return this_row[columnIndex - 1];
}
/**
--- 390,398 ----
lob.close();
return buf;
}
! }
! }
! return null;
}
/**
***************
*** 545,552 ****
--- 551,577 ----
*/
public InputStream getAsciiStream(int columnIndex) throws SQLException
{
+ wasNullFlag = (this_row[columnIndex - 1] == null);
+ if (wasNullFlag)
+ return null;
+
+ if (connection.haveMinimumCompatibleVersion("7.2")) {
+ //Version 7.2 supports AsciiStream for all the PG text types
+ //As the spec/javadoc for this method indicate this is to be used for
+ //large text values (i.e. LONGVARCHAR) PG doesn't have a separate
+ //long string datatype, but with toast the text datatype is capable of
+ //handling very large values. Thus the implementation ends up calling
+ //getString() since there is no current way to stream the value from the server
+ try {
+ return new ByteArrayInputStream(getString(columnIndex).getBytes("ASCII"));
+ } catch (UnsupportedEncodingException l_uee) {
+ throw new PSQLException("postgresql.unusual", l_uee);
+ }
+ } else {
+ // In 7.1 Handle as BLOBS so return the LargeObject input stream
return getBinaryStream(columnIndex);
}
+ }
/**
* A column value can also be retrieved as a stream of Unicode
***************
*** 562,569 ****
--- 587,613 ----
*/
public InputStream getUnicodeStream(int columnIndex) throws SQLException
{
+ wasNullFlag = (this_row[columnIndex - 1] == null);
+ if (wasNullFlag)
+ return null;
+
+ if (connection.haveMinimumCompatibleVersion("7.2")) {
+ //Version 7.2 supports AsciiStream for all the PG text types
+ //As the spec/javadoc for this method indicate this is to be used for
+ //large text values (i.e. LONGVARCHAR) PG doesn't have a separate
+ //long string datatype, but with toast the text datatype is capable of
+ //handling very large values. Thus the implementation ends up calling
+ //getString() since there is no current way to stream the value from the server
+ try {
+ return new ByteArrayInputStream(getString(columnIndex).getBytes("UTF-8"));
+ } catch (UnsupportedEncodingException l_uee) {
+ throw new PSQLException("postgresql.unusual", l_uee);
+ }
+ } else {
+ // In 7.1 Handle as BLOBS so return the LargeObject input stream
return getBinaryStream(columnIndex);
}
+ }
/**
* A column value can also be retrieved as a binary strea. This
***************
*** 579,589 ****
*/
public InputStream getBinaryStream(int columnIndex) throws SQLException
{
! byte b[] = getBytes(columnIndex);
if (b != null)
return new ByteArrayInputStream(b);
! return null; // SQL NULL
}
/**
--- 623,651 ----
*/
public InputStream getBinaryStream(int columnIndex) throws SQLException
{
! wasNullFlag = (this_row[columnIndex - 1] == null);
! if (wasNullFlag)
! return null;
+ if (connection.haveMinimumCompatibleVersion("7.2")) {
+ //Version 7.2 supports BinaryStream for all PG bytea type
+ //As the spec/javadoc for this method indicate this is to be used for
+ //large binary values (i.e. LONGVARBINARY) PG doesn't have a separate
+ //long binary datatype, but with toast the bytea datatype is capable of
+ //handling very large values. Thus the implementation ends up calling
+ //getBytes() since there is no current way to stream the value from the server
+ byte b[] = getBytes(columnIndex);
if (b != null)
return new ByteArrayInputStream(b);
! } else {
! // In 7.1 Handle as BLOBS so return the LargeObject input stream
! if( fields[columnIndex - 1].getOID() == 26) {
! LargeObjectManager lom = connection.getLargeObjectAPI();
! LargeObject lob = lom.open(getInt(columnIndex));
! return lob.getInputStream();
! }
! }
! return null;
}
/**
*** ./src/interfaces/jdbc/org/postgresql/jdbc2/Connection.java.orig Sat Sep 8 23:29:16 2001
--- ./src/interfaces/jdbc/org/postgresql/jdbc2/Connection.java Sat Sep 8 23:33:29 2001
***************
*** 291,302 ****
"float8",
"bpchar","char","char2","char4","char8","char16",
"varchar","text","name","filename",
"bool",
"date",
"time",
"abstime","timestamp",
! "_bool", "_char", "_int2", "_int4", "_text", "_oid", "_varchar", "_int8",
! "_float4", "_float8", "_abstime", "_date", "_time", "_timestamp", "_numeric"
};
/**
--- 291,305 ----
"float8",
"bpchar","char","char2","char4","char8","char16",
"varchar","text","name","filename",
+ "bytea",
"bool",
"date",
"time",
"abstime","timestamp",
! "_bool", "_char", "_int2", "_int4", "_text",
! "_oid", "_varchar", "_int8", "_float4", "_float8",
! "_abstime", "_date", "_time", "_timestamp", "_numeric",
! "_bytea"
};
/**
***************
*** 316,327 ****
Types.DOUBLE,
Types.CHAR,Types.CHAR,Types.CHAR,Types.CHAR,Types.CHAR,Types.CHAR,
Types.VARCHAR,Types.VARCHAR,Types.VARCHAR,Types.VARCHAR,
Types.BIT,
Types.DATE,
Types.TIME,
Types.TIMESTAMP,Types.TIMESTAMP,
! Types.ARRAY, Types.ARRAY, Types.ARRAY, Types.ARRAY, Types.ARRAY, Types.ARRAY, Types.ARRAY, Types.ARRAY,
! Types.ARRAY, Types.ARRAY, Types.ARRAY, Types.ARRAY, Types.ARRAY, Types.ARRAY, Types.ARRAY
};
--- 319,333 ----
Types.DOUBLE,
Types.CHAR,Types.CHAR,Types.CHAR,Types.CHAR,Types.CHAR,Types.CHAR,
Types.VARCHAR,Types.VARCHAR,Types.VARCHAR,Types.VARCHAR,
+ Types.BINARY,
Types.BIT,
Types.DATE,
Types.TIME,
Types.TIMESTAMP,Types.TIMESTAMP,
! Types.ARRAY, Types.ARRAY, Types.ARRAY, Types.ARRAY, Types.ARRAY,
! Types.ARRAY, Types.ARRAY, Types.ARRAY, Types.ARRAY, Types.ARRAY,
! Types.ARRAY, Types.ARRAY, Types.ARRAY, Types.ARRAY, Types.ARRAY,
! Types.ARRAY
};
*** ./src/interfaces/jdbc/org/postgresql/jdbc2/PreparedStatement.java.orig Sat Sep 8 23:35:02 2001
--- ./src/interfaces/jdbc/org/postgresql/jdbc2/PreparedStatement.java Sat Sep 8 14:21:29 2001
***************
*** 91,97 ****
* A Prepared SQL query is executed and its ResultSet is returned
*
* @return a ResultSet that contains the data produced by the
! * query - never null
* @exception SQLException if a database access error occurs
*/
public java.sql.ResultSet executeQuery() throws SQLException
--- 91,97 ----
* A Prepared SQL query is executed and its ResultSet is returned
*
* @return a ResultSet that contains the data produced by the
! * query - never null
* @exception SQLException if a database access error occurs
*/
public java.sql.ResultSet executeQuery() throws SQLException
***************
*** 105,111 ****
* be executed.
*
* @return either the row count for INSERT, UPDATE or DELETE; or
! * 0 for SQL statements that return nothing.
* @exception SQLException if a database access error occurs
*/
public int executeUpdate() throws SQLException
--- 105,111 ----
* be executed.
*
* @return either the row count for INSERT, UPDATE or DELETE; or
! * 0 for SQL statements that return nothing.
* @exception SQLException if a database access error occurs
*/
public int executeUpdate() throws SQLException
***************
*** 305,310 ****
--- 305,319 ----
*/
public void setBytes(int parameterIndex, byte x[]) throws SQLException
{
+ if (connection.haveMinimumCompatibleVersion("7.2")) {
+ //Version 7.2 supports the bytea datatype for byte arrays
+ if(null == x){
+ setNull(parameterIndex,Types.OTHER);
+ } else {
+ setString(parameterIndex, PGbytea.toPGString(x));
+ }
+ } else {
+ //Version 7.1 and earlier support done as LargeObjects
LargeObjectManager lom = connection.getLargeObjectAPI();
int oid = lom.create();
LargeObject lob = lom.open(oid);
***************
*** 312,317 ****
--- 321,327 ----
lob.close();
setInt(parameterIndex,oid);
}
+ }
/**
* Set a parameter to a java.sql.Date value. The driver converts this
***************
*** 413,420 ****
--- 423,451 ----
*/
public void setAsciiStream(int parameterIndex, InputStream x, int length) throws SQLException
{
+ if (connection.haveMinimumCompatibleVersion("7.2")) {
+ //Version 7.2 supports AsciiStream for all PG text types (char, varchar, text)
+ //As the spec/javadoc for this method indicate this is to be used for
+ //large String values (i.e. LONGVARCHAR) PG doesn't have a separate
+ //long varchar datatype, but with toast all text datatypes are capable of
+ //handling very large values. Thus the implementation ends up calling
+ //setString() since there is no current way to stream the value to the server
+ try {
+ InputStreamReader l_inStream = new InputStreamReader(x, "ASCII");
+ char[] l_chars = new char[length];
+ int l_charsRead = l_inStream.read(l_chars,0,length);
+ setString(parameterIndex, new String(l_chars,0,l_charsRead));
+ } catch (UnsupportedEncodingException l_uee) {
+ throw new PSQLException("postgresql.unusual",l_uee);
+ } catch (IOException l_ioe) {
+ throw new PSQLException("postgresql.unusual",l_ioe);
+ }
+ } else {
+ //Version 7.1 supported only LargeObjects by treating everything
+ //as binary data
setBinaryStream(parameterIndex, x, length);
}
+ }
/**
* When a very large Unicode value is input to a LONGVARCHAR parameter,
***************
*** 436,443 ****
--- 467,495 ----
*/
public void setUnicodeStream(int parameterIndex, InputStream x, int length) throws SQLException
{
+ if (connection.haveMinimumCompatibleVersion("7.2")) {
+ //Version 7.2 supports AsciiStream for all PG text types (char, varchar, text)
+ //As the spec/javadoc for this method indicate this is to be used for
+ //large String values (i.e. LONGVARCHAR) PG doesn't have a separate
+ //long varchar datatype, but with toast all text datatypes are capable of
+ //handling very large values. Thus the implementation ends up calling
+ //setString() since there is no current way to stream the value to the server
+ try {
+ InputStreamReader l_inStream = new InputStreamReader(x, "UTF-8");
+ char[] l_chars = new char[length];
+ int l_charsRead = l_inStream.read(l_chars,0,length);
+ setString(parameterIndex, new String(l_chars,0,l_charsRead));
+ } catch (UnsupportedEncodingException l_uee) {
+ throw new PSQLException("postgresql.unusual",l_uee);
+ } catch (IOException l_ioe) {
+ throw new PSQLException("postgresql.unusual",l_ioe);
+ }
+ } else {
+ //Version 7.1 supported only LargeObjects by treating everything
+ //as binary data
setBinaryStream(parameterIndex, x, length);
}
+ }
/**
* When a very large binary value is input to a LONGVARBINARY parameter,
***************
*** 455,460 ****
--- 507,538 ----
*/
public void setBinaryStream(int parameterIndex, InputStream x, int length) throws SQLException
{
+ if (connection.haveMinimumCompatibleVersion("7.2")) {
+ //Version 7.2 supports BinaryStream for for the PG bytea type
+ //As the spec/javadoc for this method indicate this is to be used for
+ //large binary values (i.e. LONGVARBINARY) PG doesn't have a separate
+ //long binary datatype, but with toast the bytea datatype is capable of
+ //handling very large values. Thus the implementation ends up calling
+ //setBytes() since there is no current way to stream the value to the server
+ byte[] l_bytes = new byte[length];
+ int l_bytesRead;
+ try {
+ l_bytesRead = x.read(l_bytes,0,length);
+ } catch (IOException l_ioe) {
+ throw new PSQLException("postgresql.unusual",l_ioe);
+ }
+ if (l_bytesRead == length) {
+ setBytes(parameterIndex, l_bytes);
+ } else {
+ //the stream contained less data than they said
+ byte[] l_bytes2 = new byte[l_bytesRead];
+ System.arraycopy(l_bytes,0,l_bytes2,0,l_bytesRead);
+ setBytes(parameterIndex, l_bytes2);
+ }
+ } else {
+ //Version 7.1 only supported streams for LargeObjects
+ //but the jdbc spec indicates that streams should be
+ //available for LONGVARBINARY instead
LargeObjectManager lom = connection.getLargeObjectAPI();
int oid = lom.create();
LargeObject lob = lom.open(oid);
***************
*** 472,482 ****
}
los.close();
} catch(IOException se) {
! throw new PSQLException("postgresql.prep.is",se);
}
// lob is closed by the stream so don't call lob.close()
setInt(parameterIndex,oid);
}
/**
* In general, parameter values remain in force for repeated used of a
--- 550,561 ----
}
los.close();
} catch(IOException se) {
! throw new PSQLException("postgresql.unusual",se);
}
// lob is closed by the stream so don't call lob.close()
setInt(parameterIndex,oid);
}
+ }
/**
* In general, parameter values remain in force for repeated used of a
***************
*** 728,738 ****
}
/**
! * Sets a Blob - basically its similar to setBinaryStream()
*/
public void setBlob(int i,Blob x) throws SQLException
{
! setBinaryStream(i,x.getBinaryStream(),(int)x.length());
}
/**
--- 807,839 ----
}
/**
! * Sets a Blob
*/
public void setBlob(int i,Blob x) throws SQLException
{
! InputStream l_inStream = x.getBinaryStream();
! int l_length = (int) x.length();
! LargeObjectManager lom = connection.getLargeObjectAPI();
! int oid = lom.create();
! LargeObject lob = lom.open(oid);
! OutputStream los = lob.getOutputStream();
! try {
! // could be buffered, but then the OutputStream returned by LargeObject
! // is buffered internally anyhow, so there would be no performance
! // boost gained, if anything it would be worse!
! int c=l_inStream.read();
! int p=0;
! while(c>-1 && p-1 && p fields.length)
throw new PSQLException("postgresql.res.colrange");
- wasNullFlag = (this_row[columnIndex - 1] == null);
// Handle OID's as BLOBS
! if(!wasNullFlag)
if( fields[columnIndex - 1].getOID() == 26) {
LargeObjectManager lom = connection.getLargeObjectAPI();
LargeObject lob = lom.open(getInt(columnIndex));
--- 312,326 ----
{
if (columnIndex < 1 || columnIndex > fields.length)
throw new PSQLException("postgresql.res.colrange");
+ if (connection.haveMinimumCompatibleVersion("7.2")) {
+ //Version 7.2 supports the bytea datatype for byte arrays
+ return PGbytea.toBytes(getString(columnIndex));
+ } else {
+ //Version 7.1 and earlier supports LargeObjects for byte arrays
+ wasNullFlag = (this_row[columnIndex - 1] == null);
// Handle OID's as BLOBS
! if(!wasNullFlag) {
if( fields[columnIndex - 1].getOID() == 26) {
LargeObjectManager lom = connection.getLargeObjectAPI();
LargeObject lob = lom.open(getInt(columnIndex));
***************
*** 323,330 ****
lob.close();
return buf;
}
!
! return this_row[columnIndex - 1];
}
/**
--- 328,336 ----
lob.close();
return buf;
}
! }
! }
! return null;
}
/**
***************
*** 392,399 ****
--- 398,424 ----
*/
public InputStream getAsciiStream(int columnIndex) throws SQLException
{
+ wasNullFlag = (this_row[columnIndex - 1] == null);
+ if (wasNullFlag)
+ return null;
+
+ if (connection.haveMinimumCompatibleVersion("7.2")) {
+ //Version 7.2 supports AsciiStream for all the PG text types
+ //As the spec/javadoc for this method indicate this is to be used for
+ //large text values (i.e. LONGVARCHAR) PG doesn't have a separate
+ //long string datatype, but with toast the text datatype is capable of
+ //handling very large values. Thus the implementation ends up calling
+ //getString() since there is no current way to stream the value from the server
+ try {
+ return new ByteArrayInputStream(getString(columnIndex).getBytes("ASCII"));
+ } catch (UnsupportedEncodingException l_uee) {
+ throw new PSQLException("postgresql.unusual", l_uee);
+ }
+ } else {
+ // In 7.1 Handle as BLOBS so return the LargeObject input stream
return getBinaryStream(columnIndex);
}
+ }
/**
* A column value can also be retrieved as a stream of Unicode
***************
*** 412,419 ****
--- 437,463 ----
*/
public InputStream getUnicodeStream(int columnIndex) throws SQLException
{
+ wasNullFlag = (this_row[columnIndex - 1] == null);
+ if (wasNullFlag)
+ return null;
+
+ if (connection.haveMinimumCompatibleVersion("7.2")) {
+ //Version 7.2 supports AsciiStream for all the PG text types
+ //As the spec/javadoc for this method indicate this is to be used for
+ //large text values (i.e. LONGVARCHAR) PG doesn't have a separate
+ //long string datatype, but with toast the text datatype is capable of
+ //handling very large values. Thus the implementation ends up calling
+ //getString() since there is no current way to stream the value from the server
+ try {
+ return new ByteArrayInputStream(getString(columnIndex).getBytes("UTF-8"));
+ } catch (UnsupportedEncodingException l_uee) {
+ throw new PSQLException("postgresql.unusual", l_uee);
+ }
+ } else {
+ // In 7.1 Handle as BLOBS so return the LargeObject input stream
return getBinaryStream(columnIndex);
}
+ }
/**
* A column value can also be retrieved as a binary strea. This
***************
*** 429,448 ****
*/
public InputStream getBinaryStream(int columnIndex) throws SQLException
{
! // New in 7.1 Handle OID's as BLOBS so return the input stream
! if(!wasNullFlag)
if( fields[columnIndex - 1].getOID() == 26) {
LargeObjectManager lom = connection.getLargeObjectAPI();
LargeObject lob = lom.open(getInt(columnIndex));
return lob.getInputStream();
}
!
! // Not an OID so fake the stream
! byte b[] = getBytes(columnIndex);
!
! if (b != null)
! return new ByteArrayInputStream(b);
! return null; // SQL NULL
}
/**
--- 473,501 ----
*/
public InputStream getBinaryStream(int columnIndex) throws SQLException
{
! wasNullFlag = (this_row[columnIndex - 1] == null);
! if (wasNullFlag)
! return null;
!
! if (connection.haveMinimumCompatibleVersion("7.2")) {
! //Version 7.2 supports BinaryStream for all PG bytea type
! //As the spec/javadoc for this method indicate this is to be used for
! //large binary values (i.e. LONGVARBINARY) PG doesn't have a separate
! //long binary datatype, but with toast the bytea datatype is capable of
! //handling very large values. Thus the implementation ends up calling
! //getBytes() since there is no current way to stream the value from the server
! byte b[] = getBytes(columnIndex);
! if (b != null)
! return new ByteArrayInputStream(b);
! } else {
! // In 7.1 Handle as BLOBS so return the LargeObject input stream
if( fields[columnIndex - 1].getOID() == 26) {
LargeObjectManager lom = connection.getLargeObjectAPI();
LargeObject lob = lom.open(getInt(columnIndex));
return lob.getInputStream();
}
! }
! return null;
}
/**
***************
*** 731,737 ****
//if index<0, count from the end of the result set, but check
//to be sure that it is not beyond the first index
if (index<0)
! if (index > -rows_size)
internalIndex = rows_size+index;
else {
beforeFirst();
--- 784,790 ----
//if index<0, count from the end of the result set, but check
//to be sure that it is not beyond the first index
if (index<0)
! if (index >= -rows_size)
internalIndex = rows_size+index;
else {
beforeFirst();
***************
*** 794,799 ****
--- 847,856 ----
public java.sql.Array getArray(int i) throws SQLException
{
+ wasNullFlag = (this_row[i - 1] == null);
+ if(wasNullFlag)
+ return null;
+
if (i < 1 || i > fields.length)
throw new PSQLException("postgresql.res.colrange");
return (java.sql.Array) new org.postgresql.jdbc2.Array( connection, i, fields[i-1], this );
***************
*** 826,835 ****
--- 883,907 ----
public java.io.Reader getCharacterStream(int i) throws SQLException
{
+ wasNullFlag = (this_row[i - 1] == null);
+ if (wasNullFlag)
+ return null;
+
+ if (connection.haveMinimumCompatibleVersion("7.2")) {
+ //Version 7.2 supports AsciiStream for all the PG text types
+ //As the spec/javadoc for this method indicate this is to be used for
+ //large text values (i.e. LONGVARCHAR) PG doesn't have a separate
+ //long string datatype, but with toast the text datatype is capable of
+ //handling very large values. Thus the implementation ends up calling
+ //getString() since there is no current way to stream the value from the server
+ return new CharArrayReader(getString(i).toCharArray());
+ } else {
+ // In 7.1 Handle as BLOBS so return the LargeObject input stream
Encoding encoding = connection.getEncoding();
InputStream input = getBinaryStream(i);
return encoding.getDecodingReader(input);
}
+ }
/**
* New in 7.1
***************
*** 1485,1488 ****
--- 1557,1563 ----
}
}
}
+
+
}
+
*** ./src/interfaces/jdbc/org/postgresql/ResultSet.java.orig Sat Sep 8 23:12:41 2001
--- ./src/interfaces/jdbc/org/postgresql/ResultSet.java Fri Sep 7 10:46:01 2001
***************
*** 192,198 ****
String s = getString(col);
// Handle SQL Null
! if(s==null)
return null;
// Handle Money
--- 192,199 ----
String s = getString(col);
// Handle SQL Null
! wasNullFlag = (this_row[col - 1] == null);
! if(wasNullFlag)
return null;
// Handle Money