81 Commits

Author SHA1 Message Date
Rafa de la Torre
c9157cd1ab Merge pull request #9 from CartoDB/v2.x-carto-fix-interspersed-messages
Fix for interspersed messages
2019-06-04 16:52:35 +02:00
Rafa de la Torre
0acb072906 Update NEWS.carto.md with fix for interspersed messages 2019-06-04 16:42:45 +02:00
Rafa de la Torre
645616c2e0 Test interspersed messages emit a warning event 2019-06-04 15:41:59 +02:00
Rafa de la Torre
0c787920a4 Test to assert interspersed messages do not break COPY TO flow 2019-06-04 15:32:10 +02:00
Rafa de la Torre
5fec3a5cc1 Replace console.log with an event 2019-06-04 14:37:04 +02:00
Rafa de la Torre
002a36bcfc Fix for interspersed messages
The way the messages were buffered caused interspersed messages to be
inserted in the middle of CopyData messages, disrupting the normal COPY
TO flow.

This fixes it by consuming them (adjusting offsets as appropriate) and
just logging them to the console, effectively discarding them from the
COPY flow.
2019-06-04 12:21:46 +02:00
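As a rough sketch of what this fix means for consumers (the 'warning' event and its message text come from the copy-to.js changes in this changeset; the table name and connection setup are placeholders):

```js
var pg = require('pg');
var copyTo = require('pg-copy-streams').to;

var client = new pg.Client();
client.connect();

var stream = client.query(copyTo('COPY my_table TO STDOUT'));

// Interspersed NoticeResponse/ParameterStatus messages are consumed out of
// the COPY data and surfaced as 'warning' events instead of corrupting rows.
stream.on('warning', function(msg) {
  console.error(msg); // e.g. "Got an interspersed message: ...WARNING..."
});

stream.pipe(process.stdout);
stream.on('end', function() { client.end(); });
```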
Daniel G. Aubert
be101502d9 Merge pull request #8 from CartoDB/copyto-detach-connection-after-done
Do not detach streams before finishing
2019-05-24 17:20:37 +02:00
Daniel García Aubert
c865db412b Update NEWS 2019-05-24 17:11:26 +02:00
Daniel García Aubert
edc0462470 Do not detach streams before finishing 2019-05-24 17:06:47 +02:00
Daniel G. Aubert
d26449cc69 Merge pull request #7 from CartoDB/add-carto-readme
Do not set postgres version
2019-05-24 16:55:55 +02:00
Daniel García Aubert
6b12cbc876 Do not set postgres version 2019-05-24 16:47:10 +02:00
Daniel G. Aubert
27ff2465ce Merge pull request #5 from CartoDB/add-carto-readme
Add NEWS Carto
2019-05-24 16:41:51 +02:00
Daniel García Aubert
69b5b60e9f Add NEWS Carto 2019-05-24 16:34:57 +02:00
jeromew
06cc013e8e Bump version 2019-03-21 16:05:04 +00:00
jeromew
7115f25ec1 Changelog 2019-03-21 16:03:11 +00:00
jeromew
38f9ebc606 Add benchmark to measure memory usage during copy-from 2019-03-21 15:16:49 +00:00
jeromew
ff39922e86 Add benchmark for copy-from 2019-03-21 13:22:13 +00:00
jeromew
cffae659b8 perf: remove one call to push in copy-from transform loop 2019-03-20 09:21:28 +00:00
jeromew
98d8d8a8e0 Stubs new version 2019-03-19 08:54:53 +00:00
jeromew
abf76d14fc Bump version 2019-03-19 08:49:24 +00:00
jeromew
abd92ae4ca Changelog 2019-03-19 08:48:29 +00:00
jeromew
6249052bc8 Add test for copy from client reuse 2019-03-18 17:45:55 +00:00
jeromew
291e257116 Add test for copy-to pg client re-use 2019-03-15 15:15:45 +00:00
jeromew
78c2d834c7 Update Changelog with recent commits 2019-03-15 13:19:13 +00:00
jeromew
2af7ce57b1 Merge branch 'master' of ssh://github.com/brianc/node-pg-copy-streams 2019-03-15 12:27:54 +00:00
jeromew
90480c7cfa Fix copy-to test intermittent 'unhandled promise rejection' bug 2019-03-15 12:24:54 +00:00
jeromew
df47d58d74 Do not register the pushBufferIfNeeded method on the instance 2019-03-15 10:12:20 +00:00
jeromew
7d546a5c34 Merge pull request #68 from jaweesner/master
Update README example to be pg 7.0 compliant
2019-03-15 10:52:45 +01:00
jeromew
9c46a8223e Stubs next version 2019-03-14 20:04:47 +00:00
jeromew
4ff1971113 Bump version 2019-03-14 18:56:17 +00:00
jeromew
c8661b8afb fix Makefile tabulation 2019-03-14 18:55:48 +00:00
jeromew
0b716029db Add major version support in Makefile 2019-03-14 18:52:46 +00:00
jeromew
660931ef1b Add 2.x changelog 2019-03-14 18:51:42 +00:00
jeromew
e3c2109c5a Add package-lock.json 2019-03-14 18:30:36 +00:00
jeromew
f01c278615 Improve copy-to overall performance by keeping the in-out chunk balance (ported from CartoDB's fork) 2019-03-14 18:24:15 +00:00
jeromew
1c3271ad06 Do not use deprecated Buffer constructor (ported from CartoDB's fork) 2019-03-14 18:00:12 +00:00
jeromew
43bc2cac7b Use strict (ported from CartoDB's fork) 2019-03-14 17:49:55 +00:00
jeromew
5e3934b71f Drop support for old version of Node.js
Add support for latest LTS releases and current
2019-03-14 17:34:17 +00:00
jeromew
69f670ab13 Update dev dependencies 2019-03-14 17:30:34 +00:00
jaweesner
455e170084 Update to be pg 7.0 compliant
pg singletons are no longer supported according to https://node-postgres.com/guides/upgrading; updated the example to reflect the upgrade changes.
2018-04-12 19:36:09 -07:00
jeromew
e15feb199a README.md: copy-from error and vacuum #26 2016-08-23 12:10:25 +02:00
jeromew
191a4ec16a Fix documentation of copy-from completion 2016-08-23 11:32:10 +02:00
jeromew
399bff7ed7 Bump version 2016-08-22 16:27:04 +00:00
jeromew
8174e10fb5 Merge pull request #54 from jeromew/upstream-end
Test: `end` event should not be triggered 2 times on copy-from
2016-08-22 17:47:29 +02:00
jeromew
f29aef3bba Merge pull request #53 from jeromew/upstream-frontier
Bugfix - wrong tests on chunk frontiers
2016-08-22 17:47:01 +02:00
jeromew
b2e108571e Issue #54: We should probably delay the _flush cb() to CommandComplete 2016-07-30 00:08:54 +00:00
jeromew
7003f6070f Fix issue #54: end is being triggered 2 times 2016-07-29 23:51:41 +00:00
jeromew
ade7ab95a0 Test: end event should not be triggered 2 times on copy-from 2016-07-29 23:47:39 +00:00
jeromew
9ccda04036 Bugfix - Chunk frontiers were not correctly tested 2016-07-28 23:05:50 +00:00
jeromew
a5e532f20b Bugfix - wrong tests on chunk frontiers 2016-07-28 22:55:02 +00:00
jeromew
2a4db2920e Fix NoticeResponse test and handle other messages (#52) 2016-07-28 16:13:22 -05:00
jeromew
f155899570 Add a test for NoticeResponse handling (#51) 2016-07-28 13:15:59 -05:00
brianc
18be125596 Bump version 2016-07-26 14:40:01 -05:00
jeromew
ae5b344395 Refactor message format codes handling (#45) 2016-07-26 14:39:48 -05:00
jeromew
ee84aba89f Use end stream option instead of not calling _flush callback (#44) 2016-07-26 14:39:33 -05:00
brianc
e0aa7db324 Bump version 2016-05-24 17:20:26 -05:00
Jonathan Bergknoff
0f0ddf7ad4 Expose row count after COPY FROM command (#37)
* Expose row count after COPY FROM command

* correct conditional
2016-05-24 17:20:16 -05:00
brianc
ed57e131e9 Drop support for node@v0.10 2016-05-03 13:21:28 -05:00
Brian C
c4a0e6dd58 Fix compatibility with newer versions of node (#39)
* Eliminate the detach/reattach strategy, as it cannot differentiate between on/once listeners and inconsistently loses unshifted data depending on the Node version. Instead, just split the stream and send it to the copy stream and the connection.stream at the same time. Disconnecting the copy stream just means unpiping. Added handleCopyData to fulfill the query contract but ignore the incoming data.

Add node 4.2.2 to Travis
Minimum postgres 9.2 to allow tests to complete in Travis

Remove test that is no longer needed since we no longer disconnect/reconnect listeners

* Add resume

* Remove node 0.10 and add 0.12

* Re-enable old tests

* Add more versions to the travis test matrix
2016-05-03 13:20:04 -05:00
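A minimal sketch of the strategy this commit describes, using plain Node streams (all names here are illustrative, not the module's API): the socket is piped to both consumers at once, so no listeners need to be detached and reattached, and disconnecting the copy stream is just an unpipe.

```js
var PassThrough = require('stream').PassThrough;

// source stands in for connection.stream; the two consumers stand in for
// the copy stream and the regular protocol handling.
var source = new PassThrough();
var copyStream = new PassThrough();
var protocolStream = new PassThrough();

// Both consumers see every chunk; no listeners are moved around.
source.pipe(copyStream);
source.pipe(protocolStream);

// "Disconnecting" the copy stream just means unpiping it;
// protocolStream keeps receiving data.
source.unpipe(copyStream);
```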
Dan Robinson
d5b5c8c569 Merge pull request #33 from alexconlin/master
Change file format in example from .tdv to .tsv
2015-12-09 17:14:26 -08:00
Alex Conlin
6981ea6ac5 Change file format in example from .tdv to .tsv 2015-12-09 20:31:54 +00:00
brianc
d2b9677a02 Fix travis build 2015-03-09 08:11:33 -06:00
Brian M. Carlson
bfee86543f Remove database from .travis.yml - that is not the problem 2014-09-16 00:34:34 -04:00
Brian C
21f47220fc Merge pull request #20 from drob/error-message
Better test of error handling after initial response.
2014-09-16 00:33:21 -04:00
Dan
b1b613125f Better test of error handling after initial response. 2014-09-16 00:24:14 -04:00
Brian M. Carlson
4222053744 Last try before I abandon travis 2014-09-15 23:50:14 -04:00
Brian M. Carlson
040ed5f4da Mess with travis config 2014-09-15 23:48:14 -04:00
Brian M. Carlson
f9ee1c083a Merge branch 'master' of github.com:brianc/node-pg-copy-streams 2014-09-15 23:39:49 -04:00
Brian M. Carlson
dcfffd0670 Try new travis changes 2014-09-15 23:39:42 -04:00
Brian C
12e4ca33b0 Update README.md
dat travis badge
2014-09-15 21:10:42 -04:00
Brian M. Carlson
8f2355e454 0.3.0 2014-09-15 20:56:39 -04:00
Brian M. Carlson
d6eab36b66 Make tests a bit more robusto 2014-09-15 20:56:34 -04:00
Brian M. Carlson
33f6ecc11b Add workflow boilerplate files 2014-09-15 20:49:09 -04:00
Brian C
36572a8b7b Merge pull request #16 from drob/fix-docs
Fixes pipe from a file to table example in README.md.
2014-09-15 20:48:23 -04:00
Brian C
0c5d08edae Merge pull request #19 from drob/transform-opts
Accept stream options in constructors, pass to internal transform streams.
2014-09-15 20:47:38 -04:00
Dan
b78a3eb845 Accept stream options in constructors, pass to internal transform streams.
Includes tests.
2014-09-15 15:01:39 -04:00
Dan
107f007249 Fixes pipe from a file to table example in README.md. 2014-08-10 13:33:33 -07:00
Brian C
25b8d6da5f Update README.md
Providing clarity for #6
2014-05-01 11:07:33 -05:00
Dan Robinson
1c0c8871c1 Bump version 2014-04-07 11:56:28 -07:00
Dan Robinson
beb54334e2 Merge pull request #12 from drob/error-handling
Adds handling for errors after initial response.
2014-04-07 11:55:36 -07:00
Dan Robinson
1db9b3ec3d Adds handling for errors after initial response.
Includes a test.
2014-04-06 22:04:20 -07:00
16 changed files with 885 additions and 135 deletions

4
.npmignore Normal file

@@ -0,0 +1,4 @@
.gitignore
.travis.yml
bench/
test/

14
.travis.yml Normal file

@@ -0,0 +1,14 @@
language: node_js
node_js:
- "6"
- "8"
- "10"
- "11"
services:
- postgresql
before_install:
- npm install npm --global
env:
- PGUSER=postgres PGDATABASE=postgres

19
Makefile Normal file

@@ -0,0 +1,19 @@
.PHONY: test patch minor major
test:
npm test
patch: test
npm version patch -m "Bump version"
git push origin master --tags
npm publish
minor: test
npm version minor -m "Bump version"
git push origin master --tags
npm publish
major: test
npm version major -m "Bump version"
git push origin master --tags
npm publish

34
NEWS.carto.md Normal file
View File

@@ -0,0 +1,34 @@
# CARTO's Changelog
## v2.2.0-carto.1
Released 2019-mm-dd
Bug fixes:
* Copy to: ensure stream is detached when finished
* Copy to: deal with interspersed messages properly. See [#9](https://github.com/CartoDB/node-pg-copy-streams/pull/9)
## v1.2.0-carto.3
Released 2018-11-21
Features:
* Drop support for Node.js 0.12, 4 and 5.
* Add support for Node.js 8 and 10.
* Add package-lock.json
* Do not use deprecated Buffer constructors.
## v1.2.0-carto.2
Released 2018-10-26
Bug fixes:
* Make all modules use strict mode semantics.
## v1.2.0-carto.1
Released 2018-06-11
Bug fixes:
* Improves performance of COPY TO by sending bigger chunks through low level `push()`. See https://github.com/CartoDB/node-pg-copy-streams/pull/1
## v1.2.0
Released 2016-08-22
Vanilla version v1.2.0 from upstream repository. See https://github.com/CartoDB/node-pg-copy-streams/releases/tag/v1.2.0

README.md

@@ -1,5 +1,7 @@
## pg-copy-streams
[![Build Status](https://travis-ci.org/brianc/node-pg-copy-streams.svg)](https://travis-ci.org/brianc/node-pg-copy-streams)
COPY FROM / COPY TO for node-postgres. Stream from one database to another, and stuff.
## how? what? huh?
@@ -18,10 +20,12 @@ If you're not familiar with the feature (I wasn't either) you can read this for
### pipe from a table to stdout
```js
var pg = require('pg');
var {Pool} = require('pg');
var copyTo = require('pg-copy-streams').to;
pg.connect(function(err, client, done) {
var pool = new Pool();
pool.connect(function(err, client, done) {
var stream = client.query(copyTo('COPY my_table TO STDOUT'));
stream.pipe(process.stdout);
stream.on('end', done);
@@ -33,18 +37,24 @@ pg.connect(function(err, client, done) {
```js
var fs = require('fs');
var pg = require('pg');
var {Pool} = require('pg');
var copyFrom = require('pg-copy-streams').from;
pg.connect(function(err, client, done) {
var pool = new Pool();
pool.connect(function(err, client, done) {
var stream = client.query(copyFrom('COPY my_table FROM STDIN'));
var fileStream = fs.createReadStream('some_file.tdv')
fileStream.pipe(stream);
fileStream.on('end', done);
var fileStream = fs.createReadStream('some_file.tsv')
fileStream.on('error', done);
stream.on('error', done);
stream.on('end', done);
fileStream.pipe(stream);
});
```
*Important*: Even if `pg-copy-streams.from` is used as a Writable (via `pipe`), do not rely on the 'finish' event to mean the COPY command has been acknowledged by the database. Internally, a duplex stream pipes the data into the database connection, and the COPY command should be considered complete only when the 'end' event is triggered.
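For example (a sketch built on the COPY FROM example above, with the same placeholder table and file names), treat 'end' rather than 'finish' as the completion signal:

```js
var stream = client.query(copyFrom('COPY my_table FROM STDIN'));
var fileStream = fs.createReadStream('some_file.tsv');
fileStream.on('error', done);
stream.on('error', done);

// 'finish' only means the Writable side has consumed all input;
// 'end' means the database has acknowledged the COPY command.
stream.on('end', done);
fileStream.pipe(stream);
```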
## install
```sh
@@ -53,9 +63,34 @@ $ npm install pg-copy-streams
## notice
Before you set out on this magical piping journey, you _really_ should read this: http://www.postgresql.org/docs/9.3/static/sql-copy.html, and you might want to take a look at the [tests](https://github.com/brianc/node-pg-copy-streams/tree/master/test) to get an idea of how things work.
This module __only__ works with the pure JavaScript bindings. If you're using `require('pg').native` please make sure to use normal `require('pg')` or `require('pg.js')` when you're using copy streams.
## contributing
Before you set out on this magical piping journey, you _really_ should read this: http://www.postgresql.org/docs/current/static/sql-copy.html, and you might want to take a look at the [tests](https://github.com/brianc/node-pg-copy-streams/tree/master/test) to get an idea of how things work.
Take note of the following warning in the PostgreSQL documentation:
> COPY stops operation at the first error. This should not lead to problems in the event of a COPY TO, but the target table will already have received earlier rows in a COPY FROM. These rows will not be visible or accessible, but they still occupy disk space. This might amount to a considerable amount of wasted disk space if the failure happened well into a large copy operation. You might wish to invoke VACUUM to recover the wasted space.
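As a sketch of how an application might act on that warning (whether and when to VACUUM is an application decision, and my_table is a placeholder), the wasted space can be reclaimed after a failed COPY FROM:

```js
stream.on('error', function(err) {
  // Rows received before the failure are invisible but still occupy
  // disk space; VACUUM reclaims it.
  client.query('VACUUM my_table', function(vacuumErr) {
    done(vacuumErr || err);
  });
});
```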
## benchmarks
The COPY command is commonly used to move huge sets of data. This can put pressure on the Node.js event loop, CPU usage and memory usage.
There is a bench/ directory in the repository where benchmark scripts are stored. If you have performance issues with `pg-copy-streams`, do not hesitate to write a new benchmark that highlights your issue. Please avoid committing huge files (such PRs won't be accepted); find other ways to generate huge datasets.
If you have a local instance of postgres on your machine, you can run a benchmark, for example, with
```sh
$ cd bench
$ PGPORT=5432 PGDATABASE=postgres node copy-from.js
```
## tests
In order to launch the test suite, you need to have a local instance of postgres running on your machine.
```sh
$ PGPORT=5432 PGDATABASE=postgres make test
```
## contributing
Instead of adding a bunch more code to the already bloated [node-postgres](https://github.com/brianc/node-postgres) I am trying to make the internals extensible and work on adding edge-case features as 3rd party modules.
This is one of those.
@@ -68,6 +103,36 @@ Generally how I work is if you submit a few pull requests and you're interested
Since this isn't a module with tons of installs and dependent modules I hope we can work together on this to iterate faster here and make something really useful.
## changelog
### version 2.x - published YYYY-MM-DD
### version 2.2.0 - published YYYY-MM-DD
* Small refactor in copy-from, going from 3 push calls to 2 in every chunk transform loop
* Add bench/ directory for benchmarks
* Add benchmark to compare performance of pg-copy-streams with psql during copy-from
* Add benchmark to measure memory usage of copy-from
### version 2.1.0 - published 2019-03-19
* Change README to stop using the pg pool singleton (removed after pg 7.0)
* Do not register copy-to.pushBufferIfNeeded on the instance itself (avoid dangling method on the object)
* Fix intermittent 'unhandled promise rejection' bug in copy-to test
* Add tests regarding client re-use
### version 2.0.0 - published 2019-03-14
This version's major change is a modification in the COPY TO implementation. In the previous version, when a chunk was received from the database, it was analyzed and every row contained within that chunk was pushed individually down the stream pipeline. Small rows could lead to a "one chunk" / "thousands of rows pushed" performance issue in node. Thanks to @rafatower & CartoDB for the patch.
This is considered to be a major change since some people could be relying on the fact that each outgoing chunk is an individual row.
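For code that relied on the old one-push-per-row behavior, a sketch like the following restores it for text-format COPY output (binary format is not newline-delimited, so this does not apply there; the splitter is illustrative, not part of the module):

```js
var Transform = require('stream').Transform;

// Re-split arbitrary chunks into one push per text-format row.
var byRow = new Transform({
  transform: function(chunk, enc, cb) {
    this._buf = this._buf ? Buffer.concat([this._buf, chunk]) : chunk;
    var idx;
    while ((idx = this._buf.indexOf(0x0a)) !== -1) { // 0x0a = '\n'
      this.push(this._buf.slice(0, idx + 1));
      this._buf = this._buf.slice(idx + 1);
    }
    cb(); // a trailing partial row stays buffered until the next chunk
  }
});

// stream is a COPY TO stream as in the examples above.
stream.pipe(byRow).on('data', function(row) {
  // exactly one row per 'data' event, as in 1.x
});
```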
Other changes in this version
* Use Strict
* Drop old Node.js versions (0.12, 4) from the Travis matrix. Support LTS 6, 8, 10 and current 11
* Update dev dependencies (pg, lodash)
* Stop using deprecated Buffer constructor
* Add package-lock.json
## license
The MIT License (MIT)

45
bench/copy-from-memory.js Normal file

@@ -0,0 +1,45 @@
var cp = require('duplex-child-process');
var pg = require('pg')
var copy = require('../').from
var client = function() {
var client = new pg.Client()
client.connect()
return client
}
var inStream = function() {
return cp.spawn('seq', ['0', '29999999']);
}
var running = true;
var c = client();
c.query('DROP TABLE IF EXISTS plugnumber', function() {
c.query('CREATE TABLE plugnumber (num int)', function() {
var seq = inStream()
var from = c.query(copy('COPY plugnumber FROM STDIN'))
seq.pipe(from);
from.on('end', function() {
running = false;
c.end();
})
})
})
var rssMin = process.memoryUsage().rss / 1024 / 1024
var rssMax = rssMin
var memlog = function() {
var rss = process.memoryUsage().rss / 1024 / 1024
rssMin = Math.min(rss, rssMin)
rssMax = Math.max(rss, rssMax)
console.log('rss:' + Math.round(rss*100)/100 + 'MB rssMin:'+ Math.round(rssMin*100)/100 + 'MB rssMax:' + Math.round(rssMax*100)/100 + 'MB')
if (running) {
setTimeout(memlog, 1000);
}
}
memlog()

86
bench/copy-from.js Normal file

@@ -0,0 +1,86 @@
var Benchmark = require('benchmark');
var cp = require('duplex-child-process');
var pg = require('pg')
var copy = require('../').from
var client = function() {
var client = new pg.Client()
client.connect()
return client
}
var psql = '/opt/postgresql-9.6.1/bin/psql'
var limit = 999999;
var inStream = function() {
return cp.spawn('seq', ['0', ''+limit]);
}
var suite = new Benchmark.Suite;
suite
.add({
name: 'unix pipe into psql COPY',
defer: true,
fn: function(d) {
var c = client();
c.query('DROP TABLE IF EXISTS plugnumber', function() {
c.query('CREATE TABLE plugnumber (num int)', function() {
c.end();
var from = cp.spawn('sh', ['-c', 'seq 0 '+limit+' | '+psql+' postgres -c \'COPY plugnumber FROM STDIN\''])
from.on('close', function() {
d.resolve();
})
})
})
}
})
.add({
name: 'pipe into psql COPY',
defer: true,
fn: function(d) {
var c = client();
c.query('DROP TABLE IF EXISTS plugnumber', function() {
c.query('CREATE TABLE plugnumber (num int)', function() {
c.end();
var seq = inStream();
var from = cp.spawn(psql, ['postgres', '-c', 'COPY plugnumber FROM STDIN'])
seq.pipe(from);
from.on('close', function() {
d.resolve();
})
})
})
}
})
.add({
name: 'pipe into pg-copy-stream COPY',
defer: true,
fn: function(d) {
var c = client();
c.query('DROP TABLE IF EXISTS plugnumber', function() {
c.query('CREATE TABLE plugnumber (num int)', function() {
var seq = inStream()
var from = c.query(copy('COPY plugnumber FROM STDIN'))
seq.pipe(from);
from.on('end', function() {
c.end();
d.resolve();
})
})
})
}
})
.on('cycle', function(event) {
console.log(String(event.target));
})
.on('complete', function() {
console.log('Fastest is ' + this.filter('fastest').map('name'));
});
var c = client()
c.query('DROP TABLE IF EXISTS plugnumber', function() {
c.end();
suite.run();
})

copy-to.js

@@ -1,15 +1,17 @@
module.exports = function(txt) {
return new CopyStreamQuery(txt)
'use strict';
module.exports = function(txt, options) {
return new CopyStreamQuery(txt, options)
}
var Transform = require('stream').Transform
var util = require('util')
var code = require('./message-formats')
var CopyStreamQuery = function(text) {
Transform.call(this)
var CopyStreamQuery = function(text, options) {
Transform.call(this, options)
this.text = text
this._listeners = {}
this._copyOutResponse = null
this._gotCopyOutResponse = false
this.rowCount = 0
}
@@ -20,77 +22,101 @@ var eventTypes = ['close', 'data', 'end', 'error']
CopyStreamQuery.prototype.submit = function(connection) {
connection.query(this.text)
this.connection = connection
var self = this
eventTypes.forEach(function(type) {
self._listeners[type] = connection.stream.listeners(type)
connection.stream.removeAllListeners(type)
})
this.connection.removeAllListeners('copyData')
this.on('end', () => this._detach())
connection.stream.pipe(this)
}
var code = {
E: 69, //Error
H: 72, //CopyOutResponse
d: 0x64, //CopyData
c: 0x63 //CopyDone
}
CopyStreamQuery.prototype._detach = function() {
this.connection.stream.unpipe()
var self = this
eventTypes.forEach(function(type) {
self.connection.stream.removeAllListeners(type)
self._listeners[type].forEach(function(listener) {
self.connection.stream.on(type, listener)
})
})
this.connection.stream.unpipe(this)
// Unpipe can drop us out of flowing mode
this.connection.stream.resume()
}
CopyStreamQuery.prototype._transform = function(chunk, enc, cb) {
var offset = 0
var Byte1Len = 1;
var Int32Len = 4;
if(this._remainder && chunk) {
chunk = Buffer.concat([this._remainder, chunk])
}
if(!this._copyOutResponse) {
this._copyOutResponse = true
if(chunk[0] == code.E) {
this._detach()
this.connection.stream.unshift(chunk)
this.push(null)
return cb();
var length;
var messageCode;
var needPush = false;
var buffer = Buffer.alloc(chunk.length);
var buffer_offset = 0;
var self = this;
var pushBufferIfneeded = function() {
if (needPush && buffer_offset > 0) {
self.push(buffer.slice(0, buffer_offset))
buffer_offset = 0;
}
if(chunk[0] != code.H) {
this.emit('error', new Error('Expected copy out response'))
}
var length = chunk.readUInt32BE(1)
offset = 1
offset += length
}
while((chunk.length - offset) > 5) {
while((chunk.length - offset) >= (Byte1Len + Int32Len)) {
var messageCode = chunk[offset]
//complete
if(messageCode == code.c) {
this._detach()
this.connection.stream.unshift(chunk.slice(offset + 5))
this.push(null)
return cb();
//console.log('PostgreSQL message ' + String.fromCharCode(messageCode))
switch(messageCode) {
// detect COPY start
case code.CopyOutResponse:
if (!this._gotCopyOutResponse) {
this._gotCopyOutResponse = true
} else {
this.emit('error', new Error('Unexpected CopyOutResponse message (H)'))
}
break;
// meaningful row
case code.CopyData:
needPush = true;
break;
// standard interspersed messages. discard
case code.ParameterStatus:
case code.NoticeResponse:
case code.NotificationResponse:
break;
case code.ErrorResponse:
case code.CopyDone:
pushBufferIfneeded();
this.push(null)
return cb();
break;
default:
this.emit('error', new Error('Unexpected PostgreSQL message ' + String.fromCharCode(messageCode)))
}
//something bad happened
if(messageCode != code.d) {
return this.emit('error', new Error('expected "d" (copydata message)'))
}
var length = chunk.readUInt32BE(offset + 1) - 4 //subtract length of UInt32
//can we read the next row?
if(chunk.length > (offset + length + 5)) {
offset += 5
var slice = chunk.slice(offset, offset + length)
offset += length
this.push(slice)
this.rowCount++
length = chunk.readUInt32BE(offset+Byte1Len)
if(chunk.length >= (offset + Byte1Len + length)) {
offset += Byte1Len + Int32Len
var message = chunk.slice(offset, offset + length - Int32Len)
switch(messageCode) {
case code.CopyData:
this.rowCount++;
message.copy(buffer, buffer_offset);
buffer_offset += message.length;
break;
case code.ParameterStatus:
case code.NoticeResponse:
case code.NotificationResponse:
this.emit('warning', 'Got an interspersed message: ' + message);
break;
}
offset += (length - Int32Len);
} else {
// we need more chunks for a complete message
break;
}
}
pushBufferIfneeded();
if(chunk.length - offset) {
var slice = chunk.slice(offset)
this._remainder = slice
@@ -104,6 +130,9 @@ CopyStreamQuery.prototype.handleError = function(e) {
this.emit('error', e)
}
CopyStreamQuery.prototype.handleCopyData = function(chunk) {
}
CopyStreamQuery.prototype.handleCommandComplete = function() {
}

index.js

@@ -1,19 +1,21 @@
var CopyToQueryStream = require('./copy-to')
'use strict';
var CopyToQueryStream = require('./copy-to')
module.exports = {
to: function(txt) {
return new CopyToQueryStream(txt)
to: function(txt, options) {
return new CopyToQueryStream(txt, options)
},
from: function (txt) {
return new CopyStreamQuery(txt)
from: function (txt, options) {
return new CopyStreamQuery(txt, options)
}
}
var Transform = require('stream').Transform
var util = require('util')
var code = require('./message-formats')
var CopyStreamQuery = function(text) {
Transform.call(this)
var CopyStreamQuery = function(text, options) {
Transform.call(this, options)
this.text = text
this._listeners = null
this._copyOutResponse = null
@@ -27,28 +29,21 @@ CopyStreamQuery.prototype.submit = function(connection) {
connection.query(this.text)
}
var code = {
H: 72, //CopyOutResponse
d: 0x64, //CopyData
c: 0x63 //CopyDone
}
var copyDataBuffer = Buffer([code.d])
CopyStreamQuery.prototype._transform = function(chunk, enc, cb) {
this.push(copyDataBuffer)
var lenBuffer = Buffer(4)
lenBuffer.writeUInt32BE(chunk.length + 4, 0)
var Int32Len = 4;
var lenBuffer = Buffer.from([code.CopyData, 0, 0, 0, 0])
lenBuffer.writeUInt32BE(chunk.length + Int32Len, 1)
this.push(lenBuffer)
this.push(chunk)
this.rowCount++
cb()
}
CopyStreamQuery.prototype._flush = function(cb) {
var finBuffer = Buffer([code.c, 0, 0, 0, 4])
var Int32Len = 4;
var finBuffer = Buffer.from([code.CopyDone, 0, 0, 0, Int32Len])
this.push(finBuffer)
//never call this callback, do not close underlying stream
//cb()
this.cb_flush = cb
}
CopyStreamQuery.prototype.handleError = function(e) {
@@ -56,12 +51,24 @@ CopyStreamQuery.prototype.handleError = function(e) {
}
CopyStreamQuery.prototype.handleCopyInResponse = function(connection) {
this.pipe(connection.stream)
this.pipe(connection.stream, { end: false })
}
CopyStreamQuery.prototype.handleCommandComplete = function() {
this.unpipe()
this.emit('end')
CopyStreamQuery.prototype.handleCommandComplete = function(msg) {
// Parse affected row count as in
// https://github.com/brianc/node-postgres/blob/35e5567f86774f808c2a8518dd312b8aa3586693/lib/result.js#L37
var match = /COPY (\d+)/.exec((msg || {}).text)
if (match) {
this.rowCount = parseInt(match[1], 10)
}
// we delay the _flush cb so that the 'end' event is
// triggered after CommandComplete
this.cb_flush()
// unpipe from connection
this.unpipe(this.connection)
this.connection = null
}
CopyStreamQuery.prototype.handleReadyForQuery = function() {

25
message-formats.js Normal file

@@ -0,0 +1,25 @@
/**
* The COPY feature uses the following protocol codes.
* The codes for the most recent protocol version are documented on
* https://www.postgresql.org/docs/current/static/protocol-message-formats.html
*
* The protocol flow itself is described on
* https://www.postgresql.org/docs/current/static/protocol-flow.html
*/
module.exports = {
ErrorResponse: 0x45, // E
CopyInResponse: 0x47, // G
CopyOutResponse: 0x48, // H
CopyBothResponse: 0x57, // W
CopyDone: 0x63, // c
CopyData: 0x64, // d
CopyFail: 0x66, // f
// It is possible for NoticeResponse and ParameterStatus messages to be interspersed between CopyData messages;
// frontends must handle these cases, and should be prepared for other asynchronous message types as well
// (see Section 50.2.6).
// Otherwise, any message type other than CopyData or CopyDone may be treated as terminating copy-out mode.
NotificationResponse: 0x41, // A
NoticeResponse: 0x4E, // N
ParameterStatus: 0x53 // S
}
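A sketch of how these codes frame the wire format, mirroring the Byte1Len/Int32Len arithmetic in copy-to.js above (the buffer is hand-built for illustration): each message is a 1-byte code followed by an Int32 length that counts itself plus the payload.

```js
var code = require('./message-formats');

var Byte1Len = 1, Int32Len = 4;

// Hand-build a CopyData frame carrying the 3-byte payload "1\t2".
var payload = Buffer.from('1\t2');
var frame = Buffer.alloc(Byte1Len + Int32Len + payload.length);
frame[0] = code.CopyData;                          // 'd'
frame.writeUInt32BE(Int32Len + payload.length, 1); // length includes itself
payload.copy(frame, Byte1Len + Int32Len);

// Parsing walks the same arithmetic in reverse.
var messageCode = frame[0];
var length = frame.readUInt32BE(Byte1Len);
var message = frame.slice(Byte1Len + Int32Len, Byte1Len + length);
console.log(String.fromCharCode(messageCode), message.toString()); // d 1\t2
```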

215
package-lock.json generated Normal file

@@ -0,0 +1,215 @@
{
"name": "pg-copy-streams",
"version": "2.2.0",
"lockfileVersion": 1,
"requires": true,
"dependencies": {
"async": {
"version": "0.2.10",
"resolved": "https://registry.npmjs.org/async/-/async-0.2.10.tgz",
"integrity": "sha1-trvgsGdLnXGXCMo43owjfLUmw9E=",
"dev": true
},
"base64-js": {
"version": "0.0.2",
"resolved": "https://registry.npmjs.org/base64-js/-/base64-js-0.0.2.tgz",
"integrity": "sha1-Ak8Pcq+iW3X5wO5zzU9V7Bvtl4Q=",
"dev": true
},
"benchmark": {
"version": "2.1.4",
"resolved": "https://registry.npmjs.org/benchmark/-/benchmark-2.1.4.tgz",
"integrity": "sha1-CfPeMckWQl1JjMLuVloOvzwqVik=",
"dev": true,
"requires": {
"lodash": "^4.17.4",
"platform": "^1.3.3"
}
},
"bops": {
"version": "0.0.6",
"resolved": "https://registry.npmjs.org/bops/-/bops-0.0.6.tgz",
"integrity": "sha1-CC0dVfoB5g29wuvC26N/ZZVUzzo=",
"dev": true,
"requires": {
"base64-js": "0.0.2",
"to-utf8": "0.0.1"
}
},
"buffer-writer": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/buffer-writer/-/buffer-writer-2.0.0.tgz",
"integrity": "sha512-a7ZpuTZU1TRtnwyCNW3I5dc0wWNC3VR9S++Ewyk2HHZdrO3CQJqSpd+95Us590V6AL7JqUAH2IwZ/398PmNFgw==",
"dev": true
},
"concat-stream": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/concat-stream/-/concat-stream-1.1.0.tgz",
"integrity": "sha1-hCae/YzGUCdeMi8wnfRIZ7xRxfM=",
"dev": true,
"requires": {
"bops": "0.0.6"
}
},
"duplex-child-process": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/duplex-child-process/-/duplex-child-process-1.0.0.tgz",
"integrity": "sha1-SpSXQob7x4QNCFPSs/5ZCp20YUc=",
"dev": true
},
"gonna": {
"version": "0.0.0",
"resolved": "https://registry.npmjs.org/gonna/-/gonna-0.0.0.tgz",
"integrity": "sha1-6k4ZsVJ6F4LhJQVeMCSabUvHmlk=",
"dev": true
},
"heroku-env": {
"version": "0.1.1",
"resolved": "https://registry.npmjs.org/heroku-env/-/heroku-env-0.1.1.tgz",
"integrity": "sha1-wGeRyUTpuHSOMXf1S/cBQyZ+Yxc=",
"dev": true,
"requires": {
"parse-database-url": "~0.2.0"
}
},
"lodash": {
"version": "4.17.11",
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.11.tgz",
"integrity": "sha512-cQKh8igo5QUhZ7lg38DYWAxMvjSAKG0A8wGSVimP07SIUEK2UO+arSRKbRZWtelMtN5V0Hkwh5ryOto/SshYIg==",
"dev": true
},
"packet-reader": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/packet-reader/-/packet-reader-1.0.0.tgz",
"integrity": "sha512-HAKu/fG3HpHFO0AA8WE8q2g+gBJaZ9MG7fcKk+IJPLTGAD6Psw4443l+9DGRbOIh3/aXr7Phy0TjilYivJo5XQ==",
"dev": true
},
"parse-database-url": {
"version": "0.2.2",
"resolved": "https://registry.npmjs.org/parse-database-url/-/parse-database-url-0.2.2.tgz",
"integrity": "sha1-SGFa56fA/HfjKU0jVCpqUnPDVws=",
"dev": true
},
"pg": {
"version": "7.8.2",
"resolved": "https://registry.npmjs.org/pg/-/pg-7.8.2.tgz",
"integrity": "sha512-5U4fjV43DnQxelkhyPdU3YfUbYVa21bNmreXRCM/gFFw09YxWaitWWITm/u0twUNF5EYOSDhkgyEAocgtpP9JQ==",
"dev": true,
"requires": {
"buffer-writer": "2.0.0",
"packet-reader": "1.0.0",
"pg-connection-string": "0.1.3",
"pg-pool": "^2.0.4",
"pg-types": "~2.0.0",
"pgpass": "1.x",
"semver": "4.3.2"
}
},
"pg-connection-string": {
"version": "0.1.3",
"resolved": "https://registry.npmjs.org/pg-connection-string/-/pg-connection-string-0.1.3.tgz",
"integrity": "sha1-2hhHsglA5C7hSSvq9l1J2RskXfc=",
"dev": true
},
"pg-int8": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/pg-int8/-/pg-int8-1.0.1.tgz",
"integrity": "sha512-WCtabS6t3c8SkpDBUlb1kjOs7l66xsGdKpIPZsg4wR+B3+u9UAum2odSsF9tnvxg80h4ZxLWMy4pRjOsFIqQpw==",
"dev": true
},
"pg-pool": {
"version": "2.0.6",
"resolved": "https://registry.npmjs.org/pg-pool/-/pg-pool-2.0.6.tgz",
"integrity": "sha512-hod2zYQxM8Gt482q+qONGTYcg/qVcV32VHVPtktbBJs0us3Dj7xibISw0BAAXVMCzt8A/jhfJvpZaxUlqtqs0g==",
"dev": true
},
"pg-types": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/pg-types/-/pg-types-2.0.0.tgz",
"integrity": "sha512-THUD7gQll5tys+5eQ8Rvs7DjHiIC3bLqixk3gMN9Hu8UrCBAOjf35FoI39rTGGc3lM2HU/R+Knpxvd11mCwOMA==",
"dev": true,
"requires": {
"pg-int8": "1.0.1",
"postgres-array": "~2.0.0",
"postgres-bytea": "~1.0.0",
"postgres-date": "~1.0.0",
"postgres-interval": "^1.1.0"
}
},
"pgpass": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/pgpass/-/pgpass-1.0.2.tgz",
"integrity": "sha1-Knu0G2BltnkH6R2hsHwYR8h3swY=",
"dev": true,
"requires": {
"split": "^1.0.0"
}
},
"platform": {
"version": "1.3.5",
"resolved": "https://registry.npmjs.org/platform/-/platform-1.3.5.tgz",
"integrity": "sha512-TuvHS8AOIZNAlE77WUDiR4rySV/VMptyMfcfeoMgs4P8apaZM3JrnbzBiixKUv+XR6i+BXrQh8WAnjaSPFO65Q==",
"dev": true
},
"postgres-array": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/postgres-array/-/postgres-array-2.0.0.tgz",
"integrity": "sha512-VpZrUqU5A69eQyW2c5CA1jtLecCsN2U/bD6VilrFDWq5+5UIEVO7nazS3TEcHf1zuPYO/sqGvUvW62g86RXZuA==",
"dev": true
},
"postgres-bytea": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/postgres-bytea/-/postgres-bytea-1.0.0.tgz",
"integrity": "sha1-AntTPAqokOJtFy1Hz5zOzFIazTU=",
"dev": true
},
"postgres-date": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/postgres-date/-/postgres-date-1.0.3.tgz",
"integrity": "sha1-4tiXAu/bJY/52c7g/pG9BpdSV6g=",
"dev": true
},
"postgres-interval": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/postgres-interval/-/postgres-interval-1.2.0.tgz",
"integrity": "sha512-9ZhXKM/rw350N1ovuWHbGxnGh/SNJ4cnxHiM0rxE4VN41wsg8P8zWn9hv/buK00RP4WvlOyr/RBDiptyxVbkZQ==",
"dev": true,
"requires": {
"xtend": "^4.0.0"
}
},
"semver": {
"version": "4.3.2",
"resolved": "https://registry.npmjs.org/semver/-/semver-4.3.2.tgz",
"integrity": "sha1-x6BxWKgL7dBSNVt3DYLWZA+AO+c=",
"dev": true
},
"split": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/split/-/split-1.0.1.tgz",
"integrity": "sha512-mTyOoPbrivtXnwnIxZRFYRrPNtEFKlpB2fvjSnCQUiAA6qAZzqwna5envK4uk6OIeP17CsdF3rSBGYVBsU0Tkg==",
"dev": true,
"requires": {
"through": "2"
}
},
"through": {
"version": "2.3.8",
"resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz",
"integrity": "sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU=",
"dev": true
},
"to-utf8": {
"version": "0.0.1",
"resolved": "https://registry.npmjs.org/to-utf8/-/to-utf8-0.0.1.tgz",
"integrity": "sha1-0Xrqcv8vujm55DYBvns/9y4ImFI=",
"dev": true
},
"xtend": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.1.tgz",
"integrity": "sha1-pcbVMr5lbiPbgg77lDofBJmNY68=",
"dev": true
}
}
}

package.json

@@ -1,6 +1,6 @@
{
"name": "pg-copy-streams",
"version": "0.2.3",
"version": "2.2.0",
"description": "Low-Level COPY TO and COPY FROM streams for PostgreSQL in JavaScript using",
"main": "index.js",
"scripts": {
@@ -23,11 +23,13 @@
"url": "https://github.com/brianc/node-pg-copy-streams/issues"
},
"devDependencies": {
"pg.js": "~2.8.1",
"async": "~0.2.10",
"benchmark": "^2.1.4",
"concat-stream": "~1.1.0",
"duplex-child-process": "^1.0.0",
"gonna": "0.0.0",
"lodash": "~2.2.1",
"heroku-env": "~0.1.1",
"async": "~0.2.10"
"lodash": "^4.17.11",
"pg": "^7.8.2"
}
}

test/binary.js

@@ -1,10 +1,12 @@
'use strict';
var assert = require('assert')
var gonna = require('gonna')
var async = require('async')
var concat = require('concat-stream')
var _ = require('lodash')
var pg = require('pg.js')
var pg = require('pg')
var from = require('../').from
var to = require('../').to
@@ -19,10 +21,12 @@ var testBinaryCopy = function() {
var fromClient = client()
var toClient = client()
queries = [
'CREATE TABLE data (num BIGINT, word TEXT)',
var queries = [
'DROP TABLE IF EXISTS data',
'CREATE TABLE IF NOT EXISTS data (num BIGINT, word TEXT)',
'INSERT INTO data (num, word) VALUES (1, \'hello\'), (2, \'other thing\'), (3, \'goodbye\')',
'CREATE TABLE data_copy (LIKE data INCLUDING ALL)'
'DROP TABLE IF EXISTS data_copy',
'CREATE TABLE IF NOT EXISTS data_copy (LIKE data INCLUDING ALL)'
]
async.eachSeries(queries, _.bind(fromClient.query, fromClient), function(err) {
@@ -31,7 +35,7 @@ var testBinaryCopy = function() {
var fromStream = fromClient.query(to('COPY (SELECT * FROM data) TO STDOUT BINARY'))
var toStream = toClient.query(from('COPY data_copy FROM STDIN BINARY'))
runStream = function(callback) {
var runStream = function(callback) {
fromStream.on('error', callback)
toStream.on('error', callback)
toStream.on('finish', callback)

test/copy-from.js

@@ -1,31 +1,39 @@
'use strict';
var assert = require('assert')
var gonna = require('gonna')
var concat = require('concat-stream')
var _ = require('lodash')
var pg = require('pg.js')
var pg = require('pg')
var copy = require('../').from
var client = function() {
var client = new pg.Client()
client.connect()
return client
}
var testConstruction = function() {
var highWaterMark = 10
var stream = copy('COPY numbers FROM STDIN', {highWaterMark: 10, objectMode: true})
for(var i = 0; i < highWaterMark * 1.5; i++) {
stream.write('1\t2\n')
}
assert(!stream.write('1\t2\n'), 'Should correctly set highWaterMark.')
}
testConstruction()
var testRange = function(top) {
var client = function() {
var client = new pg.Client()
client.connect()
client.query('CREATE TEMP TABLE numbers(num int, bigger_num int)')
return client
}
var fromClient = client()
var copy = require('../').from
fromClient.query('CREATE TEMP TABLE numbers(num int, bigger_num int)')
var txt = 'COPY numbers FROM STDIN'
var stream = fromClient.query(copy(txt))
var rowEmitCount = 0
stream.on('row', function() {
rowEmitCount++
})
for(var i = 0; i < top; i++) {
stream.write(Buffer('' + i + '\t' + i*10 + '\n'))
stream.write(Buffer.from('' + i + '\t' + i*10 + '\n'))
}
stream.end()
var countDone = gonna('have correct count')
@@ -33,7 +41,8 @@ var testRange = function(top) {
fromClient.query('SELECT COUNT(*) FROM numbers', function(err, res) {
assert.ifError(err)
assert.equal(res.rows[0].count, top, 'expected ' + top + ' rows but got ' + res.rows[0].count)
console.log('found ', res.rows.length, 'rows')
assert.equal(stream.rowCount, top, 'expected ' + top + ' rows but db count is ' + stream.rowCount)
//console.log('found ', res.rows.length, 'rows')
countDone()
var firstRowDone = gonna('have correct result')
assert.equal(stream.rowCount, top, 'should have rowCount ' + top + ' ')
@@ -48,3 +57,48 @@ var testRange = function(top) {
}
testRange(1000)
var testSingleEnd = function() {
var fromClient = client()
fromClient.query('CREATE TEMP TABLE numbers(num int)')
var txt = 'COPY numbers FROM STDIN';
var stream = fromClient.query(copy(txt))
var count = 0;
stream.on('end', function() {
count++;
assert(count==1, '`end` Event was triggered ' + count + ' times');
if (count == 1) fromClient.end();
})
stream.end(Buffer.from('1\n'))
}
testSingleEnd()
var testClientReuse = function() {
var fromClient = client()
fromClient.query('CREATE TEMP TABLE numbers(num int)')
var txt = 'COPY numbers FROM STDIN';
var count = 0;
var countMax = 2;
var card = 100000;
var runStream = function() {
var stream = fromClient.query(copy(txt))
stream.on('end', function() {
count++;
if (count<countMax) {
runStream()
} else {
fromClient.query('SELECT sum(num) AS s FROM numbers', function(err, res) {
var total = countMax * card * (card+1)
assert.equal(res.rows[0].s, total, 'copy-from.ClientReuse wrong total')
fromClient.end()
})
}
})
stream.write(Buffer.from(_.range(0, card+1).join('\n') + '\n'))
stream.end(Buffer.from(_.range(0, card+1).join('\n') + '\n'))
}
runStream();
}
testClientReuse()

test/copy-to.js

@@ -1,12 +1,15 @@
'use strict';
var assert = require('assert')
var gonna = require('gonna')
var _ = require('lodash')
var async = require('async')
var concat = require('concat-stream')
var pg = require('pg.js')
var pg = require('pg')
var copy = require('../').to
var code = require('../message-formats')
var client = function() {
var client = new pg.Client()
@@ -14,9 +17,30 @@ var client = function() {
return client
}
var testConstruction = function() {
var txt = 'COPY (SELECT * FROM generate_series(0, 10)) TO STDOUT'
var stream = copy(txt, {highWaterMark: 10})
assert.equal(stream._readableState.highWaterMark, 10, 'Client should have been set with a correct highWaterMark.')
}
testConstruction()
var testComparators = function() {
var copy1 = copy();
copy1.pipe(concat(function(buf) {
assert(copy1._gotCopyOutResponse, 'should have received CopyOutResponse')
assert(!copy1._remainder, 'Message with no additional data (len=Int4Len+0) should not leave a remainder')
}))
copy1.end(Buffer.from([code.CopyOutResponse, 0x00, 0x00, 0x00, 0x04]));
}
testComparators();
var testRange = function(top) {
var fromClient = client()
var txt = 'COPY (SELECT * from generate_series(0, ' + (top - 1) + ')) TO STDOUT'
var res;
var stream = fromClient.query(copy(txt))
var done = gonna('finish piping out', 1000, function() {
@@ -24,37 +48,158 @@ var testRange = function(top) {
})
stream.pipe(concat(function(buf) {
var res = buf.toString('utf8')
res = buf.toString('utf8')
}))
stream.on('end', function() {
var expected = _.range(0, top).join('\n') + '\n'
assert.equal(res, expected)
assert.equal(stream.rowCount, top, 'should have rowCount ' + top + ' but got ' + stream.rowCount)
done()
}))
});
}
testRange(10000)
var testLeak = function(rounds) {
var fromClient = client()
var txt = 'COPY (SELECT 10) TO STDOUT'
var testInternalPostgresError = function() {
var cancelClient = client()
var queryClient = client()
var runStream = function(num, callback) {
var stream = fromClient.query(copy(txt))
var runStream = function(callback) {
var txt = "COPY (SELECT pg_sleep(10)) TO STDOUT"
var stream = queryClient.query(copy(txt))
stream.on('data', function(data) {
// Just throw away the data.
})
stream.on('end', callback)
stream.on('error', callback)
setTimeout(function() {
var cancelQuery = "SELECT pg_cancel_backend(pid) FROM pg_stat_activity WHERE query ~ 'pg_sleep' AND NOT query ~ 'pg_cancel_backend'"
cancelClient.query(cancelQuery, function() { cancelClient.end() })
}, 50)
}
async.timesSeries(rounds, runStream, function(err) {
assert.equal(err, null)
assert.equal(fromClient.connection.stream.listeners('close').length, 0)
assert.equal(fromClient.connection.stream.listeners('data').length, 1)
assert.equal(fromClient.connection.stream.listeners('end').length, 2)
assert.equal(fromClient.connection.stream.listeners('error').length, 1)
fromClient.end()
runStream(function(err) {
assert.notEqual(err, null)
var expectedMessage = 'canceling statement due to user request'
assert.notEqual(err.toString().indexOf(expectedMessage), -1, 'Error message should mention reason for query failure.')
queryClient.end()
})
}
testInternalPostgresError()
testLeak(5)
var testNoticeResponse = function() {
// we use a special trick to generate a warning
// on the copy stream.
var queryClient = client()
var set = '';
set += 'SET SESSION client_min_messages = WARNING;'
set += 'SET SESSION standard_conforming_strings = off;'
set += 'SET SESSION escape_string_warning = on;'
queryClient.query(set, function(err, res) {
assert.equal(err, null, 'testNoticeResponse - could not SET parameters')
var runStream = function(callback) {
var txt = "COPY (SELECT '\\\n') TO STDOUT"
var stream = queryClient.query(copy(txt))
stream.on('data', function(data) {
})
stream.on('error', callback)
// make sure stream is pulled from
stream.pipe(concat(callback.bind(null,null)))
}
runStream(function(err) {
assert.equal(err, null, err)
queryClient.end()
})
})
}
testNoticeResponse();
var warnAndReturnOne = `
CREATE OR REPLACE FUNCTION pg_temp.test_warn_return_one()
RETURNS INTEGER
AS $$
BEGIN
RAISE WARNING 'hey, this is returning one';
RETURN 1;
END;
$$ LANGUAGE plpgsql`;
var testInterspersedMessageDoesNotBreakCopyFlow = function() {
var toClient = client();
toClient.query(warnAndReturnOne, (err, res) => {
var q = "COPY (SELECT * FROM pg_temp.test_warn_return_one()) TO STDOUT WITH (FORMAT 'csv', HEADER true)";
var stream = toClient.query(copy(q));
var done = gonna('got expected COPY TO payload', 1000, function() {
toClient.end();
});
stream.pipe(concat(function(buf) {
res = buf.toString('utf8')
}));
stream.on('end', function() {
var expected = "test_warn_return_one\n1\n";
assert.equal(res, expected);
// note the header counts as a row
assert.equal(stream.rowCount, 2, 'should have rowCount = 2 but got ' + stream.rowCount);
done();
});
});
};
testInterspersedMessageDoesNotBreakCopyFlow();
var testInterspersedMessageEmitsWarning = function() {
var toClient = client();
toClient.query(warnAndReturnOne, (err, res) => {
var q = "COPY (SELECT * FROM pg_temp.test_warn_return_one()) TO STDOUT WITH (FORMAT 'csv', HEADER true)";
var stream = toClient.query(copy(q));
var done = gonna('got expected warning event', 1000, function() {
toClient.end();
});
stream.on('warning', function(msg) {
assert(msg.match(/Got an interspersed message:.*WARNING.*hey, this is returning one/),
'did not get expected warning for interspersed message in COPY TO');
done();
})
});
};
testInterspersedMessageEmitsWarning();
var testClientReuse = function() {
var c = client();
var limit = 100000;
var countMax = 10;
var countA = countMax;
var countB = 0;
var runStream = function(num, callback) {
var sql = "COPY (SELECT * FROM generate_series(0,"+limit+")) TO STDOUT"
var stream = c.query(copy(sql))
stream.on('error', callback)
stream.pipe(concat(function(buf) {
var res = buf.toString('utf8');
var exp = _.range(0, limit+1).join('\n') + '\n'
assert.equal(res, exp, 'clientReuse: sent & received buffer should be equal')
countB++;
callback();
}))
}
var rs = function(err) {
assert.equal(err, null, err)
countA--;
if (countA) {
runStream(countB, rs)
} else {
assert.equal(countB, countMax, 'clientReuse: there should be countMax queries on the same client')
c.end()
}
};
runStream(countB, rs);
}
testClientReuse();

test/index.js

@@ -1,3 +1,5 @@
'use strict';
require('./copy-from')
require('./copy-to')
require('./binary')