Compare commits: v0.1.0...v2.x-carto (103 commits)
| SHA1 |
|---|
| c9157cd1ab |
| 0acb072906 |
| 645616c2e0 |
| 0c787920a4 |
| 5fec3a5cc1 |
| 002a36bcfc |
| be101502d9 |
| c865db412b |
| edc0462470 |
| d26449cc69 |
| 6b12cbc876 |
| 27ff2465ce |
| 69b5b60e9f |
| 06cc013e8e |
| 7115f25ec1 |
| 38f9ebc606 |
| ff39922e86 |
| cffae659b8 |
| 98d8d8a8e0 |
| abf76d14fc |
| abd92ae4ca |
| 6249052bc8 |
| 291e257116 |
| 78c2d834c7 |
| 2af7ce57b1 |
| 90480c7cfa |
| df47d58d74 |
| 7d546a5c34 |
| 9c46a8223e |
| 4ff1971113 |
| c8661b8afb |
| 0b716029db |
| 660931ef1b |
| e3c2109c5a |
| f01c278615 |
| 1c3271ad06 |
| 43bc2cac7b |
| 5e3934b71f |
| 69f670ab13 |
| 455e170084 |
| e15feb199a |
| 191a4ec16a |
| 399bff7ed7 |
| 8174e10fb5 |
| f29aef3bba |
| b2e108571e |
| 7003f6070f |
| ade7ab95a0 |
| 9ccda04036 |
| a5e532f20b |
| 2a4db2920e |
| f155899570 |
| 18be125596 |
| ae5b344395 |
| ee84aba89f |
| e0aa7db324 |
| 0f0ddf7ad4 |
| ed57e131e9 |
| c4a0e6dd58 |
| d5b5c8c569 |
| 6981ea6ac5 |
| d2b9677a02 |
| bfee86543f |
| 21f47220fc |
| b1b613125f |
| 4222053744 |
| 040ed5f4da |
| f9ee1c083a |
| dcfffd0670 |
| 12e4ca33b0 |
| 8f2355e454 |
| d6eab36b66 |
| 33f6ecc11b |
| 36572a8b7b |
| 0c5d08edae |
| b78a3eb845 |
| 107f007249 |
| 25b8d6da5f |
| 1c0c8871c1 |
| beb54334e2 |
| 1db9b3ec3d |
| 1822f399d2 |
| 208861d057 |
| 1957554301 |
| bef3ba5fcd |
| 591a11c955 |
| b40918ddb8 |
| b8946265c3 |
| d649905dbb |
| 22da85448e |
| 531f72dcb3 |
| dc6521b7c1 |
| 16e2001064 |
| 5bb93308eb |
| e5f864bac0 |
| bfc0353d2c |
| 90012d84d7 |
| 808c284b55 |
| b48283f4aa |
| 5c03015715 |
| 18fa1fdeb3 |
| 824936176c |
| 2f5b37e0ad |
.npmignore (new file, +4)
@@ -0,0 +1,4 @@
.gitignore
.travis.yml
bench/
test/

.travis.yml (new file, +14)
@@ -0,0 +1,14 @@
language: node_js
node_js:
- "6"
- "8"
- "10"
- "11"

services:
- postgresql

before_install:
- npm install npm --global
env:
- PGUSER=postgres PGDATABASE=postgres

Makefile (new file, +19)
@@ -0,0 +1,19 @@
.PHONY: publish-patch test

test:
	npm test

patch: test
	npm version patch -m "Bump version"
	git push origin master --tags
	npm publish

minor: test
	npm version minor -m "Bump version"
	git push origin master --tags
	npm publish

major: test
	npm version major -m "Bump version"
	git push origin master --tags
	npm publish

NEWS.carto.md (new file, +34)
@@ -0,0 +1,34 @@
# CARTO's Changelog

## v2.2.0-carto.1
Released 2019-mm-dd

Bug fixes:
* Copy to: ensure stream is detached when finished
* Copy to: deal with interspersed messages properly. See [#9](https://github.com/CartoDB/node-pg-copy-streams/pull/9)

## v1.2.0-carto.3
Released 2018-11-21

Features:
* Drop support for Node.js 0.12, 4, and 5.
* Add support for Node.js 8 and 10.
* Add package-lock.json
* Do not use deprecated Buffer constructors.

## v1.2.0-carto.2
Released 2018-10-26

Bug fixes:
* Make all modules use strict mode semantics.

## v1.2.0-carto.1
Released 2018-06-11

Bug fixes:
* Improve performance of COPY TO by sending bigger chunks through low-level `push()`. See https://github.com/CartoDB/node-pg-copy-streams/pull/1

## v1.2.0
Released 2016-08-22

Vanilla version v1.2.0 from upstream repository. See https://github.com/CartoDB/node-pg-copy-streams/releases/tag/v1.2.0

README.md (new file, +158)
@@ -0,0 +1,158 @@
## pg-copy-streams

[Build Status](https://travis-ci.org/brianc/node-pg-copy-streams)

COPY FROM / COPY TO for node-postgres. Stream from one database to another, and stuff.

## how? what? huh?

Did you know the _all powerful_ PostgreSQL supports streaming binary data directly into and out of a table?
This means you can take your favorite CSV or TSV or whatever format file and pipe it directly into an existing PostgreSQL table.
You can also take a table and pipe it directly to a file, another database, stdout, even to `/dev/null` if you're crazy!

What this module gives you is a [Readable](http://nodejs.org/api/stream.html#stream_class_stream_readable) or [Writable](http://nodejs.org/api/stream.html#stream_class_stream_writable) stream directly into/out of a table in your database.
This mode of interfacing with your table is _very fast_ and _very brittle_. You are responsible for properly encoding and ordering all your columns. If anything is out of place PostgreSQL will send you back an error. The stream works within a transaction so you won't leave things in a 1/2 borked state, but it's still good to be aware of.
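
For example, in the default text format a COPY ... FROM STDIN expects tab-separated columns and newline-terminated rows. A minimal sketch of what "properly encoding your columns" means (the `my_table` definition here is hypothetical, purely for illustration):

```js
// assumes: CREATE TABLE my_table (num int, word text)
var stream = client.query(copyFrom('COPY my_table FROM STDIN'));
stream.write(Buffer.from('1\thello\n')); // columns separated by \t
stream.write(Buffer.from('2\tworld\n')); // rows terminated by \n
stream.end();
```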

If you're not familiar with the feature (I wasn't either) you can read this for some good help: http://www.postgresql.org/docs/9.3/static/sql-copy.html

## examples

### pipe from a table to stdout

```js
var {Pool} = require('pg');
var copyTo = require('pg-copy-streams').to;

var pool = new Pool();

pool.connect(function(err, client, done) {
  var stream = client.query(copyTo('COPY my_table TO STDOUT'));
  stream.pipe(process.stdout);
  stream.on('end', done);
  stream.on('error', done);
});
```

### pipe from a file to table

```js
var fs = require('fs');
var {Pool} = require('pg');
var copyFrom = require('pg-copy-streams').from;

var pool = new Pool();

pool.connect(function(err, client, done) {
  var stream = client.query(copyFrom('COPY my_table FROM STDIN'));
  var fileStream = fs.createReadStream('some_file.tsv');
  fileStream.on('error', done);
  stream.on('error', done);
  stream.on('end', done);
  fileStream.pipe(stream);
});
```

*Important*: Even if `pg-copy-streams.from` is used as a Writable (via `pipe`), you should not listen for the 'finish' event and expect that the COPY command has already been correctly acknowledged by the database. Internally, a duplex stream is used to pipe the data into the database connection and the COPY command should be considered complete only when the 'end' event is triggered.
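
A minimal sketch of that advice, reusing the hypothetical `my_table` and `some_file.tsv` from the example above:

```js
pool.connect(function(err, client, done) {
  var stream = client.query(copyFrom('COPY my_table FROM STDIN'));
  fs.createReadStream('some_file.tsv').pipe(stream);
  // 'finish' only means the writable side was flushed;
  // 'end' means the database acknowledged the COPY.
  stream.on('end', function() {
    console.log('COPY acknowledged');
    done();
  });
  stream.on('error', done);
});
```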

## install

```sh
$ npm install pg-copy-streams
```

## notice

This module __only__ works with the pure JavaScript bindings. If you're using `require('pg').native` please make sure to use normal `require('pg')` or `require('pg.js')` when you're using copy streams.
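
In other words (a sketch):

```js
var pg = require('pg');           // fine: pure JavaScript client
// var pg = require('pg').native  // avoid while using copy streams
```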

Before you set out on this magical piping journey, you _really_ should read this: http://www.postgresql.org/docs/current/static/sql-copy.html, and you might want to take a look at the [tests](https://github.com/brianc/node-pg-copy-streams/tree/master/test) to get an idea of how things work.

Take note of the following warning in the PostgreSQL documentation:
> COPY stops operation at the first error. This should not lead to problems in the event of a COPY TO, but the target table will already have received earlier rows in a COPY FROM. These rows will not be visible or accessible, but they still occupy disk space. This might amount to a considerable amount of wasted disk space if the failure happened well into a large copy operation. You might wish to invoke VACUUM to recover the wasted space.

## benchmarks

The COPY command is commonly used to move huge sets of data. This can put some pressure on the node.js loop, the amount of CPU or the amount of memory used.
There is a bench/ directory in the repository where benchmark scripts are stored. If you have performance issues with `pg-copy-streams`, do not hesitate to write a new benchmark that highlights your issue. Please avoid committing huge files (PRs won't be accepted) and find other ways to generate huge datasets.

If you have a local instance of postgres on your machine, you can start a benchmark for example with

```sh
$ cd bench
$ PGPORT=5432 PGDATABASE=postgres node copy-from.js
```

## tests

In order to launch the test suite, you need to have a local instance of postgres running on your machine.

```sh
$ PGPORT=5432 PGDATABASE=postgres make test
```

## contributing

Instead of adding a bunch more code to the already bloated [node-postgres](https://github.com/brianc/node-postgres) I am trying to make the internals extensible and work on adding edge-case features as 3rd party modules.
This is one of those.

Please, if you have any issues with this, open an issue.

Better yet, submit a pull request. I _love_ pull requests.

Generally how I work is if you submit a few pull requests and you're interested I'll make you a contributor and give you full access to everything.

Since this isn't a module with tons of installs and dependent modules I hope we can work together on this to iterate faster here and make something really useful.

## changelog

### version 2.x - published YYYY-MM-DD

### version 2.2.0 - published YYYY-MM-DD

* Small refactor in copy-from, going from 3 pushes to 2 pushes in every chunk transform loop
* Add bench/ directory for benchmarks
* Add benchmark to compare the performance of pg-copy-streams and psql during copy-from
* Add benchmark to measure memory usage of copy-from

### version 2.1.0 - published 2019-03-19

* Change README to stop using the pg pool singleton (removed after pg 7.0)
* Do not register copy-to.pushBufferIfNeeded on the instance itself (avoids a dangling method on the object)
* Fix copy-to test wrt intermittent unhandled promise bug
* Add tests regarding client re-use

### version 2.0.0 - published 2019-03-14

This version's major change is a modification in the COPY TO implementation. In the previous version, when a chunk was received from the database, it was analyzed and every row contained within that chunk was pushed individually down the stream pipeline. Small rows could lead to a "one chunk" / "thousands of rows pushed" performance issue in node. Thanks to @rafatower & CartoDB for the patch.
This is considered to be a major change since some people could be relying on the fact that each outgoing chunk is an individual row.
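
Illustratively (a sketch of the idea only, with hypothetical `rows`, `chunk` and `stream` variables, not the module's actual code):

```js
// before 2.0.0: one downstream push per row
rows.forEach(function(row) { stream.push(row) })

// since 2.0.0: the rows of a chunk are coalesced into a single push
var buffer = Buffer.alloc(chunk.length)
var offset = 0
rows.forEach(function(row) {
  row.copy(buffer, offset)
  offset += row.length
})
stream.push(buffer.slice(0, offset))
```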

Other changes in this version:
* Use Strict
* Travis deprecation of old node version (0.12, 0.4). Support LTS 6, 8, 10 and Current 11
* Update dev dependencies (pg, lodash)
* Stop using deprecated Buffer constructor
* Add package-lock.json

## license

The MIT License (MIT)

Copyright (c) 2013 Brian M. Carlson

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

bench/copy-from-memory.js (new file, +45)
@@ -0,0 +1,45 @@
var cp = require('duplex-child-process');
var pg = require('pg')

var copy = require('../').from

var client = function() {
  var client = new pg.Client()
  client.connect()
  return client
}

var inStream = function() {
  return cp.spawn('seq', ['0', '29999999']);
}

var running = true;

var c = client();
c.query('DROP TABLE IF EXISTS plugnumber', function() {
  c.query('CREATE TABLE plugnumber (num int)', function() {
    var seq = inStream()
    var from = c.query(copy('COPY plugnumber FROM STDIN'))
    seq.pipe(from);
    from.on('end', function() {
      running = false;
      c.end();
    })
  })
})


var rssMin = process.memoryUsage().rss / 1024 / 1024
var rssMax = rssMin

memlog = function() {
  var rss = process.memoryUsage().rss / 1024 / 1024
  rssMin = Math.min(rss, rssMin)
  rssMax = Math.max(rss, rssMax)
  console.log('rss:' + Math.round(rss*100)/100 + 'MB rssMin:'+ Math.round(rssMin*100)/100 + 'MB rssMax:' + Math.round(rssMax*100)/100 + 'MB')
  if (running) {
    setTimeout(memlog, 1000);
  }
}

memlog()

bench/copy-from.js (new file, +86)
@@ -0,0 +1,86 @@
var Benchmark = require('benchmark');
var cp = require('duplex-child-process');
var pg = require('pg')

var copy = require('../').from

var client = function() {
  var client = new pg.Client()
  client.connect()
  return client
}

var psql = '/opt/postgresql-9.6.1/bin/psql'
var limit = 999999;
var inStream = function() {
  return cp.spawn('seq', ['0', ''+limit]);
}
var suite = new Benchmark.Suite;
suite
  .add({
    name: 'unix pipe into psql COPY',
    defer: true,
    fn: function(d) {
      var c = client();
      c.query('DROP TABLE IF EXISTS plugnumber', function() {
        c.query('CREATE TABLE plugnumber (num int)', function() {
          c.end();
          var from = cp.spawn('sh', ['-c', 'seq 0 '+limit+' | '+psql+' postgres -c \'COPY plugnumber FROM STDIN\''])
          from.on('close', function() {
            d.resolve();
          })
        })
      })
    }
  })
  .add({
    name: 'pipe into psql COPY',
    defer: true,
    fn: function(d) {
      var c = client();
      c.query('DROP TABLE IF EXISTS plugnumber', function() {
        c.query('CREATE TABLE plugnumber (num int)', function() {
          c.end();
          var seq = inStream();
          var from = cp.spawn(psql, ['postgres', '-c', 'COPY plugnumber FROM STDIN'])
          seq.pipe(from);
          from.on('close', function() {
            d.resolve();
          })
        })
      })
    }
  })
  .add({
    name: 'pipe into pg-copy-stream COPY',
    defer: true,
    fn: function(d) {
      var c = client();
      c.query('DROP TABLE IF EXISTS plugnumber', function() {
        c.query('CREATE TABLE plugnumber (num int)', function() {
          var seq = inStream()
          var from = c.query(copy('COPY plugnumber FROM STDIN'))
          seq.pipe(from);
          from.on('end', function() {
            c.end();
            d.resolve();
          })
        })
      })
    }
  })
  .on('cycle', function(event) {
    console.log(String(event.target));
  })
  .on('complete', function() {
    console.log('Fastest is ' + this.filter('fastest').map('name'));
  });


var c = client()
c.query('DROP TABLE IF EXISTS plugnumber', function() {
  c.end();
  suite.run();
})

copy-to.js (143 changed lines)
@@ -1,88 +1,122 @@
module.exports = function(txt) {
return new CopyStreamQuery(txt)
'use strict';

module.exports = function(txt, options) {
return new CopyStreamQuery(txt, options)
}

var Transform = require('stream').Transform
var util = require('util')
var code = require('./message-formats')

var CopyStreamQuery = function(text) {
Transform.call(this)
var CopyStreamQuery = function(text, options) {
Transform.call(this, options)
this.text = text
this._listeners = null
this._copyOutResponse = null
this._gotCopyOutResponse = false
this.rowCount = 0
}

util.inherits(CopyStreamQuery, Transform)

var eventTypes = ['close', 'data', 'end', 'error']

CopyStreamQuery.prototype.submit = function(connection) {
connection.query(this.text)
this.connection = connection
this._listeners = connection.stream.listeners('data')
connection.stream.removeAllListeners('data')
this.connection.removeAllListeners('copyData')
this.on('end', () => this._detach())
connection.stream.pipe(this)
}

var code = {
E: 69, //Error
H: 72, //CopyOutResponse
d: 0x64, //CopyData
c: 0x63 //CopyDone
}

CopyStreamQuery.prototype._detach = function() {
this.connection.stream.unpipe()
this.connection.stream.removeAllListeners('data')
var self = this
this._listeners.forEach(function(listener) {
self.connection.stream.on('data', listener)
})
this.connection.stream.unpipe(this)
// Unpipe can drop us out of flowing mode
this.connection.stream.resume()
}

CopyStreamQuery.prototype._transform = function(chunk, enc, cb) {
var offset = 0
var Byte1Len = 1;
var Int32Len = 4;
if(this._remainder && chunk) {
chunk = Buffer.concat([this._remainder, chunk])
}
if(!this._copyOutResponse) {
this._copyOutResponse = true
if(chunk[0] == code.E) {
this._detach()
this.connection.stream.unshift(chunk)
this.push(null)
return cb();

var length;
var messageCode;
var needPush = false;

var buffer = Buffer.alloc(chunk.length);
var buffer_offset = 0;

var self = this;
var pushBufferIfneeded = function() {
if (needPush && buffer_offset > 0) {
self.push(buffer.slice(0, buffer_offset))
buffer_offset = 0;
}
if(chunk[0] != code.H) {
this.emit('error', new Error('Expected copy out response'))
}
var length = chunk.readUInt32BE(1)
offset = 1
offset += length
}
while((chunk.length - offset) > 5) {

while((chunk.length - offset) >= (Byte1Len + Int32Len)) {
var messageCode = chunk[offset]
//complete
if(messageCode == code.c) {
this._detach()
this.connection.stream.unshift(chunk.slice(offset + 5))
this.push(null)
return cb();

//console.log('PostgreSQL message ' + String.fromCharCode(messageCode))
switch(messageCode) {

// detect COPY start
case code.CopyOutResponse:
if (!this._gotCopyOutResponse) {
this._gotCopyOutResponse = true
} else {
this.emit('error', new Error('Unexpected CopyOutResponse message (H)'))
}
break;

// meaningful row
case code.CopyData:
needPush = true;
break;

// standard interspersed messages. discard
case code.ParameterStatus:
case code.NoticeResponse:
case code.NotificationResponse:
break;

case code.ErrorResponse:
case code.CopyDone:
pushBufferIfneeded();
this.push(null)
return cb();
break;
default:
this.emit('error', new Error('Unexpected PostgreSQL message ' + String.fromCharCode(messageCode)))
}
//something bad happened
if(messageCode != code.d) {
return this.emit('error', new Error('expected "d" (copydata message)'))
}
var length = chunk.readUInt32BE(offset + 1) - 4 //subtract length of UInt32
//can we read the next row?
if(chunk.length > (offset + length + 5)) {
offset += 5
var slice = chunk.slice(offset, offset + length)
offset += length
this.push(slice)
this.emit('row')

length = chunk.readUInt32BE(offset+Byte1Len)
if(chunk.length >= (offset + Byte1Len + length)) {
offset += Byte1Len + Int32Len
var message = chunk.slice(offset, offset + length - Int32Len)
switch(messageCode) {
case code.CopyData:
this.rowCount++;
message.copy(buffer, buffer_offset);
buffer_offset += message.length;
break;
case code.ParameterStatus:
case code.NoticeResponse:
case code.NotificationResponse:
this.emit('warning', 'Got an interspersed message: ' + message);
break;
}
offset += (length - Int32Len);
} else {
// we need more chunks for a complete message
break;
}
}

pushBufferIfneeded();

if(chunk.length - offset) {
var slice = chunk.slice(offset)
this._remainder = slice
@@ -96,6 +130,9 @@ CopyStreamQuery.prototype.handleError = function(e) {
this.emit('error', e)
}

CopyStreamQuery.prototype.handleCopyData = function(chunk) {
}

CopyStreamQuery.prototype.handleCommandComplete = function() {
}

index.js (58 changed lines)
@@ -1,22 +1,25 @@
var CopyToQueryStream = require('./copy-to')
'use strict';

var CopyToQueryStream = require('./copy-to')
module.exports = {
to: function(txt) {
return new CopyToQueryStream(txt)
to: function(txt, options) {
return new CopyToQueryStream(txt, options)
},
from: function (txt) {
return new CopyStreamQuery(txt)
from: function (txt, options) {
return new CopyStreamQuery(txt, options)
}
}

var Transform = require('stream').Transform
var util = require('util')
var code = require('./message-formats')

var CopyStreamQuery = function(text) {
Transform.call(this)
var CopyStreamQuery = function(text, options) {
Transform.call(this, options)
this.text = text
this._listeners = null
this._copyOutResponse = null
this.rowCount = 0
}

util.inherits(CopyStreamQuery, Transform)
@@ -26,41 +29,46 @@ CopyStreamQuery.prototype.submit = function(connection) {
connection.query(this.text)
}

var code = {
H: 72, //CopyOutResponse
d: 0x64, //CopyData
c: 0x63 //CopyDone
}

var copyDataBuffer = Buffer([code.d])
CopyStreamQuery.prototype._transform = function(chunk, enc, cb) {
this.push(copyDataBuffer)
var lenBuffer = Buffer(4)
lenBuffer.writeUInt32BE(chunk.length + 4, 0)
var Int32Len = 4;
var lenBuffer = Buffer.from([code.CopyData, 0, 0, 0, 0])
lenBuffer.writeUInt32BE(chunk.length + Int32Len, 1)
this.push(lenBuffer)
this.push(chunk)
this.emit('row')
cb()
}

CopyStreamQuery.prototype._flush = function(cb) {
var finBuffer = Buffer([code.c, 0, 0, 0, 4])
var Int32Len = 4;
var finBuffer = Buffer.from([code.CopyDone, 0, 0, 0, Int32Len])
this.push(finBuffer)
//never call this callback, do not close underlying stream
//cb()
this.cb_flush = cb
}

CopyStreamQuery.prototype.handleError = function(e) {
this.emit('error', e)
}

CopyStreamQuery.prototype.streamData = function(connection) {
this.pipe(connection.stream)
CopyStreamQuery.prototype.handleCopyInResponse = function(connection) {
this.pipe(connection.stream, { end: false })
}

CopyStreamQuery.prototype.handleCommandComplete = function() {
this.unpipe()
this.emit('end')
CopyStreamQuery.prototype.handleCommandComplete = function(msg) {
// Parse affected row count as in
// https://github.com/brianc/node-postgres/blob/35e5567f86774f808c2a8518dd312b8aa3586693/lib/result.js#L37
var match = /COPY (\d+)/.exec((msg || {}).text)
if (match) {
this.rowCount = parseInt(match[1], 10)
}

// we delay the _flush cb so that the 'end' event is
// triggered after CommandComplete
this.cb_flush()

// unpipe from connection
this.unpipe(this.connection)
this.connection = null
}

CopyStreamQuery.prototype.handleReadyForQuery = function() {
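
The practical upshot of the new `handleCommandComplete` is that, after the 'end' event, the stream exposes the row count the server reported. A usage sketch (the `client` and `my_table` here are hypothetical):

```js
var copyFrom = require('pg-copy-streams').from

var stream = client.query(copyFrom('COPY my_table FROM STDIN'))
stream.on('end', function() {
  // parsed from the server's CommandComplete tag, e.g. "COPY 3"
  console.log('copied ' + stream.rowCount + ' rows')
})
stream.end(Buffer.from('1\n2\n3\n'))
```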

message-formats.js (new file, +25)
@@ -0,0 +1,25 @@
/**
 * The COPY feature uses the following protocol codes.
 * The codes for the most recent protocol version are documented on
 * https://www.postgresql.org/docs/current/static/protocol-message-formats.html
 *
 * The protocol flow itself is described on
 * https://www.postgresql.org/docs/current/static/protocol-flow.html
 */
module.exports = {
  ErrorResponse: 0x45, // E
  CopyInResponse: 0x47, // G
  CopyOutResponse: 0x48, // H
  CopyBothResponse: 0x57, // W
  CopyDone: 0x63, // c
  CopyData: 0x64, // d
  CopyFail: 0x66, // f

  // It is possible for NoticeResponse and ParameterStatus messages to be interspersed between CopyData messages;
  // frontends must handle these cases, and should be prepared for other asynchronous message types as well
  // (see Section 50.2.6).
  // Otherwise, any message type other than CopyData or CopyDone may be treated as terminating copy-out mode.
  NotificationResponse: 0x41, // A
  NoticeResponse: 0x4E, // N
  ParameterStatus: 0x53 // S
}
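
Each code is the Byte1 that opens a protocol message; it is followed by an Int32 length that counts itself but not the code byte, which is exactly how copy-to.js walks each chunk. A minimal decoding sketch (the `decodeHeader` helper is illustrative, not part of the module):

```js
var code = require('./message-formats')

// Decode one 5-byte frame header at `offset`; assumes the buffer
// starts on a message boundary and holds at least 5 bytes.
function decodeHeader(buf, offset) {
  var messageCode = buf[offset]             // Byte1
  var length = buf.readUInt32BE(offset + 1) // Int32, includes itself
  return {
    type: String.fromCharCode(messageCode),
    isCopyData: messageCode === code.CopyData,
    payloadLength: length - 4,              // strip the Int32 itself
    next: offset + 1 + length               // start of the next message
  }
}
```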

package-lock.json (generated, new file, +215)
@@ -0,0 +1,215 @@
{
  "name": "pg-copy-streams",
  "version": "2.2.0",
  "lockfileVersion": 1,
  "requires": true,
  "dependencies": {
    "async": {
      "version": "0.2.10",
      "resolved": "https://registry.npmjs.org/async/-/async-0.2.10.tgz",
      "integrity": "sha1-trvgsGdLnXGXCMo43owjfLUmw9E=",
      "dev": true
    },
    "base64-js": {
      "version": "0.0.2",
      "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-0.0.2.tgz",
      "integrity": "sha1-Ak8Pcq+iW3X5wO5zzU9V7Bvtl4Q=",
      "dev": true
    },
    "benchmark": {
      "version": "2.1.4",
      "resolved": "https://registry.npmjs.org/benchmark/-/benchmark-2.1.4.tgz",
      "integrity": "sha1-CfPeMckWQl1JjMLuVloOvzwqVik=",
      "dev": true,
      "requires": {
        "lodash": "^4.17.4",
        "platform": "^1.3.3"
      }
    },
    "bops": {
      "version": "0.0.6",
      "resolved": "https://registry.npmjs.org/bops/-/bops-0.0.6.tgz",
      "integrity": "sha1-CC0dVfoB5g29wuvC26N/ZZVUzzo=",
      "dev": true,
      "requires": {
        "base64-js": "0.0.2",
        "to-utf8": "0.0.1"
      }
    },
    "buffer-writer": {
      "version": "2.0.0",
      "resolved": "https://registry.npmjs.org/buffer-writer/-/buffer-writer-2.0.0.tgz",
      "integrity": "sha512-a7ZpuTZU1TRtnwyCNW3I5dc0wWNC3VR9S++Ewyk2HHZdrO3CQJqSpd+95Us590V6AL7JqUAH2IwZ/398PmNFgw==",
      "dev": true
    },
    "concat-stream": {
      "version": "1.1.0",
      "resolved": "https://registry.npmjs.org/concat-stream/-/concat-stream-1.1.0.tgz",
      "integrity": "sha1-hCae/YzGUCdeMi8wnfRIZ7xRxfM=",
      "dev": true,
      "requires": {
        "bops": "0.0.6"
      }
    },
    "duplex-child-process": {
      "version": "1.0.0",
      "resolved": "https://registry.npmjs.org/duplex-child-process/-/duplex-child-process-1.0.0.tgz",
      "integrity": "sha1-SpSXQob7x4QNCFPSs/5ZCp20YUc=",
      "dev": true
    },
    "gonna": {
      "version": "0.0.0",
      "resolved": "https://registry.npmjs.org/gonna/-/gonna-0.0.0.tgz",
      "integrity": "sha1-6k4ZsVJ6F4LhJQVeMCSabUvHmlk=",
      "dev": true
    },
    "heroku-env": {
      "version": "0.1.1",
      "resolved": "https://registry.npmjs.org/heroku-env/-/heroku-env-0.1.1.tgz",
      "integrity": "sha1-wGeRyUTpuHSOMXf1S/cBQyZ+Yxc=",
      "dev": true,
      "requires": {
        "parse-database-url": "~0.2.0"
      }
    },
    "lodash": {
      "version": "4.17.11",
      "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.11.tgz",
      "integrity": "sha512-cQKh8igo5QUhZ7lg38DYWAxMvjSAKG0A8wGSVimP07SIUEK2UO+arSRKbRZWtelMtN5V0Hkwh5ryOto/SshYIg==",
      "dev": true
    },
    "packet-reader": {
      "version": "1.0.0",
      "resolved": "https://registry.npmjs.org/packet-reader/-/packet-reader-1.0.0.tgz",
      "integrity": "sha512-HAKu/fG3HpHFO0AA8WE8q2g+gBJaZ9MG7fcKk+IJPLTGAD6Psw4443l+9DGRbOIh3/aXr7Phy0TjilYivJo5XQ==",
      "dev": true
    },
    "parse-database-url": {
      "version": "0.2.2",
      "resolved": "https://registry.npmjs.org/parse-database-url/-/parse-database-url-0.2.2.tgz",
      "integrity": "sha1-SGFa56fA/HfjKU0jVCpqUnPDVws=",
      "dev": true
    },
    "pg": {
      "version": "7.8.2",
      "resolved": "https://registry.npmjs.org/pg/-/pg-7.8.2.tgz",
      "integrity": "sha512-5U4fjV43DnQxelkhyPdU3YfUbYVa21bNmreXRCM/gFFw09YxWaitWWITm/u0twUNF5EYOSDhkgyEAocgtpP9JQ==",
      "dev": true,
      "requires": {
        "buffer-writer": "2.0.0",
        "packet-reader": "1.0.0",
        "pg-connection-string": "0.1.3",
        "pg-pool": "^2.0.4",
        "pg-types": "~2.0.0",
        "pgpass": "1.x",
        "semver": "4.3.2"
      }
    },
    "pg-connection-string": {
      "version": "0.1.3",
      "resolved": "https://registry.npmjs.org/pg-connection-string/-/pg-connection-string-0.1.3.tgz",
      "integrity": "sha1-2hhHsglA5C7hSSvq9l1J2RskXfc=",
      "dev": true
    },
    "pg-int8": {
      "version": "1.0.1",
      "resolved": "https://registry.npmjs.org/pg-int8/-/pg-int8-1.0.1.tgz",
      "integrity": "sha512-WCtabS6t3c8SkpDBUlb1kjOs7l66xsGdKpIPZsg4wR+B3+u9UAum2odSsF9tnvxg80h4ZxLWMy4pRjOsFIqQpw==",
      "dev": true
    },
    "pg-pool": {
      "version": "2.0.6",
      "resolved": "https://registry.npmjs.org/pg-pool/-/pg-pool-2.0.6.tgz",
      "integrity": "sha512-hod2zYQxM8Gt482q+qONGTYcg/qVcV32VHVPtktbBJs0us3Dj7xibISw0BAAXVMCzt8A/jhfJvpZaxUlqtqs0g==",
      "dev": true
    },
    "pg-types": {
      "version": "2.0.0",
      "resolved": "https://registry.npmjs.org/pg-types/-/pg-types-2.0.0.tgz",
      "integrity": "sha512-THUD7gQll5tys+5eQ8Rvs7DjHiIC3bLqixk3gMN9Hu8UrCBAOjf35FoI39rTGGc3lM2HU/R+Knpxvd11mCwOMA==",
      "dev": true,
      "requires": {
        "pg-int8": "1.0.1",
        "postgres-array": "~2.0.0",
        "postgres-bytea": "~1.0.0",
        "postgres-date": "~1.0.0",
        "postgres-interval": "^1.1.0"
      }
    },
    "pgpass": {
      "version": "1.0.2",
      "resolved": "https://registry.npmjs.org/pgpass/-/pgpass-1.0.2.tgz",
      "integrity": "sha1-Knu0G2BltnkH6R2hsHwYR8h3swY=",
      "dev": true,
      "requires": {
        "split": "^1.0.0"
      }
    },
    "platform": {
      "version": "1.3.5",
      "resolved": "https://registry.npmjs.org/platform/-/platform-1.3.5.tgz",
      "integrity": "sha512-TuvHS8AOIZNAlE77WUDiR4rySV/VMptyMfcfeoMgs4P8apaZM3JrnbzBiixKUv+XR6i+BXrQh8WAnjaSPFO65Q==",
      "dev": true
    },
    "postgres-array": {
      "version": "2.0.0",
      "resolved": "https://registry.npmjs.org/postgres-array/-/postgres-array-2.0.0.tgz",
      "integrity": "sha512-VpZrUqU5A69eQyW2c5CA1jtLecCsN2U/bD6VilrFDWq5+5UIEVO7nazS3TEcHf1zuPYO/sqGvUvW62g86RXZuA==",
      "dev": true
    },
    "postgres-bytea": {
      "version": "1.0.0",
      "resolved": "https://registry.npmjs.org/postgres-bytea/-/postgres-bytea-1.0.0.tgz",
      "integrity": "sha1-AntTPAqokOJtFy1Hz5zOzFIazTU=",
      "dev": true
    },
    "postgres-date": {
      "version": "1.0.3",
      "resolved": "https://registry.npmjs.org/postgres-date/-/postgres-date-1.0.3.tgz",
      "integrity": "sha1-4tiXAu/bJY/52c7g/pG9BpdSV6g=",
      "dev": true
    },
    "postgres-interval": {
      "version": "1.2.0",
      "resolved": "https://registry.npmjs.org/postgres-interval/-/postgres-interval-1.2.0.tgz",
      "integrity": "sha512-9ZhXKM/rw350N1ovuWHbGxnGh/SNJ4cnxHiM0rxE4VN41wsg8P8zWn9hv/buK00RP4WvlOyr/RBDiptyxVbkZQ==",
      "dev": true,
      "requires": {
        "xtend": "^4.0.0"
      }
    },
    "semver": {
      "version": "4.3.2",
      "resolved": "https://registry.npmjs.org/semver/-/semver-4.3.2.tgz",
      "integrity": "sha1-x6BxWKgL7dBSNVt3DYLWZA+AO+c=",
      "dev": true
    },
    "split": {
      "version": "1.0.1",
      "resolved": "https://registry.npmjs.org/split/-/split-1.0.1.tgz",
      "integrity": "sha512-mTyOoPbrivtXnwnIxZRFYRrPNtEFKlpB2fvjSnCQUiAA6qAZzqwna5envK4uk6OIeP17CsdF3rSBGYVBsU0Tkg==",
      "dev": true,
      "requires": {
        "through": "2"
      }
    },
    "through": {
      "version": "2.3.8",
      "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz",
      "integrity": "sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU=",
      "dev": true
    },
    "to-utf8": {
      "version": "0.0.1",
      "resolved": "https://registry.npmjs.org/to-utf8/-/to-utf8-0.0.1.tgz",
      "integrity": "sha1-0Xrqcv8vujm55DYBvns/9y4ImFI=",
      "dev": true
    },
    "xtend": {
      "version": "4.0.1",
      "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.1.tgz",
      "integrity": "sha1-pcbVMr5lbiPbgg77lDofBJmNY68=",
      "dev": true
    }
  }
}

package.json (11 changed lines)
@@ -1,6 +1,6 @@
{
"name": "pg-copy-streams",
"version": "0.1.0",
"version": "2.2.0",
"description": "Low-Level COPY TO and COPY FROM streams for PostgreSQL in JavaScript using",
"main": "index.js",
"scripts": {
@@ -23,10 +23,13 @@
"url": "https://github.com/brianc/node-pg-copy-streams/issues"
},
"devDependencies": {
"pg.js": "~2.8.1",
"async": "~0.2.10",
"benchmark": "^2.1.4",
"concat-stream": "~1.1.0",
"duplex-child-process": "^1.0.0",
"gonna": "0.0.0",
"lodash": "~2.2.1",
"heroku-env": "~0.1.1"
"heroku-env": "~0.1.1",
"lodash": "^4.17.11",
"pg": "^7.8.2"
}
}

test/binary.js (new file, +69)
@@ -0,0 +1,69 @@
'use strict';

var assert = require('assert')
var gonna = require('gonna')

var async = require('async')
var concat = require('concat-stream')
var _ = require('lodash')
var pg = require('pg')

var from = require('../').from
var to = require('../').to

var testBinaryCopy = function() {
  var client = function() {
    var client = new pg.Client()
    client.connect()
    return client
  }

  var fromClient = client()
  var toClient = client()

  var queries = [
    'DROP TABLE IF EXISTS data',
    'CREATE TABLE IF NOT EXISTS data (num BIGINT, word TEXT)',
    'INSERT INTO data (num, word) VALUES (1, \'hello\'), (2, \'other thing\'), (3, \'goodbye\')',
    'DROP TABLE IF EXISTS data_copy',
    'CREATE TABLE IF NOT EXISTS data_copy (LIKE data INCLUDING ALL)'
  ]

  async.eachSeries(queries, _.bind(fromClient.query, fromClient), function(err) {
    assert.ifError(err)

    var fromStream = fromClient.query(to('COPY (SELECT * FROM data) TO STDOUT BINARY'))
    var toStream = toClient.query(from('COPY data_copy FROM STDIN BINARY'))

    var runStream = function(callback) {
      fromStream.on('error', callback)
      toStream.on('error', callback)
      toStream.on('finish', callback)
      fromStream.pipe(toStream)
    }
    runStream(function(err) {
      assert.ifError(err)

      toClient.query('SELECT * FROM data_copy ORDER BY num', function(err, res){
        assert.equal(res.rowCount, 3, 'expected 3 rows but got ' + res.rowCount)
        assert.equal(res.rows[0].num, 1)
        assert.equal(res.rows[0].word, 'hello')
        assert.equal(res.rows[1].num, 2)
        assert.equal(res.rows[1].word, 'other thing')
        assert.equal(res.rows[2].num, 3)
        assert.equal(res.rows[2].word, 'goodbye')
        queries = [
          'DROP TABLE data',
          'DROP TABLE data_copy'
        ]
        async.each(queries, _.bind(fromClient.query, fromClient), function(err) {
          assert.ifError(err)
          fromClient.end()
          toClient.end()
        })
      })
    })
  })
}

testBinaryCopy()

test/copy-from.js
@@ -1,31 +1,39 @@
'use strict';

var assert = require('assert')
var gonna = require('gonna')

var concat = require('concat-stream')
var _ = require('lodash')
var pg = require('pg.js')
var pg = require('pg')

var copy = require('../').from

var client = function() {
var client = new pg.Client()
client.connect()
return client
}

var testConstruction = function() {
var highWaterMark = 10
var stream = copy('COPY numbers FROM STDIN', {highWaterMark: 10, objectMode: true})
for(var i = 0; i < highWaterMark * 1.5; i++) {
stream.write('1\t2\n')
}
assert(!stream.write('1\t2\n'), 'Should correctly set highWaterMark.')
}

testConstruction()

var testRange = function(top) {
var client = function() {
var client = new pg.Client()
client.connect()
client.query('CREATE TEMP TABLE numbers(num int, bigger_num int)')
return client
}

var fromClient = client()
var copy = require('../').from

fromClient.query('CREATE TEMP TABLE numbers(num int, bigger_num int)')

var txt = 'COPY numbers FROM STDIN'

var stream = fromClient.query(copy(txt))
var rowEmitCount = 0
stream.on('row', function() {
rowEmitCount++
})
for(var i = 0; i < top; i++) {
stream.write(Buffer('' + i + '\t' + i*10 + '\n'))
stream.write(Buffer.from('' + i + '\t' + i*10 + '\n'))
}
stream.end()
var countDone = gonna('have correct count')
@@ -33,10 +41,11 @@ var testRange = function(top) {
fromClient.query('SELECT COUNT(*) FROM numbers', function(err, res) {
assert.ifError(err)
assert.equal(res.rows[0].count, top, 'expected ' + top + ' rows but got ' + res.rows[0].count)
console.log('found ', res.rows.length, 'rows')
assert.equal(stream.rowCount, top, 'expected ' + top + ' rows but db count is ' + stream.rowCount)
//console.log('found ', res.rows.length, 'rows')
countDone()
var firstRowDone = gonna('have correct result')
assert.equal(rowEmitCount, top, 'should have emitted "row" event ' + top + ' times')
assert.equal(stream.rowCount, top, 'should have rowCount ' + top + ' ')
fromClient.query('SELECT (max(num)) AS num FROM numbers', function(err, res) {
assert.ifError(err)
assert.equal(res.rows[0].num, top-1)
@@ -48,3 +57,48 @@ var testRange = function(top) {
}

testRange(1000)

var testSingleEnd = function() {
var fromClient = client()
fromClient.query('CREATE TEMP TABLE numbers(num int)')
var txt = 'COPY numbers FROM STDIN';
var stream = fromClient.query(copy(txt))
var count = 0;
stream.on('end', function() {
count++;
assert(count==1, '`end` Event was triggered ' + count + ' times');
if (count == 1) fromClient.end();
})
stream.end(Buffer.from('1\n'))

}
testSingleEnd()

var testClientReuse = function() {
var fromClient = client()
fromClient.query('CREATE TEMP TABLE numbers(num int)')
var txt = 'COPY numbers FROM STDIN';
var count = 0;
var countMax = 2;
var card = 100000;
var runStream = function() {
var stream = fromClient.query(copy(txt))
stream.on('end', function() {
count++;
if (count<countMax) {
runStream()
} else {
fromClient.query('SELECT sum(num) AS s FROM numbers', function(err, res) {
var total = countMax * card * (card+1)
assert.equal(res.rows[0].s, total, 'copy-from.ClientReuse wrong total')
fromClient.end()
})
}
})
stream.write(Buffer.from(_.range(0, card+1).join('\n') + '\n'))
stream.end(Buffer.from(_.range(0, card+1).join('\n') + '\n'))
}
runStream();
}
testClientReuse()

test/copy-to.js (203 changed lines)
@@ -1,38 +1,205 @@
'use strict';

var assert = require('assert')
var gonna = require('gonna')

var concat = require('concat-stream')
var _ = require('lodash')
var pg = require('pg.js')
var async = require('async')
var concat = require('concat-stream')
var pg = require('pg')

var copy = require('../').to
var code = require('../message-formats')

var client = function() {
var client = new pg.Client()
client.connect()
return client
}

var testConstruction = function() {
var txt = 'COPY (SELECT * FROM generate_series(0, 10)) TO STDOUT'
var stream = copy(txt, {highWaterMark: 10})
assert.equal(stream._readableState.highWaterMark, 10, 'Client should have been set with a correct highWaterMark.')
}
testConstruction()

var testComparators = function() {
var copy1 = copy();
copy1.pipe(concat(function(buf) {
assert(copy1._gotCopyOutResponse, 'should have received CopyOutResponse')
assert(!copy1._remainder, 'Message with no additional data (len=Int4Len+0) should not leave a remainder')
}))
copy1.end(new Buffer.from([code.CopyOutResponse, 0x00, 0x00, 0x00, 0x04]));

}
testComparators();

var testRange = function(top) {
var client = function() {
var client = new pg.Client()
client.connect()
return client
}

var fromClient = client()
var copy = require('../').to

var txt = 'COPY (SELECT * from generate_series(0, ' + (top - 1) + ')) TO STDOUT'
var res;

var stream = fromClient.query(copy(txt))
var rowEmitCount = 0
stream.on('row', function() {
rowEmitCount++
})
var done = gonna('finish piping out', 1000, function() {
fromClient.end()
})

stream.pipe(concat(function(buf) {
var res = buf.toString('utf8')
res = buf.toString('utf8')
}))

stream.on('end', function() {
var expected = _.range(0, top).join('\n') + '\n'
assert.equal(res, expected)
assert.equal(rowEmitCount, top, 'should have emitted "row" ' + top + ' times but got ' + rowEmitCount)
assert.equal(stream.rowCount, top, 'should have rowCount ' + top + ' but got ' + stream.rowCount)
done()
}))
});
}

testRange(10000)

var testInternalPostgresError = function() {
var cancelClient = client()
var queryClient = client()

var runStream = function(callback) {
var txt = "COPY (SELECT pg_sleep(10)) TO STDOUT"
var stream = queryClient.query(copy(txt))
stream.on('data', function(data) {
// Just throw away the data.
})
stream.on('error', callback)

setTimeout(function() {
var cancelQuery = "SELECT pg_cancel_backend(pid) FROM pg_stat_activity WHERE query ~ 'pg_sleep' AND NOT query ~ 'pg_cancel_backend'"
cancelClient.query(cancelQuery, function() { cancelClient.end() })
}, 50)
}

runStream(function(err) {
assert.notEqual(err, null)
var expectedMessage = 'canceling statement due to user request'
assert.notEqual(err.toString().indexOf(expectedMessage), -1, 'Error message should mention reason for query failure.')
queryClient.end()
})
}
testInternalPostgresError()

var testNoticeResponse = function() {
// we use a special trick to generate a warning
// on the copy stream.
var queryClient = client()
var set = '';
set += 'SET SESSION client_min_messages = WARNING;'
set += 'SET SESSION standard_conforming_strings = off;'
set += 'SET SESSION escape_string_warning = on;'
queryClient.query(set, function(err, res) {
assert.equal(err, null, 'testNoticeResponse - could not SET parameters')
var runStream = function(callback) {
var txt = "COPY (SELECT '\\\n') TO STDOUT"
var stream = queryClient.query(copy(txt))
stream.on('data', function(data) {
})
stream.on('error', callback)

// make sure stream is pulled from
stream.pipe(concat(callback.bind(null,null)))
}

runStream(function(err) {
assert.equal(err, null, err)
queryClient.end()
})

})
}
testNoticeResponse();

var warnAndReturnOne = `
CREATE OR REPLACE FUNCTION pg_temp.test_warn_return_one()
RETURNS INTEGER
AS $$
BEGIN
RAISE WARNING 'hey, this is returning one';
RETURN 1;
END;
$$ LANGUAGE plpgsql`;

var testInterspersedMessageDoesNotBreakCopyFlow = function() {
var toClient = client();
toClient.query(warnAndReturnOne, (err, res) => {
var q = "COPY (SELECT * FROM pg_temp.test_warn_return_one()) TO STDOUT WITH (FORMAT 'csv', HEADER true)";
var stream = toClient.query(copy(q));
var done = gonna('got expected COPY TO payload', 1000, function() {
toClient.end();
});

stream.pipe(concat(function(buf) {
res = buf.toString('utf8')
}));

stream.on('end', function() {
var expected = "test_warn_return_one\n1\n";
assert.equal(res, expected);
// note the header counts as a row
assert.equal(stream.rowCount, 2, 'should have rowCount = 2 but got ' + stream.rowCount);
done();
});
});
};
testInterspersedMessageDoesNotBreakCopyFlow();

var testInterspersedMessageEmitsWarnign = function() {
var toClient = client();
toClient.query(warnAndReturnOne, (err, res) => {
var q = "COPY (SELECT * FROM pg_temp.test_warn_return_one()) TO STDOUT WITH (FORMAT 'csv', HEADER true)";
var stream = toClient.query(copy(q));
var done = gonna('got expected warning event', 1000, function() {
toClient.end();
});

stream.on('warning', function(msg) {
assert(msg.match(/Got an interspersed message:.*WARNING.*hey, this is returning one/),
'did not get expected warning for interspersed message in COPY TO');
done();
})
});
};
testInterspersedMessageEmitsWarnign();

var testClientReuse = function() {
var c = client();
var limit = 100000;
var countMax = 10;
var countA = countMax;
var countB = 0;
var runStream = function(num, callback) {
var sql = "COPY (SELECT * FROM generate_series(0,"+limit+")) TO STDOUT"
var stream = c.query(copy(sql))
stream.on('error', callback)
stream.pipe(concat(function(buf) {
var res = buf.toString('utf8');
var exp = _.range(0, limit+1).join('\n') + '\n'
assert.equal(res, exp, 'clientReuse: sent & received buffer should be equal')
countB++;
callback();
}))
}

var rs = function(err) {
assert.equal(err, null, err)
countA--;
if (countA) {
runStream(countB, rs)
} else {
assert.equal(countB, countMax, 'clientReuse: there should be countMax queries on the same client')
c.end()
}
};

runStream(countB, rs);
}
testClientReuse();

test/index.js
@@ -1,2 +1,5 @@
'use strict';

require('./copy-from')
require('./copy-to')
require('./binary')