5 Commits

Author | SHA1 | Message | Date
brianc | be7af371d8 | Add more versions to the travis test matrix | 2016-05-03 13:07:50 -05:00
brianc | 17697e98d7 | Re-enable old tests | 2016-05-03 13:07:29 -05:00
Chris Kinsman | bd4a87d3a0 | Remove node 0.10 and add 0.12 | 2015-12-16 16:58:18 -08:00
Chris Kinsman | 9d197a91e1 | Add resume | 2015-12-16 16:43:22 -08:00
Chris Kinsman | e1ce9a3948 | Eliminate detach/reattach strategy: it isn't able to differentiate between on/once and inconsistently loses unshifted data depending on node version. Instead, split the stream and send it to the copy stream and connection.stream at the same time; disconnecting the copy stream just means unpiping. Added handleCopyData to fulfill the query contract but ignore the incoming data. Add node 4.2.2 to Travis. Minimum postgres 9.2 to allow tests to complete in Travis. Remove test that is no longer needed since we no longer disconnect/reconnect listeners. | 2015-12-16 16:21:13 -08:00
15 changed files with 82 additions and 687 deletions

View File

@@ -1,4 +0,0 @@
.gitignore
.travis.yml
bench/
test/

View File

@@ -1,9 +1,10 @@
language: node_js
node_js:
- "0.12"
- "4"
- "5"
- "6"
- "8"
- "10"
- "11"
addons:
postgresql: "9.2"

View File

@@ -12,8 +12,3 @@ minor: test
npm version minor -m "Bump version"
git push origin master --tags
npm publish
major: test
npm version major -m "Bump version"
git push origin master --tags
npm publish

View File

@@ -20,12 +20,10 @@ If you're not familiar with the feature (I wasn't either) you can read this for
### pipe from a table to stdout
```js
var {Pool} = require('pg');
var pg = require('pg');
var copyTo = require('pg-copy-streams').to;
var pool = new Pool();
pool.connect(function(err, client, done) {
pg.connect(function(err, client, done) {
var stream = client.query(copyTo('COPY my_table TO STDOUT'));
stream.pipe(process.stdout);
stream.on('end', done);
@@ -37,24 +35,17 @@ pool.connect(function(err, client, done) {
```js
var fs = require('fs');
var {Pool} = require('pg');
var pg = require('pg');
var copyFrom = require('pg-copy-streams').from;
var pool = new Pool();
pool.connect(function(err, client, done) {
pg.connect(function(err, client, done) {
var stream = client.query(copyFrom('COPY my_table FROM STDIN'));
var fileStream = fs.createReadStream('some_file.tsv')
fileStream.on('error', done);
stream.on('error', done);
stream.on('end', done);
fileStream.pipe(stream);
fileStream.pipe(stream).on('finish', done).on('error', done);
});
```
*Important*: Even if `pg-copy-streams.from` is used as a Writable (via `pipe`), you should not listen for the 'finish' event and expect that the COPY command has already been correctly acknowledged by the database. Internally, a duplex stream is used to pipe the data into the database connection and the COPY command should be considered complete only when the 'end' event is triggered.
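A minimal sketch of that advice, reusing the `client`/`done` names from the pool example above: treat 'end' (not 'finish') as the signal that the database has acknowledged the COPY.
```js
var fs = require('fs');
var copyFrom = require('pg-copy-streams').from;

// inside pool.connect(function(err, client, done) { ... })
var stream = client.query(copyFrom('COPY my_table FROM STDIN'));
var fileStream = fs.createReadStream('some_file.tsv');
fileStream.on('error', done);
stream.on('error', done);
// 'finish' would only mean our side flushed its data into the duplex stream;
// 'end' fires once the COPY command completes on the server.
stream.on('end', done);
fileStream.pipe(stream);
```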
## install
```sh
@@ -65,30 +56,7 @@ $ npm install pg-copy-streams
This module __only__ works with the pure JavaScript bindings. If you're using `require('pg').native`, please make sure to use normal `require('pg')` or `require('pg.js')` when you're using copy streams.
Before you set out on this magical piping journey, you _really_ should read this: http://www.postgresql.org/docs/current/static/sql-copy.html, and you might want to take a look at the [tests](https://github.com/brianc/node-pg-copy-streams/tree/master/test) to get an idea of how things work.
Take note of the following warning in the PostgreSQL documentation:
> COPY stops operation at the first error. This should not lead to problems in the event of a COPY TO, but the target table will already have received earlier rows in a COPY FROM. These rows will not be visible or accessible, but they still occupy disk space. This might amount to a considerable amount of wasted disk space if the failure happened well into a large copy operation. You might wish to invoke VACUUM to recover the wasted space.
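For example, after a COPY FROM fails partway through, you might reclaim that space with a plain VACUUM; a sketch, assuming the connected `client` and `done` from the examples above:
```js
client.query('VACUUM my_table', function(err) {
  if (err) console.error('VACUUM failed:', err);
  done();
});
```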
## benchmarks
The COPY command is commonly used to move huge sets of data. This can put pressure on the node.js event loop and on CPU and memory usage.
There is a bench/ directory in the repository where benchmark scripts are stored. If you have performance issues with `pg-copy-streams`, do not hesitate to write a new benchmark that highlights your issue. Please avoid committing huge files (such PRs won't be accepted); find other ways to generate huge datasets.
If you have a local instance of postgres on your machine, you can start a benchmark, for example, with
```sh
$ cd bench
$ PGPORT=5432 PGDATABASE=postgres node copy-from.js
```
## tests
In order to launch the test suite, you need to have a local instance of postgres running on your machine.
```sh
$ PGPORT=5432 PGDATABASE=postgres make test
```
Before you set out on this magical piping journey, you _really_ should read this: http://www.postgresql.org/docs/9.3/static/sql-copy.html, and you might want to take a look at the [tests](https://github.com/brianc/node-pg-copy-streams/tree/master/test) to get an idea of how things work.
## contributing
@@ -103,36 +71,6 @@ Generally how I work is if you submit a few pull requests and you're interested
Since this isn't a module with tons of installs and dependent modules, I hope we can work together on this to iterate faster here and make something really useful.
## changelog
### version 2.x - published YYYY-MM-DD
### version 2.2.0 - published YYYY-MM-DD
* Small refactor in copy-from, going from 3 pushes to 2 pushes in every chunk transform loop
* Add bench/ directory for benchmarks
* Add benchmark to compare the performance of pg-copy-streams and psql during copy-from
* Add benchmark to measure memory usage of copy-from
### version 2.1.0 - published 2019-03-19
* Change README to stop using the pg pool singleton (removed after pg 7.0)
* Do not register copy-to.pushBufferIfNeeded on the instance itself (avoid dangling method on the object)
* Fix copy-to test wrt intermittent unhandled promise bug
* Add tests regarding client re-use
### version 2.0.0 - published 2019-03-14
This version's major change is a modification of the COPY TO implementation. In the previous version, when a chunk was received from the database, it was analyzed and every row contained within that chunk was pushed individually down the stream pipeline. Small rows could lead to a "one chunk" / "thousands of rows pushed" performance issue in node. Thanks to @rafatower & CartoDB for the patch.
This is considered a major change since some people could be relying on the fact that each outgoing chunk is an individual row; a sketch of restoring that behavior follows this changelog entry.
Other changes in this version
* Use Strict
* Travis deprecation of old node versions (0.12, 0.4). Support LTS 6, 8, 10 and Current 11
* Update dev dependencies (pg, lodash)
* Stop using deprecated Buffer constructor
* Add package-lock.json
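If downstream code relied on the 1.x one-row-per-chunk behavior, here is a minimal sketch of restoring row granularity for text-format COPY output (assuming newline-terminated rows; `rowSplitter` is illustrative, not part of the module):
```js
var Transform = require('stream').Transform;

// Re-split arbitrary COPY TO chunks into newline-terminated rows.
function rowSplitter() {
  var remainder = Buffer.alloc(0);
  return new Transform({
    transform: function(chunk, enc, cb) {
      var data = Buffer.concat([remainder, chunk]);
      var start = 0;
      var idx;
      while ((idx = data.indexOf(0x0a, start)) !== -1) { // 0x0a = '\n'
        this.push(data.slice(start, idx + 1)); // one push per row, as in 1.x
        start = idx + 1;
      }
      remainder = data.slice(start);
      cb();
    },
    flush: function(cb) {
      if (remainder.length) this.push(remainder);
      cb();
    }
  });
}
// usage: client.query(copyTo(...)).pipe(rowSplitter())
```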
## license
The MIT License (MIT)

View File

@@ -1,45 +0,0 @@
var cp = require('duplex-child-process');
var pg = require('pg')
var copy = require('../').from
var client = function() {
var client = new pg.Client()
client.connect()
return client
}
var inStream = function() {
return cp.spawn('seq', ['0', '29999999']);
}
var running = true;
var c = client();
c.query('DROP TABLE IF EXISTS plugnumber', function() {
c.query('CREATE TABLE plugnumber (num int)', function() {
var seq = inStream()
var from = c.query(copy('COPY plugnumber FROM STDIN'))
seq.pipe(from);
from.on('end', function() {
running = false;
c.end();
})
})
})
var rssMin = process.memoryUsage().rss / 1024 / 1024
var rssMax = rssMin
var memlog = function() {
var rss = process.memoryUsage().rss / 1024 / 1024
rssMin = Math.min(rss, rssMin)
rssMax = Math.max(rss, rssMax)
console.log('rss:' + Math.round(rss*100)/100 + 'MB rssMin:'+ Math.round(rssMin*100)/100 + 'MB rssMax:' + Math.round(rssMax*100)/100 + 'MB')
if (running) {
setTimeout(memlog, 1000);
}
}
memlog()

View File

@@ -1,86 +0,0 @@
var Benchmark = require('benchmark');
var cp = require('duplex-child-process');
var pg = require('pg')
var copy = require('../').from
var client = function() {
var client = new pg.Client()
client.connect()
return client
}
var psql = '/opt/postgresql-9.6.1/bin/psql'
var limit = 999999;
var inStream = function() {
return cp.spawn('seq', ['0', ''+limit]);
}
var suite = new Benchmark.Suite;
suite
.add({
name: 'unix pipe into psql COPY',
defer: true,
fn: function(d) {
var c = client();
c.query('DROP TABLE IF EXISTS plugnumber', function() {
c.query('CREATE TABLE plugnumber (num int)', function() {
c.end();
var from = cp.spawn('sh', ['-c', 'seq 0 '+limit+' | '+psql+' postgres -c \'COPY plugnumber FROM STDIN\''])
from.on('close', function() {
d.resolve();
})
})
})
}
})
.add({
name: 'pipe into psql COPY',
defer: true,
fn: function(d) {
var c = client();
c.query('DROP TABLE IF EXISTS plugnumber', function() {
c.query('CREATE TABLE plugnumber (num int)', function() {
c.end();
var seq = inStream();
var from = cp.spawn(psql, ['postgres', '-c', 'COPY plugnumber FROM STDIN'])
seq.pipe(from);
from.on('close', function() {
d.resolve();
})
})
})
}
})
.add({
name: 'pipe into pg-copy-stream COPY',
defer: true,
fn: function(d) {
var c = client();
c.query('DROP TABLE IF EXISTS plugnumber', function() {
c.query('CREATE TABLE plugnumber (num int)', function() {
var seq = inStream()
var from = c.query(copy('COPY plugnumber FROM STDIN'))
seq.pipe(from);
from.on('end', function() {
c.end();
d.resolve();
})
})
})
}
})
.on('cycle', function(event) {
console.log(String(event.target));
})
.on('complete', function() {
console.log('Fastest is ' + this.filter('fastest').map('name'));
});
var c = client()
c.query('DROP TABLE IF EXISTS plugnumber', function() {
c.end();
suite.run();
})

View File

@@ -1,17 +1,14 @@
'use strict';
module.exports = function(txt, options) {
return new CopyStreamQuery(txt, options)
}
var Transform = require('stream').Transform
var util = require('util')
var code = require('./message-formats')
var CopyStreamQuery = function(text, options) {
Transform.call(this, options)
this.text = text
this._gotCopyOutResponse = false
this._copyOutResponse = null
this.rowCount = 0
}
@@ -26,90 +23,63 @@ CopyStreamQuery.prototype.submit = function(connection) {
connection.stream.pipe(this)
}
var code = {
E: 69, //Error
H: 72, //CopyOutResponse
d: 0x64, //CopyData
c: 0x63 //CopyDone
}
CopyStreamQuery.prototype._detach = function() {
this.connection.stream.unpipe(this)
// Unpipe can drop us out of flowing mode
this.connection.stream.resume()
}
CopyStreamQuery.prototype._transform = function(chunk, enc, cb) {
var offset = 0
var Byte1Len = 1;
var Int32Len = 4;
if(this._remainder && chunk) {
chunk = Buffer.concat([this._remainder, chunk])
}
var length;
var messageCode;
var needPush = false;
var buffer = Buffer.alloc(chunk.length);
var buffer_offset = 0;
var self = this;
var pushBufferIfneeded = function() {
if (needPush && buffer_offset > 0) {
self.push(buffer.slice(0, buffer_offset))
buffer_offset = 0;
if(!this._copyOutResponse) {
this._copyOutResponse = true
if(chunk[0] == code.E) {
this._detach()
this.push(null)
return cb();
}
if(chunk[0] != code.H) {
this.emit('error', new Error('Expected copy out response'))
}
var length = chunk.readUInt32BE(1)
offset = 1
offset += length
}
while((chunk.length - offset) >= (Byte1Len + Int32Len)) {
while((chunk.length - offset) > 5) {
var messageCode = chunk[offset]
//console.log('PostgreSQL message ' + String.fromCharCode(messageCode))
switch(messageCode) {
// detect COPY start
case code.CopyOutResponse:
if (!this._gotCopyOutResponse) {
this._gotCopyOutResponse = true
} else {
this.emit('error', new Error('Unexpected CopyOutResponse message (H)'))
}
break;
// meaningful row
case code.CopyData:
needPush = true;
break;
// standard interspersed messages. discard
case code.ParameterStatus:
case code.NoticeResponse:
case code.NotificationResponse:
break;
case code.ErrorResponse:
case code.CopyDone:
pushBufferIfneeded();
this._detach()
this.push(null)
return cb();
break;
default:
this.emit('error', new Error('Unexpected PostgreSQL message ' + String.fromCharCode(messageCode)))
//complete or error
if(messageCode == code.c || messageCode == code.E) {
this._detach()
this.push(null)
return cb();
}
length = chunk.readUInt32BE(offset+Byte1Len)
if(chunk.length >= (offset + Byte1Len + length)) {
offset += Byte1Len + Int32Len
if (needPush) {
var row = chunk.slice(offset, offset + length - Int32Len)
this.rowCount++
row.copy(buffer, buffer_offset);
buffer_offset += row.length;
}
offset += (length - Int32Len)
//something bad happened
if(messageCode != code.d) {
return this.emit('error', new Error('expected "d" (copydata message)'))
}
var length = chunk.readUInt32BE(offset + 1) - 4 //subtract length of UInt32
//can we read the next row?
if(chunk.length > (offset + length + 5)) {
offset += 5
var slice = chunk.slice(offset, offset + length)
offset += length
this.push(slice)
this.rowCount++
} else {
// we need more chunks for a complete message
break;
}
}
pushBufferIfneeded();
if(chunk.length - offset) {
var slice = chunk.slice(offset)
this._remainder = slice
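Both variants above parse the same wire framing: a one-byte message code, then a 32-bit big-endian length that counts itself (Int32Len) plus the payload. A standalone sketch of that framing, with `readMessage` as a hypothetical helper (not part of the module):
```js
// Parse one complete message from buf at offset; null means "need more bytes".
function readMessage(buf, offset) {
  var Byte1Len = 1, Int32Len = 4;
  if (buf.length - offset < Byte1Len + Int32Len) return null;
  var messageCode = buf[offset];                    // e.g. 0x64 for CopyData
  var length = buf.readUInt32BE(offset + Byte1Len); // includes its own 4 bytes
  if (buf.length - offset < Byte1Len + length) return null;
  return {
    code: messageCode,
    payload: buf.slice(offset + Byte1Len + Int32Len, offset + Byte1Len + length),
    next: offset + Byte1Len + length
  };
}
```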

View File

@@ -1,5 +1,3 @@
'use strict';
var CopyToQueryStream = require('./copy-to')
module.exports = {
to: function(txt, options) {
@@ -12,7 +10,6 @@ module.exports = {
var Transform = require('stream').Transform
var util = require('util')
var code = require('./message-formats')
var CopyStreamQuery = function(text, options) {
Transform.call(this, options)
@@ -29,21 +26,28 @@ CopyStreamQuery.prototype.submit = function(connection) {
connection.query(this.text)
}
var code = {
H: 72, //CopyOutResponse
d: 0x64, //CopyData
c: 0x63 //CopyDone
}
var copyDataBuffer = Buffer([code.d])
CopyStreamQuery.prototype._transform = function(chunk, enc, cb) {
var Int32Len = 4;
var lenBuffer = Buffer.from([code.CopyData, 0, 0, 0, 0])
lenBuffer.writeUInt32BE(chunk.length + Int32Len, 1)
this.push(copyDataBuffer)
var lenBuffer = Buffer(4)
lenBuffer.writeUInt32BE(chunk.length + 4, 0)
this.push(lenBuffer)
this.push(chunk)
this.rowCount++
cb()
}
CopyStreamQuery.prototype._flush = function(cb) {
var Int32Len = 4;
var finBuffer = Buffer.from([code.CopyDone, 0, 0, 0, Int32Len])
var finBuffer = Buffer([code.c, 0, 0, 0, 4])
this.push(finBuffer)
this.cb_flush = cb
//never call this callback, do not close underlying stream
//cb()
}
CopyStreamQuery.prototype.handleError = function(e) {
@@ -51,24 +55,12 @@ CopyStreamQuery.prototype.handleError = function(e) {
}
CopyStreamQuery.prototype.handleCopyInResponse = function(connection) {
this.pipe(connection.stream, { end: false })
this.pipe(connection.stream)
}
CopyStreamQuery.prototype.handleCommandComplete = function(msg) {
// Parse affected row count as in
// https://github.com/brianc/node-postgres/blob/35e5567f86774f808c2a8518dd312b8aa3586693/lib/result.js#L37
var match = /COPY (\d+)/.exec((msg || {}).text)
if (match) {
this.rowCount = parseInt(match[1], 10)
}
// we delay the _flush cb so that the 'end' event is
// triggered after CommandComplete
this.cb_flush()
// unpipe from connection
this.unpipe(this.connection)
this.connection = null
CopyStreamQuery.prototype.handleCommandComplete = function() {
this.unpipe()
this.emit('end')
}
CopyStreamQuery.prototype.handleReadyForQuery = function() {
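To make the outgoing side concrete: `_transform` wraps each chunk in a CopyData message and `_flush` appends a CopyDone, exactly as the Buffers above encode. A byte-level sketch, using a subset of the codes from message-formats.js shown in the next file:
```js
var code = { CopyData: 0x64, CopyDone: 0x63 }; // subset of message-formats.js

// Frame one chunk as CopyData: 'd' + Int32 length (self-inclusive) + payload.
function frameCopyData(chunk) {
  var Int32Len = 4;
  var header = Buffer.from([code.CopyData, 0, 0, 0, 0]);
  header.writeUInt32BE(chunk.length + Int32Len, 1);
  return Buffer.concat([header, chunk]);
}

// Terminate the copy: 'c' + Int32 length 4, no payload.
var copyDone = Buffer.from([code.CopyDone, 0, 0, 0, 4]);
```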

View File

@@ -1,25 +0,0 @@
/**
* The COPY feature uses the following protocol codes.
* The codes for the most recent protocol version are documented on
* https://www.postgresql.org/docs/current/static/protocol-message-formats.html
*
* The protocol flow itself is described on
* https://www.postgresql.org/docs/current/static/protocol-flow.html
*/
module.exports = {
ErrorResponse: 0x45, // E
CopyInResponse: 0x47, // G
CopyOutResponse: 0x48, // H
CopyBothResponse: 0x57, // W
CopyDone: 0x63, // c
CopyData: 0x64, // d
CopyFail: 0x66, // f
// It is possible for NoticeResponse and ParameterStatus messages to be interspersed between CopyData messages;
// frontends must handle these cases, and should be prepared for other asynchronous message types as well
// (see Section 50.2.6).
// Otherwise, any message type other than CopyData or CopyDone may be treated as terminating copy-out mode.
NotificationResponse: 0x41, // A
NoticeResponse: 0x4E, // N
ParameterStatus: 0x53 // S
}
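A small illustrative use of this table when debugging unexpected protocol traffic (the reverse map is hypothetical, not part of the module):
```js
var code = require('./message-formats');

// byte value -> message name, for readable logs
var nameByCode = {};
Object.keys(code).forEach(function(name) {
  nameByCode[code[name]] = name;
});

console.log(nameByCode[0x64]); // CopyData
console.log(nameByCode[0x63]); // CopyDone
```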

package-lock.json (generated, 215 lines)
View File

@@ -1,215 +0,0 @@
{
"name": "pg-copy-streams",
"version": "2.2.0",
"lockfileVersion": 1,
"requires": true,
"dependencies": {
"async": {
"version": "0.2.10",
"resolved": "https://registry.npmjs.org/async/-/async-0.2.10.tgz",
"integrity": "sha1-trvgsGdLnXGXCMo43owjfLUmw9E=",
"dev": true
},
"base64-js": {
"version": "0.0.2",
"resolved": "https://registry.npmjs.org/base64-js/-/base64-js-0.0.2.tgz",
"integrity": "sha1-Ak8Pcq+iW3X5wO5zzU9V7Bvtl4Q=",
"dev": true
},
"benchmark": {
"version": "2.1.4",
"resolved": "https://registry.npmjs.org/benchmark/-/benchmark-2.1.4.tgz",
"integrity": "sha1-CfPeMckWQl1JjMLuVloOvzwqVik=",
"dev": true,
"requires": {
"lodash": "^4.17.4",
"platform": "^1.3.3"
}
},
"bops": {
"version": "0.0.6",
"resolved": "https://registry.npmjs.org/bops/-/bops-0.0.6.tgz",
"integrity": "sha1-CC0dVfoB5g29wuvC26N/ZZVUzzo=",
"dev": true,
"requires": {
"base64-js": "0.0.2",
"to-utf8": "0.0.1"
}
},
"buffer-writer": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/buffer-writer/-/buffer-writer-2.0.0.tgz",
"integrity": "sha512-a7ZpuTZU1TRtnwyCNW3I5dc0wWNC3VR9S++Ewyk2HHZdrO3CQJqSpd+95Us590V6AL7JqUAH2IwZ/398PmNFgw==",
"dev": true
},
"concat-stream": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/concat-stream/-/concat-stream-1.1.0.tgz",
"integrity": "sha1-hCae/YzGUCdeMi8wnfRIZ7xRxfM=",
"dev": true,
"requires": {
"bops": "0.0.6"
}
},
"duplex-child-process": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/duplex-child-process/-/duplex-child-process-1.0.0.tgz",
"integrity": "sha1-SpSXQob7x4QNCFPSs/5ZCp20YUc=",
"dev": true
},
"gonna": {
"version": "0.0.0",
"resolved": "https://registry.npmjs.org/gonna/-/gonna-0.0.0.tgz",
"integrity": "sha1-6k4ZsVJ6F4LhJQVeMCSabUvHmlk=",
"dev": true
},
"heroku-env": {
"version": "0.1.1",
"resolved": "https://registry.npmjs.org/heroku-env/-/heroku-env-0.1.1.tgz",
"integrity": "sha1-wGeRyUTpuHSOMXf1S/cBQyZ+Yxc=",
"dev": true,
"requires": {
"parse-database-url": "~0.2.0"
}
},
"lodash": {
"version": "4.17.11",
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.11.tgz",
"integrity": "sha512-cQKh8igo5QUhZ7lg38DYWAxMvjSAKG0A8wGSVimP07SIUEK2UO+arSRKbRZWtelMtN5V0Hkwh5ryOto/SshYIg==",
"dev": true
},
"packet-reader": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/packet-reader/-/packet-reader-1.0.0.tgz",
"integrity": "sha512-HAKu/fG3HpHFO0AA8WE8q2g+gBJaZ9MG7fcKk+IJPLTGAD6Psw4443l+9DGRbOIh3/aXr7Phy0TjilYivJo5XQ==",
"dev": true
},
"parse-database-url": {
"version": "0.2.2",
"resolved": "https://registry.npmjs.org/parse-database-url/-/parse-database-url-0.2.2.tgz",
"integrity": "sha1-SGFa56fA/HfjKU0jVCpqUnPDVws=",
"dev": true
},
"pg": {
"version": "7.8.2",
"resolved": "https://registry.npmjs.org/pg/-/pg-7.8.2.tgz",
"integrity": "sha512-5U4fjV43DnQxelkhyPdU3YfUbYVa21bNmreXRCM/gFFw09YxWaitWWITm/u0twUNF5EYOSDhkgyEAocgtpP9JQ==",
"dev": true,
"requires": {
"buffer-writer": "2.0.0",
"packet-reader": "1.0.0",
"pg-connection-string": "0.1.3",
"pg-pool": "^2.0.4",
"pg-types": "~2.0.0",
"pgpass": "1.x",
"semver": "4.3.2"
}
},
"pg-connection-string": {
"version": "0.1.3",
"resolved": "https://registry.npmjs.org/pg-connection-string/-/pg-connection-string-0.1.3.tgz",
"integrity": "sha1-2hhHsglA5C7hSSvq9l1J2RskXfc=",
"dev": true
},
"pg-int8": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/pg-int8/-/pg-int8-1.0.1.tgz",
"integrity": "sha512-WCtabS6t3c8SkpDBUlb1kjOs7l66xsGdKpIPZsg4wR+B3+u9UAum2odSsF9tnvxg80h4ZxLWMy4pRjOsFIqQpw==",
"dev": true
},
"pg-pool": {
"version": "2.0.6",
"resolved": "https://registry.npmjs.org/pg-pool/-/pg-pool-2.0.6.tgz",
"integrity": "sha512-hod2zYQxM8Gt482q+qONGTYcg/qVcV32VHVPtktbBJs0us3Dj7xibISw0BAAXVMCzt8A/jhfJvpZaxUlqtqs0g==",
"dev": true
},
"pg-types": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/pg-types/-/pg-types-2.0.0.tgz",
"integrity": "sha512-THUD7gQll5tys+5eQ8Rvs7DjHiIC3bLqixk3gMN9Hu8UrCBAOjf35FoI39rTGGc3lM2HU/R+Knpxvd11mCwOMA==",
"dev": true,
"requires": {
"pg-int8": "1.0.1",
"postgres-array": "~2.0.0",
"postgres-bytea": "~1.0.0",
"postgres-date": "~1.0.0",
"postgres-interval": "^1.1.0"
}
},
"pgpass": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/pgpass/-/pgpass-1.0.2.tgz",
"integrity": "sha1-Knu0G2BltnkH6R2hsHwYR8h3swY=",
"dev": true,
"requires": {
"split": "^1.0.0"
}
},
"platform": {
"version": "1.3.5",
"resolved": "https://registry.npmjs.org/platform/-/platform-1.3.5.tgz",
"integrity": "sha512-TuvHS8AOIZNAlE77WUDiR4rySV/VMptyMfcfeoMgs4P8apaZM3JrnbzBiixKUv+XR6i+BXrQh8WAnjaSPFO65Q==",
"dev": true
},
"postgres-array": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/postgres-array/-/postgres-array-2.0.0.tgz",
"integrity": "sha512-VpZrUqU5A69eQyW2c5CA1jtLecCsN2U/bD6VilrFDWq5+5UIEVO7nazS3TEcHf1zuPYO/sqGvUvW62g86RXZuA==",
"dev": true
},
"postgres-bytea": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/postgres-bytea/-/postgres-bytea-1.0.0.tgz",
"integrity": "sha1-AntTPAqokOJtFy1Hz5zOzFIazTU=",
"dev": true
},
"postgres-date": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/postgres-date/-/postgres-date-1.0.3.tgz",
"integrity": "sha1-4tiXAu/bJY/52c7g/pG9BpdSV6g=",
"dev": true
},
"postgres-interval": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/postgres-interval/-/postgres-interval-1.2.0.tgz",
"integrity": "sha512-9ZhXKM/rw350N1ovuWHbGxnGh/SNJ4cnxHiM0rxE4VN41wsg8P8zWn9hv/buK00RP4WvlOyr/RBDiptyxVbkZQ==",
"dev": true,
"requires": {
"xtend": "^4.0.0"
}
},
"semver": {
"version": "4.3.2",
"resolved": "https://registry.npmjs.org/semver/-/semver-4.3.2.tgz",
"integrity": "sha1-x6BxWKgL7dBSNVt3DYLWZA+AO+c=",
"dev": true
},
"split": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/split/-/split-1.0.1.tgz",
"integrity": "sha512-mTyOoPbrivtXnwnIxZRFYRrPNtEFKlpB2fvjSnCQUiAA6qAZzqwna5envK4uk6OIeP17CsdF3rSBGYVBsU0Tkg==",
"dev": true,
"requires": {
"through": "2"
}
},
"through": {
"version": "2.3.8",
"resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz",
"integrity": "sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU=",
"dev": true
},
"to-utf8": {
"version": "0.0.1",
"resolved": "https://registry.npmjs.org/to-utf8/-/to-utf8-0.0.1.tgz",
"integrity": "sha1-0Xrqcv8vujm55DYBvns/9y4ImFI=",
"dev": true
},
"xtend": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.1.tgz",
"integrity": "sha1-pcbVMr5lbiPbgg77lDofBJmNY68=",
"dev": true
}
}
}

View File

@@ -1,6 +1,6 @@
{
"name": "pg-copy-streams",
"version": "2.2.0",
"version": "0.3.0",
"description": "Low-Level COPY TO and COPY FROM streams for PostgreSQL in JavaScript using",
"main": "index.js",
"scripts": {
@@ -23,13 +23,11 @@
"url": "https://github.com/brianc/node-pg-copy-streams/issues"
},
"devDependencies": {
"async": "~0.2.10",
"benchmark": "^2.1.4",
"pg": "~4.4.3",
"concat-stream": "~1.1.0",
"duplex-child-process": "^1.0.0",
"gonna": "0.0.0",
"lodash": "~2.2.1",
"heroku-env": "~0.1.1",
"lodash": "^4.17.11",
"pg": "^7.8.2"
"async": "~0.2.10"
}
}

View File

@@ -1,5 +1,3 @@
'use strict';
var assert = require('assert')
var gonna = require('gonna')
@@ -21,7 +19,7 @@ var testBinaryCopy = function() {
var fromClient = client()
var toClient = client()
var queries = [
queries = [
'DROP TABLE IF EXISTS data',
'CREATE TABLE IF NOT EXISTS data (num BIGINT, word TEXT)',
'INSERT INTO data (num, word) VALUES (1, \'hello\'), (2, \'other thing\'), (3, \'goodbye\')',
@@ -35,7 +33,7 @@ var testBinaryCopy = function() {
var fromStream = fromClient.query(to('COPY (SELECT * FROM data) TO STDOUT BINARY'))
var toStream = toClient.query(from('COPY data_copy FROM STDIN BINARY'))
var runStream = function(callback) {
runStream = function(callback) {
fromStream.on('error', callback)
toStream.on('error', callback)
toStream.on('finish', callback)

View File

@@ -1,5 +1,3 @@
'use strict';
var assert = require('assert')
var gonna = require('gonna')
@@ -32,8 +30,12 @@ var testRange = function(top) {
var txt = 'COPY numbers FROM STDIN'
var stream = fromClient.query(copy(txt))
var rowEmitCount = 0
stream.on('row', function() {
rowEmitCount++
})
for(var i = 0; i < top; i++) {
stream.write(Buffer.from('' + i + '\t' + i*10 + '\n'))
stream.write(Buffer('' + i + '\t' + i*10 + '\n'))
}
stream.end()
var countDone = gonna('have correct count')
@@ -41,7 +43,6 @@ var testRange = function(top) {
fromClient.query('SELECT COUNT(*) FROM numbers', function(err, res) {
assert.ifError(err)
assert.equal(res.rows[0].count, top, 'expected ' + top + ' rows but got ' + res.rows[0].count)
assert.equal(stream.rowCount, top, 'expected ' + top + ' rows but db count is ' + stream.rowCount)
//console.log('found ', res.rows.length, 'rows')
countDone()
var firstRowDone = gonna('have correct result')
@@ -57,48 +58,3 @@ var testRange = function(top) {
}
testRange(1000)
var testSingleEnd = function() {
var fromClient = client()
fromClient.query('CREATE TEMP TABLE numbers(num int)')
var txt = 'COPY numbers FROM STDIN';
var stream = fromClient.query(copy(txt))
var count = 0;
stream.on('end', function() {
count++;
assert(count==1, '`end` Event was triggered ' + count + ' times');
if (count == 1) fromClient.end();
})
stream.end(Buffer.from('1\n'))
}
testSingleEnd()
var testClientReuse = function() {
var fromClient = client()
fromClient.query('CREATE TEMP TABLE numbers(num int)')
var txt = 'COPY numbers FROM STDIN';
var count = 0;
var countMax = 2;
var card = 100000;
var runStream = function() {
var stream = fromClient.query(copy(txt))
stream.on('end', function() {
count++;
if (count<countMax) {
runStream()
} else {
fromClient.query('SELECT sum(num) AS s FROM numbers', function(err, res) {
var total = countMax * card * (card+1)
assert.equal(res.rows[0].s, total, 'copy-from.ClientReuse wrong total')
fromClient.end()
})
}
})
stream.write(Buffer.from(_.range(0, card+1).join('\n') + '\n'))
stream.end(Buffer.from(_.range(0, card+1).join('\n') + '\n'))
}
runStream();
}
testClientReuse()

View File

@@ -1,5 +1,3 @@
'use strict';
var assert = require('assert')
var gonna = require('gonna')
@@ -9,7 +7,6 @@ var concat = require('concat-stream')
var pg = require('pg')
var copy = require('../').to
var code = require('../message-formats')
var client = function() {
var client = new pg.Client()
@@ -22,20 +19,9 @@ var testConstruction = function() {
var stream = copy(txt, {highWaterMark: 10})
assert.equal(stream._readableState.highWaterMark, 10, 'Client should have been set with a correct highWaterMark.')
}
testConstruction()
var testComparators = function() {
var copy1 = copy();
copy1.pipe(concat(function(buf) {
assert(copy1._gotCopyOutResponse, 'should have received CopyOutResponse')
assert(!copy1._remainder, 'Message with no additional data (len=Int4Len+0) should not leave a remainder')
}))
copy1.end(Buffer.from([code.CopyOutResponse, 0x00, 0x00, 0x00, 0x04]));
}
testComparators();
var testRange = function(top) {
var fromClient = client()
var txt = 'COPY (SELECT * from generate_series(0, ' + (top - 1) + ')) TO STDOUT'
@@ -58,6 +44,7 @@ var testRange = function(top) {
done()
});
}
testRange(10000)
var testInternalPostgresError = function() {
@@ -74,7 +61,7 @@ var testInternalPostgresError = function() {
setTimeout(function() {
var cancelQuery = "SELECT pg_cancel_backend(pid) FROM pg_stat_activity WHERE query ~ 'pg_sleep' AND NOT query ~ 'pg_cancel_backend'"
cancelClient.query(cancelQuery, function() { cancelClient.end() })
cancelClient.query(cancelQuery)
}, 50)
}
@@ -82,72 +69,9 @@ var testInternalPostgresError = function() {
assert.notEqual(err, null)
var expectedMessage = 'canceling statement due to user request'
assert.notEqual(err.toString().indexOf(expectedMessage), -1, 'Error message should mention reason for query failure.')
cancelClient.end()
queryClient.end()
})
}
testInternalPostgresError()
var testNoticeResponse = function() {
// we use a special trick to generate a warning
// on the copy stream.
var queryClient = client()
var set = '';
set += 'SET SESSION client_min_messages = WARNING;'
set += 'SET SESSION standard_conforming_strings = off;'
set += 'SET SESSION escape_string_warning = on;'
queryClient.query(set, function(err, res) {
assert.equal(err, null, 'testNoticeResponse - could not SET parameters')
var runStream = function(callback) {
var txt = "COPY (SELECT '\\\n') TO STDOUT"
var stream = queryClient.query(copy(txt))
stream.on('data', function(data) {
})
stream.on('error', callback)
// make sure stream is pulled from
stream.pipe(concat(callback.bind(null,null)))
}
runStream(function(err) {
assert.equal(err, null, err)
queryClient.end()
})
})
}
testNoticeResponse();
var testClientReuse = function() {
var c = client();
var limit = 100000;
var countMax = 10;
var countA = countMax;
var countB = 0;
var runStream = function(num, callback) {
var sql = "COPY (SELECT * FROM generate_series(0,"+limit+")) TO STDOUT"
var stream = c.query(copy(sql))
stream.on('error', callback)
stream.pipe(concat(function(buf) {
var res = buf.toString('utf8');
var exp = _.range(0, limit+1).join('\n') + '\n'
assert.equal(res, exp, 'clientReuse: sent & received buffer should be equal')
countB++;
callback();
}))
}
var rs = function(err) {
assert.equal(err, null, err)
countA--;
if (countA) {
runStream(countB, rs)
} else {
assert.equal(countB, countMax, 'clientReuse: there should be countMax queries on the same client')
c.end()
}
};
runStream(countB, rs);
}
testClientReuse();

View File

@@ -1,5 +1,3 @@
'use strict';
require('./copy-from')
require('./copy-to')
require('./binary')