-
Notifications
You must be signed in to change notification settings - Fork 0
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Dump zone functionality #41
base: production-namebase
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,51 @@ | ||
'use strict'; | ||
|
||
const bns = require('bns'); | ||
const {types} = require('bns/lib/constants'); | ||
const stream = require('stream'); | ||
|
||
const NameState = require('../covenants/namestate'); | ||
const {Resource} = require('../dns/resource'); | ||
|
||
/** | ||
* @typedef {import('../blockchain').Chain} Chain | ||
*/ | ||
|
||
/**
 * readableStream produces a newline-delimited list of all
 * DNS records on a chain, except for RRSIG and TXT, as a
 * utf8 encoded string.
 *
 * @param {Chain} chain the chain to stream from
 * @returns {stream.Readable} a readable stream of DNS records
 */

function readableStream(chain) {
  const iter = chain.db.tree.iterator(true);

  async function* gen() {
    while (await iter.next()) {
      /** @type {NameState} */
      const ns = NameState.decode(iter.value);

      // Names with no resource data have nothing to dump.
      if (ns.data.length === 0)
        continue;

      /** @type {string} */
      const fqdn = bns.util.fqdn(ns.name.toString('ascii'));

      /** @type {Resource} */
      const resource = Resource.decode(ns.data);
      const zone = resource.toZone(fqdn);

      // RRSIG and TXT records are intentionally omitted from the dump.
      for (const record of zone) {
        if (record.type !== types.RRSIG && record.type !== types.TXT)
          yield Buffer.from(record.toString() + '\n', 'utf8');
      }
    }
  }

  // objectMode: false — consumers receive a plain byte stream.
  return stream.Readable.from(gen(), {objectMode: false});
}
|
||
module.exports = { | ||
readableStream: readableStream | ||
}; |
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -21,6 +21,8 @@ const Claim = require('../primitives/claim'); | |
const Address = require('../primitives/address'); | ||
const Network = require('../protocol/network'); | ||
const pkg = require('../pkg'); | ||
const AWS = require('aws-sdk'); | ||
const dumpzone = require('./dumpzone'); | ||
|
||
/** | ||
* HTTP | ||
|
@@ -48,6 +50,11 @@ class HTTP extends Server { | |
this.miner = this.node.miner; | ||
this.rpc = this.node.rpc; | ||
|
||
this.s3 = null; | ||
/** @type {AWS.S3.ManagedUpload | null} */ | ||
this.runningUpload = null; | ||
this.uploadProgress = null; | ||
|
||
this.init(); | ||
} | ||
|
||
|
@@ -72,6 +79,22 @@ class HTTP extends Server { | |
|
||
this.initRouter(); | ||
this.initSockets(); | ||
this.initS3(); | ||
} | ||
|
||
/** | ||
* Initialize the S3 service, if possible. | ||
* @private | ||
*/ | ||
|
||
initS3() { | ||
AWS.config.getCredentials((err) => { | ||
turbomaze marked this conversation as resolved.
Show resolved
Hide resolved
|
||
if (err) | ||
this.logger.warning('couldn\'t load AWS credentials', err.stack); | ||
// credentials not loaded | ||
else | ||
this.s3 = new AWS.S3(); | ||
}); | ||
} | ||
|
||
/** | ||
|
@@ -452,6 +475,79 @@ class HTTP extends Server { | |
|
||
res.json(200, { success: true }); | ||
}); | ||
|
||
// Initiate a zone dump to S3 | ||
this.post('/dump-zone-to-s3', async (req, res) => { | ||
if(this.s3 === null) { | ||
res.json(501, | ||
{ | ||
success: false, | ||
message: 'AWS is not properly configured' | ||
} | ||
); | ||
return; | ||
} | ||
|
||
if(this.runningUpload !== null) { | ||
res.json(202, | ||
{ | ||
success: true, | ||
message: 'upload already in progress' | ||
} | ||
); | ||
return; | ||
} | ||
|
||
this.runningUpload = this.s3.upload({ | ||
Bucket: this.options.s3DumpConfig.bucket, | ||
Key: this.options.s3DumpConfig.key, | ||
Body: dumpzone.readableStream(this.chain) | ||
}, (err, data) => { | ||
// TODO - capture status, do a rename? | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Not blocking/not requesting this but might be nice for the key to have the timestamp and then to rename one "current" etc There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 👍 I'll verify how rename works in S3 There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. There's actually no way to rename something in S3, would have to do a copy then delete. Alternative could be to push files with the timestamp and have some other process to reap ones older than a certain age, but it would require the consumer to list the objects in the bucket and choose the latest one There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. one other alternative might be enabling versioning on s3 bucket by default you will get the timestamp and management of old versions provided by aws There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Versioning sounds good 👍 |
||
this.runningUpload = null; | ||
this.uploadProgress = null; | ||
}); | ||
|
||
this.runningUpload.on('httpUploadProgress', (progress) => { | ||
this.uploadProgress = progress; | ||
}); | ||
|
||
res.json(202, { | ||
success: true, | ||
message: 'upload started' | ||
}); | ||
return; | ||
}); | ||
|
||
this.get('/dump-zone-to-s3', async (req, res) => { | ||
if(this.s3 === null) { | ||
res.json(501, | ||
{ | ||
success: false, | ||
message: 'AWS is not properly configured' | ||
} | ||
); | ||
return; | ||
} | ||
|
||
if(this.runningUpload === null) { | ||
res.json(200, | ||
{ | ||
success: true, | ||
message: 'No upload is currently running' | ||
} | ||
); | ||
return; | ||
} | ||
|
||
res.json(200, | ||
{ | ||
success: true, | ||
message: 'Upload in progress', | ||
progress: this.uploadProgress | ||
} | ||
); | ||
}); | ||
} | ||
|
||
/** | ||
|
@@ -894,6 +990,11 @@ class HTTPOptions { | |
this.noAuth = true; | ||
} | ||
|
||
if (options.s3 != null) { | ||
assert(typeof options.s3 === 'object'); | ||
this.s3DumpConfig = options.s3; | ||
} | ||
|
||
return this; | ||
} | ||
|
||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
What are some other options besides requiring this package for the whole full node? Not blocking but not ideal since it's a big dependency and would probably never make it into upstream.
Could this be an optional peer dependency or something?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I'm not sure how peer dependencies work, exactly. I'd considered adding it as a separate plugin.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I believe aws sdk v2 lets you import only the services you are planning to use as well, that would trim down the loaded lib size a bit.
var S3 = require('aws-sdk/clients/s3');
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Peer dependency is the wrong word since it means something really specific in node.js/npm. I trust aws-sdk but it feels bad adding a 50mb dependency for this one feature. Do you have a clear idea of how this dependency could live in a plugin so
hsd
stays clean?There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
A plugin would just need access to the
Chain
, which is itself a plugin that it could take a dependency on, I believe. So a dump-zone-to-s3 plugin would pretty much be this code with some boilerplate to create the plugin itself and then we'd need some other way of triggering it than the HTTP endpoint on theNode
plugin interface.Since it assumes AWS anyway, we could use a queue. If that's too much we could just put up another HTTP interface on a third port. If we wanted to move more of our custom functionality into plugins, it'd be easy to extend that HTTP interface to cover all of them with a namebase meta-plugin that aggregated the calls
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Bump @turbomaze @rozaydin
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Replying in slack