From 5f57283065c86d1002c7e5c2d4e91ca80773f0a5 Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Mon, 17 Jun 2013 18:59:56 +0100 Subject: [PATCH 01/30] Implemented basic persistance layer on top of LevelUp. --- index.js | 1 + lib/client.js | 4 ++ lib/persistance/index.js | 3 + lib/persistance/levelup.js | 39 +++++++++++++ lib/persistance/memory.js | 13 +++++ lib/server.js | 23 ++++++++ package.json | 11 +++- test/persistance/abstract.js | 96 ++++++++++++++++++++++++++++++++ test/persistance/levelup_spec.js | 17 ++++++ test/persistance/memory_spec.js | 10 ++++ test/server_spec.js | 58 +++++++++++++++++++ 11 files changed, 273 insertions(+), 2 deletions(-) create mode 100644 lib/persistance/index.js create mode 100644 lib/persistance/levelup.js create mode 100644 lib/persistance/memory.js create mode 100644 test/persistance/abstract.js create mode 100644 test/persistance/levelup_spec.js create mode 100644 test/persistance/memory_spec.js diff --git a/index.js b/index.js index d4d3d3c..35f2693 100644 --- a/index.js +++ b/index.js @@ -1,3 +1,4 @@ module.exports.Server = require("./lib/server"); module.exports.Authorizer = require("./lib/authorizer"); +module.exports.persistance = require("./lib/persistance"); diff --git a/lib/client.js b/lib/client.js index 93f7444..55e48b5 100644 --- a/lib/client.js +++ b/lib/client.js @@ -344,6 +344,10 @@ Client.prototype.handleSubscribe = function(packet) { return; } + packet.subscriptions.forEach(function(sub) { + that.server.emit("subscribed", sub.topic, that); + }); + if(!that._closed) { that.connection.suback({ messageId: packet.messageId, diff --git a/lib/persistance/index.js b/lib/persistance/index.js new file mode 100644 index 0000000..1161366 --- /dev/null +++ b/lib/persistance/index.js @@ -0,0 +1,3 @@ + +module.exports.Memory = require("./memory"); +module.exports.LevelUp = require("./levelup"); diff --git a/lib/persistance/levelup.js b/lib/persistance/levelup.js new file mode 100644 index 0000000..6c59c7e --- /dev/null +++ 
b/lib/persistance/levelup.js @@ -0,0 +1,39 @@ + +var levelup = require("levelup"); +var sublevel = require("level-sublevel"); + +function LevelUpPersistance(path, options) { + options = options || {}; + options.valueEncoding = "json"; + this.db = sublevel(levelup(path, options)); + this._retained = this.db.sublevel("retained"); +} + +LevelUpPersistance.prototype.storeRetained = function(packet, cb) { + this._retained.put(packet.topic, packet, cb); +}; + +LevelUpPersistance.prototype.lookupRetained = function(pattern, cb) { + var stream = this._retained.createReadStream(); + var matched = []; + var regexp = new RegExp(pattern.replace(/(#|\+)/, ".+")); + + stream.on("error", cb); + + stream.on("end", function() { + cb(null, matched); + }); + + stream.on("data", function(data) { + + if (regexp.test(data.key)) { + matched.push(data.value); + } + }); +}; + +LevelUpPersistance.prototype.close = function(cb) { + this.db.close(cb); +}; + +module.exports = LevelUpPersistance; diff --git a/lib/persistance/memory.js b/lib/persistance/memory.js new file mode 100644 index 0000000..e20e67f --- /dev/null +++ b/lib/persistance/memory.js @@ -0,0 +1,13 @@ + +var LevelUpPersistance = require("./levelup"); +var util = require("util"); +var MemDOWN = require("memdown"); +var factory = function (location) { return new MemDOWN(location) }; + +function MemoryPersistance() { + LevelUpPersistance.call(this, "RAM", { db: factory }); +} + +util.inherits(MemoryPersistance, LevelUpPersistance); + +module.exports = MemoryPersistance; diff --git a/lib/server.js b/lib/server.js index 953b92a..55b652c 100644 --- a/lib/server.js +++ b/lib/server.js @@ -6,6 +6,7 @@ var ascoltatori = require("ascoltatori"); var EventEmitter = require("events").EventEmitter; var bunyan = require("bunyan"); var Client = require("./client"); +var persistance = require("./persistance"); /** * The Mosca Server is a very simple MQTT server that @@ -28,6 +29,8 @@ var Client = require("./client"); * the client is passed as a 
parameter. * - `published`, when a new message is published; * the packet and the client are passed as parameters. + * - `subcribed`, when a new client is subscribed to a pattern; + * the pattern and the client are passed as parameters. * * @param {Object} opts The option object * @param {Function} callback The ready callback @@ -68,6 +71,8 @@ function Server(opts, callback) { this.ascoltatore = ascoltatori.build(this.opts.backend); this.ascoltatore.on("error", this.emit.bind(this)); + this.persistance = this.opts.persistance || new persistance.Memory(); + that.once("ready", callback); async.series([ @@ -91,6 +96,24 @@ function Server(opts, callback) { that.on("clientDisconnected", function(client) { delete that.clients[client.id]; }); + + that.on("published", function(packet) { + if (packet.retain) { + that.persistance.storeRetained(packet); + } + }); + + that.on("subscribed", function(pattern, client) { + that.persistance.lookupRetained(pattern, function(err, matches) { + if (err) { + client.emit("error", err); + return; + } + matches.forEach(function(match) { + client.forward(match.topic, match.payload, match, pattern); + }); + }); + }); } module.exports = Server; diff --git a/package.json b/package.json index 0d2e95f..b7ef7f1 100644 --- a/package.json +++ b/package.json @@ -41,7 +41,8 @@ "dox-foundation": "~0.4.4", "jshint": "~1.0.0", "js-beautify": "~0.4.2", - "tmp": "0.0.17" + "tmp": "0.0.17", + "level-test": "~1.3.0" }, "dependencies": { "mqtt": "~0.2.10", @@ -50,7 +51,13 @@ "debug": "~0.7.2", "commander": "~1.1.1", "minimatch": "~0.2.11", - "bunyan": "~0.21.3" + "bunyan": "~0.21.3", + "memdown": "~0.2.0", + "level": "~0.10.0", + "levelup": "~0.10.0", + "level-sublevel": "~4.7.0", + "tmp": "0.0.20", + "leveldown": "~0.6.1" }, "optionalDependencies": { "zmq": "~2.4.0", diff --git a/test/persistance/abstract.js b/test/persistance/abstract.js new file mode 100644 index 0000000..7648d0a --- /dev/null +++ b/test/persistance/abstract.js @@ -0,0 +1,96 @@ +"use 
strict"; + +var async = require("async"); + +module.exports = function(create) { + + var instance = null; + + beforeEach(function(done) { + create(function(err, result) { + if (err) { + return done(err); + } + + instance = result; + done(); + }); + }); + + afterEach(function(done) { + instance.close(done); + }); + + it("should store retain messages", function(done) { + var packet = { + topic: "hello", + qos: 0, + payload: "world", + messageId: 42, + retain: true + }; + instance.storeRetained(packet, done); + }); + + it("should lookup retain messages and not matching", function(done) { + instance.lookupRetained("hello", function(err, results) { + expect(results).to.eql([]); + done(); + }); + }); + + it("should match and load a retain message", function(done) { + var packet = { + topic: "hello", + qos: 0, + payload: "world", + messageId: 42, + retain: true + }; + + async.series([ + function(cb) { + instance.storeRetained(packet, cb); + }, + function(cb) { + instance.lookupRetained("hello", function(err, results) { + expect(results[0]).to.eql(packet); + cb(); + }); + } + ], done); + }); + + it("should match and load with a pattern", function(done) { + var packet1 = { + topic: "hello/1", + qos: 0, + payload: "world", + messageId: 42, + retain: true + }; + + var packet2 = { + topic: "hello/2", + qos: 0, + payload: "world", + messageId: 43, + retain: true + }; + + async.series([ + function(cb) { + instance.storeRetained(packet1, cb); + }, + function(cb) { + instance.storeRetained(packet2, cb); + }, + function(cb) { + instance.lookupRetained("hello/#", function(err, results) { + expect(results).to.eql([packet1, packet2]); + cb(); + }); + } + ], done); + }); +}; diff --git a/test/persistance/levelup_spec.js b/test/persistance/levelup_spec.js new file mode 100644 index 0000000..0624d20 --- /dev/null +++ b/test/persistance/levelup_spec.js @@ -0,0 +1,17 @@ +"use strict"; + +var abstract = require("./abstract"); +var LevelUp = require("../../").persistance.LevelUp; +var tmp = 
require("tmp"); + +describe("mosca.persitance.LevelUp", function() { + abstract(function(cb) { + tmp.dir(function (err, path) { + if (err) { + return cb(err); + } + + cb(null, new LevelUp(path)); + }); + }); +}); diff --git a/test/persistance/memory_spec.js b/test/persistance/memory_spec.js new file mode 100644 index 0000000..72f1682 --- /dev/null +++ b/test/persistance/memory_spec.js @@ -0,0 +1,10 @@ +"use strict"; + +var abstract = require("./abstract"); +var Memory = require("../../").persistance.Memory; + +describe("mosca.persitance.Memory", function() { + abstract(function(cb) { + cb(null, new Memory()); + }); +}); diff --git a/test/server_spec.js b/test/server_spec.js index 2d9c05a..410c155 100644 --- a/test/server_spec.js +++ b/test/server_spec.js @@ -1033,4 +1033,62 @@ describe("mosca.Server", function() { }); }); }); + + it("should support retained messages", function(done) { + + async.waterfall([ + + function(cb) { + var client = mqtt.createConnection(settings.port, settings.host); + + client.on("connected", function() { + var opts = buildOpts(); + + client.connect(opts); + + client.on('connack', function(packet) { + + cb(null, client); + }); + }); + }, + + function(client, cb) { + client.publish({ + topic: "hello", + qos: 0, + payload: "world", + messageId: 42, + retain: true + }); + client.on("close", cb); + client.stream.end(); + }, + + function(cb) { + buildAndConnect(done, function(client) { + cb(null, client); + }); + }, + + function(client, cb) { + var subscriptions = [{ + topic: "hello", + qos: 0 + } + ]; + + client.subscribe({ + subscriptions: subscriptions, + messageId: 42 + }); + + client.on("publish", function(packet) { + expect(packet.topic).to.be.eql("hello"); + expect(packet.payload).to.be.eql("world"); + client.disconnect(); + }); + } + ]); + }); }); From 718126ed679b2c8437ded582f467d7f2ad16ff78 Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Mon, 17 Jun 2013 19:32:39 +0100 Subject: [PATCH 02/30] Moved the wiring to the Server in the 
persistance itself. --- lib/persistance/abstract.js | 32 +++++++++++++++++++++ lib/persistance/levelup.js | 4 +++ lib/persistance/memory.js | 2 +- lib/server.js | 22 +-------------- test/persistance/abstract.js | 48 ++++++++++++++++++++++++++++++++ test/persistance/levelup_spec.js | 2 +- test/persistance/memory_spec.js | 2 +- test/server_spec.js | 3 ++ 8 files changed, 91 insertions(+), 24 deletions(-) create mode 100644 lib/persistance/abstract.js diff --git a/lib/persistance/abstract.js b/lib/persistance/abstract.js new file mode 100644 index 0000000..69cc076 --- /dev/null +++ b/lib/persistance/abstract.js @@ -0,0 +1,32 @@ + +function AbstractPersistence() { + +} + +AbstractPersistence.prototype.wire = function(server) { + var that = this; + + server.on("published", function(packet) { + if (packet.retain) { + that.storeRetained(packet); + } + }); + + server.on("subscribed", function(pattern, client) { + that.lookupRetained(pattern, function(err, matches) { + if (err) { + client.emit("error", err); + return; + } + matches.forEach(function(match) { + client.forward(match.topic, match.payload, match, pattern); + }); + }); + }); + + server.on("close", function() { + that.close(); + }); +}; + +module.exports = AbstractPersistence; diff --git a/lib/persistance/levelup.js b/lib/persistance/levelup.js index 6c59c7e..ccd937f 100644 --- a/lib/persistance/levelup.js +++ b/lib/persistance/levelup.js @@ -1,6 +1,8 @@ var levelup = require("levelup"); var sublevel = require("level-sublevel"); +var AbstractPersistence = require("./abstract"); +var util = require("util"); function LevelUpPersistance(path, options) { options = options || {}; @@ -9,6 +11,8 @@ function LevelUpPersistance(path, options) { this._retained = this.db.sublevel("retained"); } +util.inherits(LevelUpPersistance, AbstractPersistence); + LevelUpPersistance.prototype.storeRetained = function(packet, cb) { this._retained.put(packet.topic, packet, cb); }; diff --git a/lib/persistance/memory.js 
b/lib/persistance/memory.js index e20e67f..89ac3c0 100644 --- a/lib/persistance/memory.js +++ b/lib/persistance/memory.js @@ -2,7 +2,7 @@ var LevelUpPersistance = require("./levelup"); var util = require("util"); var MemDOWN = require("memdown"); -var factory = function (location) { return new MemDOWN(location) }; +var factory = function (location) { return new MemDOWN(location); }; function MemoryPersistance() { LevelUpPersistance.call(this, "RAM", { db: factory }); diff --git a/lib/server.js b/lib/server.js index 55b652c..5633ad6 100644 --- a/lib/server.js +++ b/lib/server.js @@ -29,7 +29,7 @@ var persistance = require("./persistance"); * the client is passed as a parameter. * - `published`, when a new message is published; * the packet and the client are passed as parameters. - * - `subcribed`, when a new client is subscribed to a pattern; + * - `subscribed`, when a new client is subscribed to a pattern; * the pattern and the client are passed as parameters. * * @param {Object} opts The option object @@ -71,8 +71,6 @@ function Server(opts, callback) { this.ascoltatore = ascoltatori.build(this.opts.backend); this.ascoltatore.on("error", this.emit.bind(this)); - this.persistance = this.opts.persistance || new persistance.Memory(); - that.once("ready", callback); async.series([ @@ -96,24 +94,6 @@ function Server(opts, callback) { that.on("clientDisconnected", function(client) { delete that.clients[client.id]; }); - - that.on("published", function(packet) { - if (packet.retain) { - that.persistance.storeRetained(packet); - } - }); - - that.on("subscribed", function(pattern, client) { - that.persistance.lookupRetained(pattern, function(err, matches) { - if (err) { - client.emit("error", err); - return; - } - matches.forEach(function(match) { - client.forward(match.topic, match.payload, match, pattern); - }); - }); - }); } module.exports = Server; diff --git a/test/persistance/abstract.js b/test/persistance/abstract.js index 7648d0a..70c2e6d 100644 --- 
a/test/persistance/abstract.js +++ b/test/persistance/abstract.js @@ -1,6 +1,7 @@ "use strict"; var async = require("async"); +var EventEmitter = require("events").EventEmitter; module.exports = function(create) { @@ -93,4 +94,51 @@ module.exports = function(create) { } ], done); }); + + it("should wire itself up to the 'published' event of a Server", function(done) { + var em = new EventEmitter(); + var packet1 = { + topic: "hello/1", + qos: 0, + payload: "world", + messageId: 42, + retain: true + }; + + instance.wire(em); + + em.emit("published", packet1); + + instance.lookupRetained(packet1.topic, function(err, results) { + expect(results).to.eql([packet1]); + done(); + }); + }); + + it("should wire itself up to the 'subscribed' event of a Server", function(done) { + var em = new EventEmitter(); + var packet1 = { + topic: "hello/1", + qos: 0, + payload: "world", + messageId: 42, + retain: true + }; + + var client = { + forward: function(topic, payload, options, pattern) { + expect(topic).to.eql(packet1.topic); + expect(payload).to.eql(packet1.payload); + expect(options).to.eql(packet1); + expect(pattern).to.eql("hello/#"); + done(); + } + }; + + instance.wire(em); + + instance.storeRetained(packet1, function() { + em.emit("subscribed", "hello/#", client); + }); + }); }; diff --git a/test/persistance/levelup_spec.js b/test/persistance/levelup_spec.js index 0624d20..32e15d4 100644 --- a/test/persistance/levelup_spec.js +++ b/test/persistance/levelup_spec.js @@ -4,7 +4,7 @@ var abstract = require("./abstract"); var LevelUp = require("../../").persistance.LevelUp; var tmp = require("tmp"); -describe("mosca.persitance.LevelUp", function() { +describe("mosca.persistance.LevelUp", function() { abstract(function(cb) { tmp.dir(function (err, path) { if (err) { diff --git a/test/persistance/memory_spec.js b/test/persistance/memory_spec.js index 72f1682..9467020 100644 --- a/test/persistance/memory_spec.js +++ b/test/persistance/memory_spec.js @@ -3,7 +3,7 @@ var abstract 
= require("./abstract"); var Memory = require("../../").persistance.Memory; -describe("mosca.persitance.Memory", function() { +describe("mosca.persistance.Memory", function() { abstract(function(cb) { cb(null, new Memory()); }); diff --git a/test/server_spec.js b/test/server_spec.js index 410c155..7283407 100644 --- a/test/server_spec.js +++ b/test/server_spec.js @@ -1035,6 +1035,9 @@ describe("mosca.Server", function() { }); it("should support retained messages", function(done) { + var pers = new mosca.persistance.Memory(); + + pers.wire(instance); async.waterfall([ From 4b953cf106ebf167637b322e7dba2864b2f2a2ec Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Tue, 18 Jun 2013 17:12:31 +0100 Subject: [PATCH 03/30] Fixed spurious failure in the levelup tests. --- test/persistance/abstract.js | 36 +++++++++++++++++++++++------------- 1 file changed, 23 insertions(+), 13 deletions(-) diff --git a/test/persistance/abstract.js b/test/persistance/abstract.js index 70c2e6d..105faae 100644 --- a/test/persistance/abstract.js +++ b/test/persistance/abstract.js @@ -5,21 +5,21 @@ var EventEmitter = require("events").EventEmitter; module.exports = function(create) { - var instance = null; - beforeEach(function(done) { + var that = this; create(function(err, result) { if (err) { return done(err); } - instance = result; + that.instance = result; done(); }); }); afterEach(function(done) { - instance.close(done); + this.instance.close(done); + this.instance = null; }); it("should store retain messages", function(done) { @@ -30,11 +30,11 @@ module.exports = function(create) { messageId: 42, retain: true }; - instance.storeRetained(packet, done); + this.instance.storeRetained(packet, done); }); it("should lookup retain messages and not matching", function(done) { - instance.lookupRetained("hello", function(err, results) { + this.instance.lookupRetained("hello", function(err, results) { expect(results).to.eql([]); done(); }); @@ -49,6 +49,8 @@ module.exports = function(create) { 
retain: true }; + var instance = this.instance; + async.series([ function(cb) { instance.storeRetained(packet, cb); @@ -79,6 +81,8 @@ module.exports = function(create) { retain: true }; + var instance = this.instance; + async.series([ function(cb) { instance.storeRetained(packet1, cb); @@ -97,6 +101,7 @@ module.exports = function(create) { it("should wire itself up to the 'published' event of a Server", function(done) { var em = new EventEmitter(); + var instance = this.instance; var packet1 = { topic: "hello/1", qos: 0, @@ -109,14 +114,17 @@ module.exports = function(create) { em.emit("published", packet1); - instance.lookupRetained(packet1.topic, function(err, results) { - expect(results).to.eql([packet1]); - done(); - }); + setTimeout(function() { + instance.lookupRetained(packet1.topic, function(err, results) { + expect(results).to.eql([packet1]); + done(); + }); + }, 20); // 20ms will suffice }); it("should wire itself up to the 'subscribed' event of a Server", function(done) { var em = new EventEmitter(); + var instance = this.instance; var packet1 = { topic: "hello/1", qos: 0, @@ -137,8 +145,10 @@ module.exports = function(create) { instance.wire(em); - instance.storeRetained(packet1, function() { - em.emit("subscribed", "hello/#", client); - }); + setTimeout(function() { + instance.storeRetained(packet1, function() { + em.emit("subscribed", "hello/#", client); + }); + }, 20); // 20ms will suffice }); }; From 69d1d15961bb271fd20ddb2c765f29bec6ba479f Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Tue, 18 Jun 2013 18:14:34 +0100 Subject: [PATCH 04/30] Restoring a client's subscriptions after a disconnect. 
--- lib/client.js | 4 + lib/persistance/abstract.js | 21 ++ lib/persistance/levelup.js | 25 +++ lib/server.js | 2 + test/persistance/abstract.js | 384 +++++++++++++++++++++++++---------- test/server_spec.js | 111 +++++++++- 6 files changed, 433 insertions(+), 114 deletions(-) diff --git a/lib/client.js b/lib/client.js index 55e48b5..c02e723 100644 --- a/lib/client.js +++ b/lib/client.js @@ -235,6 +235,8 @@ Client.prototype.handleConnect = function(packet) { that.keepalive = packet.keepalive; that.will = packet.will; + that.clean = packet.clean; + logger.info("client connected"); that.setUpTimer(); @@ -405,6 +407,8 @@ Client.prototype.handleAuthorizePublish = function(err, success, packet) { Client.prototype.onClose = function() { var that = this, logger = that.logger; + this.server.emit("clientDisconnecting", that); + this.unsubAndClose(function() { if (that.will) { logger.info({ willTopic: that.will.topic }, "delivering last will"); diff --git a/lib/persistance/abstract.js b/lib/persistance/abstract.js index 69cc076..96d5bc1 100644 --- a/lib/persistance/abstract.js +++ b/lib/persistance/abstract.js @@ -27,6 +27,27 @@ AbstractPersistence.prototype.wire = function(server) { server.on("close", function() { that.close(); }); + + server.on("clientConnected", function(client) { + that.lookupSubscriptions(client, function(err, subscriptions) { + if (err) { + client.emit("error", err); + return; + } + + Object.keys(subscriptions).forEach(function(topic) { + client.handleAuthorizeSubscribe( + null, true, { + topic: topic, + qos: subscriptions[topic] + }, function() {}); + }); + }); + }); + + server.on("clientDisconnecting", function(client) { + that.storeSubscriptions(client); + }); }; module.exports = AbstractPersistence; diff --git a/lib/persistance/levelup.js b/lib/persistance/levelup.js index ccd937f..f40a8d7 100644 --- a/lib/persistance/levelup.js +++ b/lib/persistance/levelup.js @@ -9,6 +9,7 @@ function LevelUpPersistance(path, options) { options.valueEncoding = 
"json"; this.db = sublevel(levelup(path, options)); this._retained = this.db.sublevel("retained"); + this._subscriptions = this.db.sublevel("subscriptions"); } util.inherits(LevelUpPersistance, AbstractPersistence); @@ -36,6 +37,30 @@ LevelUpPersistance.prototype.lookupRetained = function(pattern, cb) { }); }; +LevelUpPersistance.prototype.storeSubscriptions = function(client, done) { + if (!client.clean) { + this._subscriptions.put(client.id, client.subscriptions, done); + } else if (done) { + done(); + } +}; + +LevelUpPersistance.prototype.lookupSubscriptions = function(client, done) { + var that = this; + this._subscriptions.get(client.id, function(err, subscriptions) { + if (subscriptions && client.clean) { + that._subscriptions.del(client.id, function() { + done(null, {}); + }); + } else if (!subscriptions) { + subscriptions = {}; + done(null, subscriptions); + } else { + done(null, subscriptions); + } + }); +}; + LevelUpPersistance.prototype.close = function(cb) { this.db.close(cb); }; diff --git a/lib/server.js b/lib/server.js index 5633ad6..993f3d9 100644 --- a/lib/server.js +++ b/lib/server.js @@ -25,6 +25,8 @@ var persistance = require("./persistance"); * Events: * - `clientConnected`, when a client is connected; * the client is passed as a parameter. + * - `clientDisconnecting`, when a client is being disconnected; + * the client is passed as a parameter. * - `clientDisconnected`, when a client is disconnected; * the client is passed as a parameter. 
* - `published`, when a new message is published; diff --git a/test/persistance/abstract.js b/test/persistance/abstract.js index 105faae..7fcd05b 100644 --- a/test/persistance/abstract.js +++ b/test/persistance/abstract.js @@ -22,133 +22,293 @@ module.exports = function(create) { this.instance = null; }); - it("should store retain messages", function(done) { - var packet = { - topic: "hello", - qos: 0, - payload: "world", - messageId: 42, - retain: true - }; - this.instance.storeRetained(packet, done); - }); + describe("retained messages", function() { - it("should lookup retain messages and not matching", function(done) { - this.instance.lookupRetained("hello", function(err, results) { - expect(results).to.eql([]); - done(); + it("should store retain messages", function(done) { + var packet = { + topic: "hello", + qos: 0, + payload: "world", + messageId: 42, + retain: true + }; + this.instance.storeRetained(packet, done); + }); + + it("should lookup retain messages and not matching", function(done) { + this.instance.lookupRetained("hello", function(err, results) { + expect(results).to.eql([]); + done(); + }); + }); + + it("should match and load a retain message", function(done) { + var packet = { + topic: "hello", + qos: 0, + payload: "world", + messageId: 42, + retain: true + }; + + var instance = this.instance; + + async.series([ + function(cb) { + instance.storeRetained(packet, cb); + }, + function(cb) { + instance.lookupRetained("hello", function(err, results) { + expect(results[0]).to.eql(packet); + cb(); + }); + } + ], done); + }); + + it("should match and load with a pattern", function(done) { + var packet1 = { + topic: "hello/1", + qos: 0, + payload: "world", + messageId: 42, + retain: true + }; + + var packet2 = { + topic: "hello/2", + qos: 0, + payload: "world", + messageId: 43, + retain: true + }; + + var instance = this.instance; + + async.series([ + function(cb) { + instance.storeRetained(packet1, cb); + }, + function(cb) { + 
instance.storeRetained(packet2, cb); + }, + function(cb) { + instance.lookupRetained("hello/#", function(err, results) { + expect(results).to.eql([packet1, packet2]); + cb(); + }); + } + ], done); }); - }); - it("should match and load a retain message", function(done) { - var packet = { - topic: "hello", - qos: 0, - payload: "world", - messageId: 42, - retain: true - }; - - var instance = this.instance; - - async.series([ - function(cb) { - instance.storeRetained(packet, cb); - }, - function(cb) { - instance.lookupRetained("hello", function(err, results) { - expect(results[0]).to.eql(packet); - cb(); + it("should wire itself up to the 'published' event of a Server", function(done) { + var em = new EventEmitter(); + var instance = this.instance; + var packet1 = { + topic: "hello/1", + qos: 0, + payload: "world", + messageId: 42, + retain: true + }; + + instance.wire(em); + + em.emit("published", packet1); + + setTimeout(function() { + instance.lookupRetained(packet1.topic, function(err, results) { + expect(results).to.eql([packet1]); + done(); }); - } - ], done); - }); + }, 20); // 20ms will suffice + }); + + it("should wire itself up to the 'subscribed' event of a Server", function(done) { + var em = new EventEmitter(); + var instance = this.instance; + var packet1 = { + topic: "hello/1", + qos: 0, + payload: "world", + messageId: 42, + retain: true + }; + + var client = { + forward: function(topic, payload, options, pattern) { + expect(topic).to.eql(packet1.topic); + expect(payload).to.eql(packet1.payload); + expect(options).to.eql(packet1); + expect(pattern).to.eql("hello/#"); + done(); + } + }; - it("should match and load with a pattern", function(done) { - var packet1 = { - topic: "hello/1", - qos: 0, - payload: "world", - messageId: 42, - retain: true - }; - - var packet2 = { - topic: "hello/2", - qos: 0, - payload: "world", - messageId: 43, - retain: true - }; - - var instance = this.instance; - - async.series([ - function(cb) { - 
instance.storeRetained(packet1, cb); - }, - function(cb) { - instance.storeRetained(packet2, cb); - }, - function(cb) { - instance.lookupRetained("hello/#", function(err, results) { - expect(results).to.eql([packet1, packet2]); - cb(); + instance.wire(em); + + setTimeout(function() { + instance.storeRetained(packet1, function() { + em.emit("subscribed", "hello/#", client); }); - } - ], done); - }); + }, 20); // 20ms will suffice + }); - it("should wire itself up to the 'published' event of a Server", function(done) { - var em = new EventEmitter(); - var instance = this.instance; - var packet1 = { - topic: "hello/1", - qos: 0, - payload: "world", - messageId: 42, - retain: true - }; + }); - instance.wire(em); + describe("subscriptions", function() { - em.emit("published", packet1); + it("should store the an offline client subscriptions", function(done) { + var client = { + id: "my client id - 42", + clean: false, + subscriptions: { + hello: 1 + } + }; + this.instance.storeSubscriptions(client, done); + }); - setTimeout(function() { - instance.lookupRetained(packet1.topic, function(err, results) { - expect(results).to.eql([packet1]); + it("should load the offline client subscriptions", function(done) { + var client = { + id: "my client id - 42", + clean: false, + subscriptions: { + hello: 1 + } + }; + this.instance.lookupSubscriptions(client, function(err, results) { + expect(results).to.eql({}); done(); }); - }, 20); // 20ms will suffice - }); + }); - it("should wire itself up to the 'subscribed' event of a Server", function(done) { - var em = new EventEmitter(); - var instance = this.instance; - var packet1 = { - topic: "hello/1", - qos: 0, - payload: "world", - messageId: 42, - retain: true - }; - - var client = { - forward: function(topic, payload, options, pattern) { - expect(topic).to.eql(packet1.topic); - expect(payload).to.eql(packet1.payload); - expect(options).to.eql(packet1); - expect(pattern).to.eql("hello/#"); - done(); - } - }; + it("should store and 
load the an offline client subscriptions", function(done) { + var instance = this.instance; + var client = { + id: "my client id - 42", + clean: false, + subscriptions: { + hello: 1 + } + }; + + instance.storeSubscriptions(client, function() { + instance.lookupSubscriptions(client, function(err, results) { + expect(results).to.eql(client.subscriptions); + done(); + }); + }); + }); + + it("should not store the subscriptions of clean client", function(done) { + var instance = this.instance; + var client = { + id: "my client id - 42", + clean: true, + subscriptions: { + hello: 1 + } + }; - instance.wire(em); + instance.storeSubscriptions(client, function() { + client.clean = false; + instance.lookupSubscriptions(client, function(err, results) { + expect(results).to.eql({}); + done(); + }); + }); + }); + + it("should load an empty subscriptions object for a clean client", function(done) { + var instance = this.instance; + var client = { + id: "my client id - 42", + clean: false, + subscriptions: { + hello: 1 + } + }; - setTimeout(function() { - instance.storeRetained(packet1, function() { - em.emit("subscribed", "hello/#", client); + instance.storeSubscriptions(client, function() { + client.clean = true; + instance.lookupSubscriptions(client, function(err, results) { + expect(results).to.eql({}); + done(); + }); }); - }, 20); // 20ms will suffice + }); + + it("should clean up the subscription store if a clean client connects", function(done) { + var instance = this.instance; + var client = { + id: "my client id - 42", + clean: false, + subscriptions: { + hello: 1 + } + }; + + instance.storeSubscriptions(client, function() { + client.clean = true; + instance.lookupSubscriptions(client, function(err, results) { + client.clean = false; + instance.lookupSubscriptions(client, function(err, results) { + expect(results).to.eql({}); + done(); + }); + }); + }); + }); + + it("should wire itself up to the 'clientConnected' event of a Server", function(done) { + var em = new 
EventEmitter(); + var instance = this.instance; + + var client = { + id: "my client id - 42", + clean: false, + subscriptions: { + hello: 1 + }, + handleAuthorizeSubscribe: function(err, success, subscription, callback) { + expect(success).to.eql(true); + expect(subscription).to.eql({ topic: "hello", qos: 1 }); + expect(callback).to.be.a("function"); + done(); + } + }; + + instance.wire(em); + + setTimeout(function() { + instance.storeSubscriptions(client, function() { + em.emit("clientConnected", client); + }); + }, 20); // 20ms will suffice + }); + + it("should wire itself up to the 'clientDisconnecting' event of a Server", function(done) { + var em = new EventEmitter(); + var instance = this.instance; + + var client = { + id: "my client id - 42", + clean: false, + subscriptions: { + hello: 1 + } + }; + + instance.wire(em); + em.emit("clientDisconnecting", client); + + setTimeout(function() { + instance.lookupSubscriptions(client, function(err, results) { + expect(results).to.eql(client.subscriptions); + done(); + }); + }, 20); // 20ms will suffice + }); }); }; diff --git a/test/server_spec.js b/test/server_spec.js index 7283407..d740801 100644 --- a/test/server_spec.js +++ b/test/server_spec.js @@ -63,10 +63,16 @@ describe("mosca.Server", function() { } - function buildAndConnect(done, callback) { + function buildAndConnect(done, opts, callback) { + + if (typeof opts === "function") { + callback = opts; + opts = buildOpts(); + } + buildClient(done, function(client) { - client.connect(buildOpts()); + client.connect(opts); client.on('connack', function(packet) { callback(client); @@ -1094,4 +1100,105 @@ describe("mosca.Server", function() { } ]); }); + + it.skip("should support unclean clients", function(done) { + var pers = new mosca.persistance.Memory(); + var opts = buildOpts(); + + opts.clientId = "mosca-unclean-clients-test"; + opts.clean = false; + + pers.wire(instance); + + async.series([ + + function(cb) { + buildAndConnect(cb, opts, function(client) { + 
var subscriptions = [{ + topic: "hello", + qos: 0 + }]; + + client.subscribe({ + subscriptions: subscriptions, + messageId: 42 + }); + + client.on("suback", function() { + client.stream.end(); + }); + }); + }, + + function(cb) { + buildAndConnect(cb, function(client) { + client.publish({ + topic: "hello", + qos: 0, + payload: "world", + messageId: 42 + }); + client.stream.end(); + }); + }, + + function(cb) { + buildAndConnect(cb, opts, function(client) { + client.on("publish", function(packet) { + expect(packet.topic).to.be.eql("hello"); + expect(packet.payload).to.be.eql("world"); + client.disconnect(); + }); + }); + } + ], done); + }); + + it("should restore subscriptions for uncleaned clients", function(done) { + var pers = new mosca.persistance.Memory(); + var opts = buildOpts(); + + opts.clientId = "mosca-unclean-clients-test"; + opts.clean = false; + + pers.wire(instance); + + async.series([ + + function(cb) { + buildAndConnect(cb, opts, function(client) { + var subscriptions = [{ + topic: "hello", + qos: 0 + }]; + + client.subscribe({ + subscriptions: subscriptions, + messageId: 42 + }); + + client.on("suback", function() { + client.stream.end(); + }); + }); + }, + + function(cb) { + buildAndConnect(cb, opts, function(client) { + client.publish({ + topic: "hello", + qos: 0, + payload: "world", + messageId: 42 + }); + + client.on("publish", function(packet) { + expect(packet.topic).to.be.eql("hello"); + expect(packet.payload).to.be.eql("world"); + client.disconnect(); + }); + }); + } + ], done); + }); }); From a8785e2a927e977f652d2c99f80a4c12b2079f45 Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Tue, 18 Jun 2013 18:20:04 +0100 Subject: [PATCH 05/30] Using a global nop instead of creating an empty function. 
--- lib/persistance/abstract.js | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/lib/persistance/abstract.js b/lib/persistance/abstract.js index 96d5bc1..4006147 100644 --- a/lib/persistance/abstract.js +++ b/lib/persistance/abstract.js @@ -5,6 +5,7 @@ function AbstractPersistence() { AbstractPersistence.prototype.wire = function(server) { var that = this; + var nop = function() {}; server.on("published", function(packet) { if (packet.retain) { @@ -37,10 +38,10 @@ AbstractPersistence.prototype.wire = function(server) { Object.keys(subscriptions).forEach(function(topic) { client.handleAuthorizeSubscribe( - null, true, { + null, true, { topic: topic, qos: subscriptions[topic] - }, function() {}); + }, nop); }); }); }); From a193e4df55c77503aa59d5ed2e746c5a3a169801 Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Tue, 18 Jun 2013 19:25:13 +0100 Subject: [PATCH 06/30] Added the support for uncleaned clients. --- lib/persistance/abstract.js | 10 +++ lib/persistance/levelup.js | 76 +++++++++++++++-- package.json | 3 +- test/persistance/abstract.js | 153 +++++++++++++++++++++++++++++++++++ test/server_spec.js | 2 +- 5 files changed, 234 insertions(+), 10 deletions(-) diff --git a/lib/persistance/abstract.js b/lib/persistance/abstract.js index 4006147..c6c93de 100644 --- a/lib/persistance/abstract.js +++ b/lib/persistance/abstract.js @@ -11,6 +11,7 @@ AbstractPersistence.prototype.wire = function(server) { if (packet.retain) { that.storeRetained(packet); } + that.storeOfflinePacket(packet); }); server.on("subscribed", function(pattern, client) { @@ -44,6 +45,15 @@ AbstractPersistence.prototype.wire = function(server) { }, nop); }); }); + + that.streamOfflinePackets(client, function(err, packet) { + if (err) { + client.emit("error", err); + return; + } + + client.forward(packet.topic, packet.payload, packet, packet.topic); + }); }); server.on("clientDisconnecting", function(client) { diff --git a/lib/persistance/levelup.js 
b/lib/persistance/levelup.js index f40a8d7..99cb0cd 100644 --- a/lib/persistance/levelup.js +++ b/lib/persistance/levelup.js @@ -3,13 +3,16 @@ var levelup = require("levelup"); var sublevel = require("level-sublevel"); var AbstractPersistence = require("./abstract"); var util = require("util"); +var range = require('level-range'); function LevelUpPersistance(path, options) { options = options || {}; options.valueEncoding = "json"; this.db = sublevel(levelup(path, options)); this._retained = this.db.sublevel("retained"); + this._clientSubscriptions = this.db.sublevel("clientSubscriptions"); this._subscriptions = this.db.sublevel("subscriptions"); + this._offlinePackets = this.db.sublevel("offlinePackets"); } util.inherits(LevelUpPersistance, AbstractPersistence); @@ -38,27 +41,84 @@ LevelUpPersistance.prototype.lookupRetained = function(pattern, cb) { }; LevelUpPersistance.prototype.storeSubscriptions = function(client, done) { + var that = this; if (!client.clean) { - this._subscriptions.put(client.id, client.subscriptions, done); + this._clientSubscriptions.put(client.id, client.subscriptions, done); + Object.keys(client.subscriptions).forEach(function(key) { + that._subscriptions.put(util.format("%s:%s", key, client.id), { + client: client.id, + topic: key, + qos: client.subscriptions[key] + }); + }); } else if (done) { done(); } }; +var nop = function() {}; LevelUpPersistance.prototype.lookupSubscriptions = function(client, done) { var that = this; - this._subscriptions.get(client.id, function(err, subscriptions) { + this._clientSubscriptions.get(client.id, function(err, subscriptions) { if (subscriptions && client.clean) { - that._subscriptions.del(client.id, function() { - done(null, {}); + that._clientSubscriptions.del(client.id, function() { + that.streamOfflinePackets(client, nop, function() { + Object.keys(subscriptions).forEach(function(key) { + that._subscriptions.del(util.format("%s:%s", key, client.id)); + }); + + if (done) { + done(null, {}); + } + 
}); }); - } else if (!subscriptions) { - subscriptions = {}; - done(null, subscriptions); } else { - done(null, subscriptions); + if (!subscriptions) { + subscriptions = {}; + } + + if (done) { + done(null, subscriptions); + } + } + }); +}; + +LevelUpPersistance.prototype.storeOfflinePacket = function(packet, done) { + var that = this; + var stream = range(that._subscriptions, '%s:', packet.topic); + + stream.on("data", function(data) { + var sub = data.value; + that._offlinePackets.put(util.format("%s:%s", sub.client, new Date()), packet); + }); + + if (done) { + stream.on("error", done); + stream.on("end", done); + } +}; + +LevelUpPersistance.prototype.streamOfflinePackets = function(client, cb, done) { + + var that = this; + var stream = range(that._offlinePackets, '%s:', client.id); + stream.on("data", function(data) { + var key = util.format("%s:%s", client.id, data.key); + that._offlinePackets.del(key); + + if (!client.clean) { + cb(null, data.value); } }); + + if (cb) { + stream.on("error", cb); + } + + if (done) { + stream.on("end", done); + } }; LevelUpPersistance.prototype.close = function(cb) { diff --git a/package.json b/package.json index b7ef7f1..181d407 100644 --- a/package.json +++ b/package.json @@ -57,7 +57,8 @@ "levelup": "~0.10.0", "level-sublevel": "~4.7.0", "tmp": "0.0.20", - "leveldown": "~0.6.1" + "leveldown": "~0.6.1", + "level-range": "0.0.0" }, "optionalDependencies": { "zmq": "~2.4.0", diff --git a/test/persistance/abstract.js b/test/persistance/abstract.js index 7fcd05b..642ecca 100644 --- a/test/persistance/abstract.js +++ b/test/persistance/abstract.js @@ -311,4 +311,157 @@ module.exports = function(create) { }, 20); // 20ms will suffice }); }); + + describe("offline packets", function() { + var client = { + id: "my client id - 42", + clean: false, + subscriptions: { + hello: 1 + } + }; + + var packet = { + topic: "hello", + qos: 0, + payload: "world", + messageId: 42 + }; + + beforeEach(function(done) { + 
this.instance.storeSubscriptions(client, done); + }); + + it("should store an offline packet", function(done) { + this.instance.storeOfflinePacket(packet, done) + }); + + it("should not stream any offline packet", function(done) { + this.instance.streamOfflinePackets(client, function(err, packet) { + done(new Error("this should never be called")); + }); + done(); + }); + + it("should store and stream an offline packet", function(done) { + var instance = this.instance; + instance.storeOfflinePacket(packet, function() { + instance.streamOfflinePackets(client, function(err, p) { + expect(p).to.eql(packet); + done(); + }); + }) + }); + + it("should delete the offline packets once streamed", function(done) { + var instance = this.instance; + instance.storeOfflinePacket(packet, function() { + instance.streamOfflinePackets(client, function(err, p) { + instance.streamOfflinePackets(client, function(err, p2) { + done(new Error("this should never be called")); + }); + done(); + }); + }) + }); + + it("should clean up the offline packets store if a clean client connects", function(done) { + var instance = this.instance; + var client = { + id: "my client id - 42", + clean: false, + subscriptions: { + hello: 1 + } + }; + + instance.storeOfflinePacket(packet, function() { + client.clean = true; + instance.lookupSubscriptions(client, function(err, results) { + client.clean = false; + instance.streamOfflinePackets(client, function(err, p) { + done(new Error("this should never be called")); + }); + done(); + }); + }); + }); + + it("should not store any offline packet for a clean client", function(done) { + var instance = this.instance; + var client = { + id: "my client id - 42", + clean: false, + subscriptions: { + hello: 1 + } + }; + + client.clean = true; + instance.lookupSubscriptions(client, function(err, results) { + instance.storeOfflinePacket(packet, function() { + client.clean = false; + instance.streamOfflinePackets(client, function(err, p) { + done(new Error("this should 
never be called")); + }); + done(); + }); + }); + }); + + it("should not stream any offline packet to a clean client", function(done) { + var instance = this.instance; + var client = { + id: "my client id - 42", + clean: false, + subscriptions: { + hello: 1 + } + }; + + instance.storeOfflinePacket(packet, function() { + client.clean = true; + instance.streamOfflinePackets(client, function(err, p) { + done(new Error("this should never be called")); + }); + done(); + }); + }); + + it("should wire itself up to the 'published' event of a Server", function(done) { + var em = new EventEmitter(); + var instance = this.instance; + + instance.wire(em); + em.emit("published", packet); + + setTimeout(function() { + instance.streamOfflinePackets(client, function(err, p1) { + expect(p1).to.eql(packet); + done(); + }); + }, 20); // 20ms will suffice + }); + + it("should wire itself up to the 'clientConnected' event of a Server", function(done) { + var em = new EventEmitter(); + var instance = this.instance; + + instance.wire(em); + + client.forward = function(topic, payload, options, pattern) { + expect(topic).to.eql(packet.topic); + expect(payload).to.eql(packet.payload); + expect(options).to.eql(packet); + expect(pattern).to.eql("hello"); + done(); + }; + + client.handleAuthorizeSubscribe = function() {}; + + instance.storeOfflinePacket(packet, function() { + em.emit("clientConnected", client); + }); + }); + }); }; diff --git a/test/server_spec.js b/test/server_spec.js index d740801..d857a96 100644 --- a/test/server_spec.js +++ b/test/server_spec.js @@ -1101,7 +1101,7 @@ describe("mosca.Server", function() { ]); }); - it.skip("should support unclean clients", function(done) { + it("should support unclean clients", function(done) { var pers = new mosca.persistance.Memory(); var opts = buildOpts(); From 440ede7c2a694397c5f5c98f3afd45a6b20593f0 Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Wed, 19 Jun 2013 13:54:01 +0100 Subject: [PATCH 07/30] Added the TTL to the 
persistance. The dependency to node-level-ttl is to the git repo, as it needs a patch that is still not on NPM. --- lib/persistance/levelup.js | 30 ++++++++++++++++++++--- lib/persistance/memory.js | 6 +++-- package.json | 3 ++- test/persistance/abstract.js | 42 +++++++++++++++++++++++++++----- test/persistance/levelup_spec.js | 4 +-- test/persistance/memory_spec.js | 4 +-- test/server_spec.js | 10 ++++++++ 7 files changed, 82 insertions(+), 17 deletions(-) diff --git a/lib/persistance/levelup.js b/lib/persistance/levelup.js index 99cb0cd..498978f 100644 --- a/lib/persistance/levelup.js +++ b/lib/persistance/levelup.js @@ -4,11 +4,24 @@ var sublevel = require("level-sublevel"); var AbstractPersistence = require("./abstract"); var util = require("util"); var range = require('level-range'); +var ttl = require('level-ttl'); function LevelUpPersistance(path, options) { options = options || {}; options.valueEncoding = "json"; - this.db = sublevel(levelup(path, options)); + options.ttl = options.ttl || {}; + + // TTL for subscriptions is 1 hour + options.ttl.subscriptions = options.ttl.subscriptions || 60 * 60 * 1000; + + // TTL for packets is 1 hour + options.ttl.packets = options.ttl.packets || 60 * 60 * 1000; + + // the checkFrequency is 1 minute + options.ttl.checkFrequency = options.ttl.checkFrequency || 60 * 1000; + + this.options = options; + this.db = ttl(levelup(path, options), options.ttl); this._retained = this.db.sublevel("retained"); this._clientSubscriptions = this.db.sublevel("clientSubscriptions"); this._subscriptions = this.db.sublevel("subscriptions"); @@ -42,14 +55,18 @@ LevelUpPersistance.prototype.lookupRetained = function(pattern, cb) { LevelUpPersistance.prototype.storeSubscriptions = function(client, done) { var that = this; + var ttl = { + ttl: that.options.ttl.subscriptions + }; + if (!client.clean) { - this._clientSubscriptions.put(client.id, client.subscriptions, done); + this._clientSubscriptions.put(client.id, client.subscriptions, ttl, 
done); Object.keys(client.subscriptions).forEach(function(key) { that._subscriptions.put(util.format("%s:%s", key, client.id), { client: client.id, topic: key, qos: client.subscriptions[key] - }); + }, ttl); }); } else if (done) { done(); @@ -87,10 +104,15 @@ LevelUpPersistance.prototype.lookupSubscriptions = function(client, done) { LevelUpPersistance.prototype.storeOfflinePacket = function(packet, done) { var that = this; var stream = range(that._subscriptions, '%s:', packet.topic); + var ttl = { + ttl: that.options.ttl.packets + }; stream.on("data", function(data) { var sub = data.value; - that._offlinePackets.put(util.format("%s:%s", sub.client, new Date()), packet); + that._offlinePackets.put( + util.format("%s:%s", sub.client, new Date()), + packet, ttl); }); if (done) { diff --git a/lib/persistance/memory.js b/lib/persistance/memory.js index 89ac3c0..c0097ee 100644 --- a/lib/persistance/memory.js +++ b/lib/persistance/memory.js @@ -4,8 +4,10 @@ var util = require("util"); var MemDOWN = require("memdown"); var factory = function (location) { return new MemDOWN(location); }; -function MemoryPersistance() { - LevelUpPersistance.call(this, "RAM", { db: factory }); +function MemoryPersistance(opts) { + opts = opts || {}; + opts.db = factory; + LevelUpPersistance.call(this, "RAM", opts); } util.inherits(MemoryPersistance, LevelUpPersistance); diff --git a/package.json b/package.json index 181d407..466e50b 100644 --- a/package.json +++ b/package.json @@ -58,7 +58,8 @@ "level-sublevel": "~4.7.0", "tmp": "0.0.20", "leveldown": "~0.6.1", - "level-range": "0.0.0" + "level-range": "0.0.0", + "level-ttl": "git://github.com/rvagg/node-level-ttl" }, "optionalDependencies": { "zmq": "~2.4.0", diff --git a/test/persistance/abstract.js b/test/persistance/abstract.js index 642ecca..7a0f28f 100644 --- a/test/persistance/abstract.js +++ b/test/persistance/abstract.js @@ -4,10 +4,17 @@ var async = require("async"); var EventEmitter = require("events").EventEmitter; module.exports 
= function(create) { + var opts = { + ttl: { + checkFrequency: 500, + subscriptions: 500, + packets: 500 + } + }; beforeEach(function(done) { var that = this; - create(function(err, result) { + create(opts, function(err, result) { if (err) { return done(err); } @@ -18,8 +25,11 @@ module.exports = function(create) { }); afterEach(function(done) { - this.instance.close(done); - this.instance = null; + var that = this; + setTimeout(function() { + that.instance.close(done); + that.instance = null; + }, 1); }); describe("retained messages", function() { @@ -310,6 +320,26 @@ module.exports = function(create) { }); }, 20); // 20ms will suffice }); + + it("should clean up the subscription store after a TTL", function(done) { + var instance = this.instance; + var client = { + id: "my client id - 42", + clean: false, + subscriptions: { + hello: 1 + } + }; + + instance.storeSubscriptions(client, function() { + setTimeout(function() { + instance.lookupSubscriptions(client, function(err, results) { + expect(results).to.eql({}); + done(); + }); + }, opts.ttl.checkFrequency * 3); + }); + }); }); describe("offline packets", function() { @@ -333,7 +363,7 @@ module.exports = function(create) { }); it("should store an offline packet", function(done) { - this.instance.storeOfflinePacket(packet, done) + this.instance.storeOfflinePacket(packet, done); }); it("should not stream any offline packet", function(done) { @@ -350,7 +380,7 @@ module.exports = function(create) { expect(p).to.eql(packet); done(); }); - }) + }); }); it("should delete the offline packets once streamed", function(done) { @@ -362,7 +392,7 @@ module.exports = function(create) { }); done(); }); - }) + }); }); it("should clean up the offline packets store if a clean client connects", function(done) { diff --git a/test/persistance/levelup_spec.js b/test/persistance/levelup_spec.js index 32e15d4..7dd8e2a 100644 --- a/test/persistance/levelup_spec.js +++ b/test/persistance/levelup_spec.js @@ -5,13 +5,13 @@ var LevelUp = 
require("../../").persistance.LevelUp; var tmp = require("tmp"); describe("mosca.persistance.LevelUp", function() { - abstract(function(cb) { + abstract(function(opts, cb) { tmp.dir(function (err, path) { if (err) { return cb(err); } - cb(null, new LevelUp(path)); + cb(null, new LevelUp(path, opts)); }); }); }); diff --git a/test/persistance/memory_spec.js b/test/persistance/memory_spec.js index 9467020..a2242d7 100644 --- a/test/persistance/memory_spec.js +++ b/test/persistance/memory_spec.js @@ -4,7 +4,7 @@ var abstract = require("./abstract"); var Memory = require("../../").persistance.Memory; describe("mosca.persistance.Memory", function() { - abstract(function(cb) { - cb(null, new Memory()); + abstract(function(opts, cb) { + cb(null, new Memory(opts)); }); }); diff --git a/test/server_spec.js b/test/server_spec.js index d857a96..8a23ae6 100644 --- a/test/server_spec.js +++ b/test/server_spec.js @@ -1130,6 +1130,11 @@ describe("mosca.Server", function() { }); }, + function(cb) { + // some time to let the write settle + setTimeout(cb, 5); + }, + function(cb) { buildAndConnect(cb, function(client) { client.publish({ @@ -1183,6 +1188,11 @@ describe("mosca.Server", function() { }); }, + function(cb) { + // some time to let the write settle + setTimeout(cb, 5); + }, + function(cb) { buildAndConnect(cb, opts, function(client) { client.publish({ From d4838ce2efd129e21ceb8cab38b57eb580e26725 Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Thu, 20 Jun 2013 11:10:15 +0100 Subject: [PATCH 08/30] Updates node-level-ttl to the released 0.2.0. 
--- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 466e50b..cb961c7 100644 --- a/package.json +++ b/package.json @@ -59,7 +59,7 @@ "tmp": "0.0.20", "leveldown": "~0.6.1", "level-range": "0.0.0", - "level-ttl": "git://github.com/rvagg/node-level-ttl" + "level-ttl": "~0.2.0" }, "optionalDependencies": { "zmq": "~2.4.0", From a33587ff156c7c79e52defb8df628590f8953cc2 Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Fri, 21 Jun 2013 09:12:18 +0100 Subject: [PATCH 09/30] Made the persistances work even if called as a Function. --- lib/persistance/levelup.js | 3 +++ lib/persistance/memory.js | 4 ++++ 2 files changed, 7 insertions(+) diff --git a/lib/persistance/levelup.js b/lib/persistance/levelup.js index 498978f..d44e9d3 100644 --- a/lib/persistance/levelup.js +++ b/lib/persistance/levelup.js @@ -7,6 +7,9 @@ var range = require('level-range'); var ttl = require('level-ttl'); function LevelUpPersistance(path, options) { + if (!(this instanceof LevelUpPersistance)) { + return new LevelUpPersistance(path, options); + } options = options || {}; options.valueEncoding = "json"; options.ttl = options.ttl || {}; diff --git a/lib/persistance/memory.js b/lib/persistance/memory.js index c0097ee..93a61e5 100644 --- a/lib/persistance/memory.js +++ b/lib/persistance/memory.js @@ -5,6 +5,10 @@ var MemDOWN = require("memdown"); var factory = function (location) { return new MemDOWN(location); }; function MemoryPersistance(opts) { + if (!(this instanceof MemoryPersistance)) { + return new MemoryPersistance(path, options); + } + opts = opts || {}; opts.db = factory; LevelUpPersistance.call(this, "RAM", opts); From 031e8ffb1f2679463bf0e1c0fa8494b628140ae2 Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Fri, 21 Jun 2013 14:36:16 +0100 Subject: [PATCH 10/30] Added basic persistance layer for redis. 
--- lib/persistance/index.js | 1 + lib/persistance/memory.js | 8 +- lib/persistance/redis.js | 163 +++++++++++ package.json | 3 +- redis.conf | 471 +++++++++++++++++++++++++++++++ test/persistance/abstract.js | 13 +- test/persistance/levelup_spec.js | 13 +- test/persistance/memory_spec.js | 13 +- test/persistance/redis_spec.js | 26 ++ 9 files changed, 693 insertions(+), 18 deletions(-) create mode 100644 lib/persistance/redis.js create mode 100644 redis.conf create mode 100644 test/persistance/redis_spec.js diff --git a/lib/persistance/index.js b/lib/persistance/index.js index 1161366..77b99bc 100644 --- a/lib/persistance/index.js +++ b/lib/persistance/index.js @@ -1,3 +1,4 @@ module.exports.Memory = require("./memory"); module.exports.LevelUp = require("./levelup"); +module.exports.Redis = require("./redis"); diff --git a/lib/persistance/memory.js b/lib/persistance/memory.js index 93a61e5..c3b2dae 100644 --- a/lib/persistance/memory.js +++ b/lib/persistance/memory.js @@ -4,14 +4,14 @@ var util = require("util"); var MemDOWN = require("memdown"); var factory = function (location) { return new MemDOWN(location); }; -function MemoryPersistance(opts) { +function MemoryPersistance(options) { if (!(this instanceof MemoryPersistance)) { return new MemoryPersistance(path, options); } - opts = opts || {}; - opts.db = factory; - LevelUpPersistance.call(this, "RAM", opts); + options = options || {}; + options.db = factory; + LevelUpPersistance.call(this, "RAM", options); } util.inherits(MemoryPersistance, LevelUpPersistance); diff --git a/lib/persistance/redis.js b/lib/persistance/redis.js new file mode 100644 index 0000000..6ca6ebe --- /dev/null +++ b/lib/persistance/redis.js @@ -0,0 +1,163 @@ + +var AbstractPersistence = require("./abstract"); +var redis = require("redis"); +var util = require("util"); +var Qlobber = require("qlobber").Qlobber; +var async = require("async"); + +function RedisPersistance(options) { + if (!(this instanceof RedisPersistance)) { + return new 
RedisPersistance(path, options); + } + + options.ttl = options.ttl || {}; + + // TTL for subscriptions is 1 hour + options.ttl.subscriptions = options.ttl.subscriptions || 60 * 60 * 1000; + + // TTL for packets is 1 hour + options.ttl.packets = options.ttl.packets || 60 * 60 * 1000; + + this.options = options; + + this._client = redis.createClient( + options.port, + options.host, + options.redisOptions); + + if (options.password) { + this._client.auth(options.password); + } +} + +util.inherits(RedisPersistance, AbstractPersistence); + +RedisPersistance.prototype.storeRetained = function(packet, cb) { + this._client.hset("retained", packet.topic, JSON.stringify(packet), cb); +}; + +RedisPersistance.prototype.lookupRetained = function(pattern, done) { + var that = this; + var matched = []; + var match = function(topic, cb) { + that._client.hget("retained", topic, function(err, packet) { + if (packet) { + matched.push(JSON.parse(packet)); + } + + cb(err, matched); + }); + }; + + if (pattern.indexOf("#") >= 0 || pattern.indexOf("+") >= 0) { + var qlobber = new Qlobber({ + separator: "/" + }); + qlobber.add(pattern, true); + + this._client.hkeys("retained", function(err, topics) { + topics.sort(); + topics = topics.filter(function(topic) { + return qlobber.match(topic).length > 0; + }); + + async.each(topics, match, function(err) { + done(err, matched); + }); + }); + + // do something + } else { + match(pattern, done); + } +}; + +RedisPersistance.prototype.storeSubscriptions = function(client, cb) { + if (client.clean) { + return cb(); + } + var clientSubKey = "client:sub:" + client.id; + + var op = this._client.multi() + .set(clientSubKey, JSON.stringify(client.subscriptions)) + .expire(clientSubKey, this.options.ttl.subscriptions / 1000); + + Object.keys(client.subscriptions).forEach(function(e) { + op = op.sadd("sub:client:" + e, client.id); + }); + + op.exec(cb); +}; + +RedisPersistance.prototype._cleanClient = function(client, cb) { + if (client.clean) { + 
this._client.multi() + .del("client:sub:" + client.id) + .del("packets:" + client.id) + .exec(function(err) { + if (cb) { + cb(err, {}); + } + }); + return true; + } + + return false; +}; + + +RedisPersistance.prototype.lookupSubscriptions = function(client, cb) { + if (this._cleanClient(client, cb)) { + return; + } + + this._client.get("client:sub:" + client.id, function(err, result) { + cb(err, JSON.parse(result) || {}); + }); +}; + +RedisPersistance.prototype.storeOfflinePacket = function(packet, done) { + var that = this; + var toDelete = []; + + this._client.smembers("sub:client:" + packet.topic, function(err, clients) { + if (err) { + return cb(err); + } + + async.each(clients, function(client, cb) { + that._client.exists("client:sub:" + client, function(err, result) { + if (result) { + that._client.lpush("packets:" + client, JSON.stringify(packet), cb); + } else { + cb(); + } + }); + }, done); + }); +}; + +RedisPersistance.prototype.streamOfflinePackets = function(client, cb) { + var that = this; + + if (this._cleanClient(client)) { + return; + } + + that._client.rpop("packets:" + client.id, function(err, result) { + if (result) { + cb(null, JSON.parse(result)); + that.streamOfflinePackets(client,cb); + } + }); +}; + +RedisPersistance.prototype.close = function(cb) { + if (cb) { + this._client.on("end", cb); + } + + this._client.quit(); +}; + +module.exports = RedisPersistance; diff --git a/package.json b/package.json index cb961c7..3089f4f 100644 --- a/package.json +++ b/package.json @@ -59,7 +59,8 @@ "tmp": "0.0.20", "leveldown": "~0.6.1", "level-range": "0.0.0", - "level-ttl": "~0.2.0" + "level-ttl": "~0.2.0", + "qlobber": "~0.1.1" }, "optionalDependencies": { "zmq": "~2.4.0", diff --git a/redis.conf b/redis.conf new file mode 100644 index 0000000..7e0b77a --- /dev/null +++ b/redis.conf @@ -0,0 +1,471 @@ +# Redis configuration file example + +# Note on units: when memory size is needed, it is possible to specifiy +# it in the usual form of 1k 5GB 4M and 
so forth: +# +# 1k => 1000 bytes +# 1kb => 1024 bytes +# 1m => 1000000 bytes +# 1mb => 1024*1024 bytes +# 1g => 1000000000 bytes +# 1gb => 1024*1024*1024 bytes +# +# units are case insensitive so 1GB 1Gb 1gB are all the same. + +# By default Redis does not run as a daemon. Use 'yes' if you need it. +# Note that Redis will write a pid file in /usr/local/var/run/redis.pid when daemonized. +daemonize no + +# When running daemonized, Redis writes a pid file in /usr/local/var/run/redis.pid by +# default. You can specify a custom pid file location here. +pidfile /usr/local/var/run/redis.pid + +# Accept connections on the specified port, default is 6379. +# If port 0 is specified Redis will not listen on a TCP socket. +port 6379 + +# If you want you can bind a single interface, if the bind option is not +# specified all the interfaces will listen for incoming connections. +# +# bind 127.0.0.1 + +# Specify the path for the unix socket that will be used to listen for +# incoming connections. There is no default, so Redis will not listen +# on a unix socket when not specified. +# +# unixsocket /tmp/redis.sock +# unixsocketperm 755 + +# Close the connection after a client is idle for N seconds (0 to disable) +timeout 300 + +# Set server verbosity to 'debug' +# it can be one of: +# debug (a lot of information, useful for development/testing) +# verbose (many rarely useful info, but not a mess like the debug level) +# notice (moderately verbose, what you want in production probably) +# warning (only very important / critical messages are logged) +loglevel verbose + +# Specify the log file name. Also 'stdout' can be used to force +# Redis to log on the standard output. Note that if you use standard +# output for logging but daemonize, logs will be sent to /dev/null +logfile stdout + +# To enable logging to the system logger, just set 'syslog-enabled' to yes, +# and optionally update the other syslog parameters to suit your needs. 
+# syslog-enabled no + +# Specify the syslog identity. +# syslog-ident redis + +# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. +# syslog-facility local0 + +# Set the number of databases. The default database is DB 0, you can select +# a different one on a per-connection basis using SELECT where +# dbid is a number between 0 and 'databases'-1 +databases 16 + +################################ SNAPSHOTTING ################################# +# +# Save the DB on disk: +# +# save +# +# Will save the DB if both the given number of seconds and the given +# number of write operations against the DB occurred. +# +# In the example below the behaviour will be to save: +# after 900 sec (15 min) if at least 1 key changed +# after 300 sec (5 min) if at least 10 keys changed +# after 60 sec if at least 10000 keys changed +# +# Note: you can disable saving at all commenting all the "save" lines. + +save 900 1 +save 300 10 +save 60 10000 + +# Compress string objects using LZF when dump .rdb databases? +# For default that's set to 'yes' as it's almost always a win. +# If you want to save some CPU in the saving child set it to 'no' but +# the dataset will likely be bigger if you have compressible values or keys. +rdbcompression yes + +# The filename where to dump the DB +dbfilename dump.rdb + +# The working directory. +# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# Also the Append Only File will be created inside this directory. +# +# Note that you must specify a directory here, not a file name. +dir /usr/local/var/db/redis/ + +################################# REPLICATION ################################# + +# Master-Slave replication. Use slaveof to make a Redis instance a copy of +# another Redis server. 
Note that the configuration is local to the slave +# so for example it is possible to configure the slave to save the DB with a +# different interval, or to listen to another port, and so on. +# +# slaveof + +# If the master is password protected (using the "requirepass" configuration +# directive below) it is possible to tell the slave to authenticate before +# starting the replication synchronization process, otherwise the master will +# refuse the slave request. +# +# masterauth + +# When a slave lost the connection with the master, or when the replication +# is still in progress, the slave can act in two different ways: +# +# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will +# still reply to client requests, possibly with out of data data, or the +# data set may just be empty if this is the first synchronization. +# +# 2) if slave-serve-stale data is set to 'no' the slave will reply with +# an error "SYNC with master in progress" to all the kind of commands +# but to INFO and SLAVEOF. +# +slave-serve-stale-data yes + +################################## SECURITY ################################### + +# Require clients to issue AUTH before processing any other +# commands. This might be useful in environments in which you do not trust +# others with access to the host running redis-server. +# +# This should stay commented out for backward compatibility and because most +# people do not need auth (e.g. they run their own servers). +# +# Warning: since Redis is pretty fast an outside user can try up to +# 150k passwords per second against a good box. This means that you should +# use a very strong password otherwise it will be very easy to break. +# +# requirepass foobared + +# Command renaming. +# +# It is possilbe to change the name of dangerous commands in a shared +# environment. 
For instance the CONFIG command may be renamed into something +# of hard to guess so that it will be still available for internal-use +# tools but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possilbe to completely kill a command renaming it into +# an empty string: +# +# rename-command CONFIG "" + +################################### LIMITS #################################### + +# Set the max number of connected clients at the same time. By default there +# is no limit, and it's up to the number of file descriptors the Redis process +# is able to open. The special value '0' means no limits. +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# maxclients 128 + +# Don't use more memory than the specified amount of bytes. +# When the memory limit is reached Redis will try to remove keys with an +# EXPIRE set. It will try to start freeing keys that are going to expire +# in little time and preserve keys with a longer time to live. +# Redis will also try to remove objects from free lists if possible. +# +# If all this fails, Redis will start to reply with errors to commands +# that will use more memory, like SET, LPUSH, and so on, and will continue +# to reply to most read-only commands like GET. +# +# WARNING: maxmemory can be a good idea mainly if you want to use Redis as a +# 'state' server or cache, not as a real DB. When Redis is used as a real +# database the memory usage will grow over the weeks, it will be obvious if +# it is going to use too much memory in the long run, and you'll have the time +# to upgrade. With maxmemory after the limit is reached you'll start to get +# errors for write operations, and this may even lead to DB inconsistency. +# +# maxmemory + +# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory +# is reached? 
You can select among five behavior: +# +# volatile-lru -> remove the key with an expire set using an LRU algorithm +# allkeys-lru -> remove any key accordingly to the LRU algorithm +# volatile-random -> remove a random key with an expire set +# allkeys->random -> remove a random key, any key +# volatile-ttl -> remove the key with the nearest expire time (minor TTL) +# noeviction -> don't expire at all, just return an error on write operations +# +# Note: with all the kind of policies, Redis will return an error on write +# operations, when there are not suitable keys for eviction. +# +# At the date of writing this commands are: set setnx setex append +# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd +# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby +# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby +# getset mset msetnx exec sort +# +# The default is: +# +# maxmemory-policy volatile-lru + +# LRU and minimal TTL algorithms are not precise algorithms but approximated +# algorithms (in order to save memory), so you can select as well the sample +# size to check. For instance for default Redis will check three keys and +# pick the one that was used less recently, you can change the sample size +# using the following configuration directive. +# +# maxmemory-samples 3 + +############################## APPEND ONLY MODE ############################### + +# By default Redis asynchronously dumps the dataset on disk. If you can live +# with the idea that the latest records will be lost if something like a crash +# happens this is the preferred way to run Redis. If instead you care a lot +# about your data and don't want to that a single record can get lost you should +# enable the append only mode: when this mode is enabled Redis will append +# every write operation received in the file appendonly.aof. This file will +# be read on startup in order to rebuild the full dataset in memory. 
+# +# Note that you can have both the async dumps and the append only file if you +# like (you have to comment the "save" statements above to disable the dumps). +# Still if append only mode is enabled Redis will load the data from the +# log file at startup ignoring the dump.rdb file. +# +# IMPORTANT: Check the BGREWRITEAOF to check how to rewrite the append +# log file in background when it gets too big. + +appendonly no + +# The name of the append only file (default: "appendonly.aof") +# appendfilename appendonly.aof + +# The fsync() call tells the Operating System to actually write data on disk +# instead to wait for more data in the output buffer. Some OS will really flush +# data on disk, some other OS will just try to do it ASAP. +# +# Redis supports three different modes: +# +# no: don't fsync, just let the OS flush the data when it wants. Faster. +# always: fsync after every write to the append only log . Slow, Safest. +# everysec: fsync only if one second passed since the last fsync. Compromise. +# +# The default is "everysec" that's usually the right compromise between +# speed and data safety. It's up to you to understand if you can relax this to +# "no" that will will let the operating system flush the output buffer when +# it wants, for better performances (but if you can live with the idea of +# some data loss consider the default persistence mode that's snapshotting), +# or on the contrary, use "always" that's very slow but a bit safer than +# everysec. +# +# If unsure, use "everysec". + +# appendfsync always +appendfsync everysec +# appendfsync no + +# When the AOF fsync policy is set to always or everysec, and a background +# saving process (a background save or AOF log background rewriting) is +# performing a lot of I/O against the disk, in some Linux configurations +# Redis may block too long on the fsync() call. 
Note that there is no fix for +# this currently, as even performing fsync in a different thread will block +# our synchronous write(2) call. +# +# In order to mitigate this problem it's possible to use the following option +# that will prevent fsync() from being called in the main process while a +# BGSAVE or BGREWRITEAOF is in progress. +# +# This means that while another child is saving the durability of Redis is +# the same as "appendfsync none", that in pratical terms means that it is +# possible to lost up to 30 seconds of log in the worst scenario (with the +# default Linux settings). +# +# If you have latency problems turn this to "yes". Otherwise leave it as +# "no" that is the safest pick from the point of view of durability. +no-appendfsync-on-rewrite no + +# Automatic rewrite of the append only file. +# Redis is able to automatically rewrite the log file implicitly calling +# BGREWRITEAOF when the AOF log size will growth by the specified percentage. +# +# This is how it works: Redis remembers the size of the AOF file after the +# latest rewrite (or if no rewrite happened since the restart, the size of +# the AOF at startup is used). +# +# This base size is compared to the current size. If the current size is +# bigger than the specified percentage, the rewrite is triggered. Also +# you need to specify a minimal size for the AOF file to be rewritten, this +# is useful to avoid rewriting the AOF file even if the percentage increase +# is reached but it is still pretty small. +# +# Specify a precentage of zero in order to disable the automatic AOF +# rewrite feature. + +auto-aof-rewrite-percentage 100 +auto-aof-rewrite-min-size 64mb + +################################## SLOW LOG ################################### + +# The Redis Slow Log is a system to log queries that exceeded a specified +# execution time. 
The execution time does not include the I/O operations +# like talking with the client, sending the reply and so forth, +# but just the time needed to actually execute the command (this is the only +# stage of command execution where the thread is blocked and can not serve +# other requests in the meantime). +# +# You can configure the slow log with two parameters: one tells Redis +# what is the execution time, in microseconds, to exceed in order for the +# command to get logged, and the other parameter is the length of the +# slow log. When a new command is logged the oldest one is removed from the +# queue of logged commands. + +# The following time is expressed in microseconds, so 1000000 is equivalent +# to one second. Note that a negative number disables the slow log, while +# a value of zero forces the logging of every command. +slowlog-log-slower-than 10000 + +# There is no limit to this length. Just be aware that it will consume memory. +# You can reclaim memory used by the slow log with SLOWLOG RESET. +slowlog-max-len 1024 + +################################ VIRTUAL MEMORY ############################### + +### WARNING! Virtual Memory is deprecated in Redis 2.4 +### The use of Virtual Memory is strongly discouraged. + +# Virtual Memory allows Redis to work with datasets bigger than the actual +# amount of RAM needed to hold the whole dataset in memory. +# In order to do so very used keys are taken in memory while the other keys +# are swapped into a swap file, similarly to what operating systems do +# with memory pages. +# +# To enable VM just set 'vm-enabled' to yes, and set the following three +# VM parameters accordingly to your needs. + +# vm-enabled no +# vm-enabled yes + +# This is the path of the Redis swap file. As you can guess, swap files +# can't be shared by different Redis instances, so make sure to use a swap +# file for every redis process you are running. Redis will complain if the +# swap file is already in use. 
+# +# The best kind of storage for the Redis swap file (that's accessed at random) +# is a Solid State Disk (SSD). +# +# *** WARNING *** if you are using a shared hosting the default of putting +# the swap file under /tmp is not secure. Create a dir with access granted +# only to Redis user and configure Redis to create the swap file there. +# vm-swap-file /tmp/redis.swap + +# vm-max-memory configures the VM to use at max the specified amount of +# RAM. Everything that deos not fit will be swapped on disk *if* possible, that +# is, if there is still enough contiguous space in the swap file. +# +# With vm-max-memory 0 the system will swap everything it can. Not a good +# default, just specify the max amount of RAM you can in bytes, but it's +# better to leave some margin. For instance specify an amount of RAM +# that's more or less between 60 and 80% of your free RAM. +# vm-max-memory 0 + +# Redis swap files is split into pages. An object can be saved using multiple +# contiguous pages, but pages can't be shared between different objects. +# So if your page is too big, small objects swapped out on disk will waste +# a lot of space. If you page is too small, there is less space in the swap +# file (assuming you configured the same number of total swap file pages). +# +# If you use a lot of small objects, use a page size of 64 or 32 bytes. +# If you use a lot of big objects, use a bigger page size. +# If unsure, use the default :) +# vm-page-size 32 + +# Number of total memory pages in the swap file. +# Given that the page table (a bitmap of free/used pages) is taken in memory, +# every 8 pages on disk will consume 1 byte of RAM. +# +# The total swap size is vm-page-size * vm-pages +# +# With the default of 32-bytes memory pages and 134217728 pages Redis will +# use a 4 GB swap file, that will use 16 MB of RAM for the page table. 
+# +# It's better to use the smallest acceptable value for your application, +# but the default is large in order to work in most conditions. +# vm-pages 134217728 + +# Max number of VM I/O threads running at the same time. +# This threads are used to read/write data from/to swap file, since they +# also encode and decode objects from disk to memory or the reverse, a bigger +# number of threads can help with big objects even if they can't help with +# I/O itself as the physical device may not be able to couple with many +# reads/writes operations at the same time. +# +# The special value of 0 turn off threaded I/O and enables the blocking +# Virtual Memory implementation. +# vm-max-threads 4 + +############################### ADVANCED CONFIG ############################### + +# Hashes are encoded in a special way (much more memory efficient) when they +# have at max a given numer of elements, and the biggest element does not +# exceed a given threshold. You can configure this limits with the following +# configuration directives. +# hash-max-zipmap-entries 512 +# hash-max-zipmap-value 64 + +# Similarly to hashes, small lists are also encoded in a special way in order +# to save a lot of space. The special representation is only used when +# you are under the following limits: +list-max-ziplist-entries 512 +list-max-ziplist-value 64 + +# Sets have a special encoding in just one case: when a set is composed +# of just strings that happens to be integers in radix 10 in the range +# of 64 bit signed integers. +# The following configuration setting sets the limit in the size of the +# set in order to use this special memory saving encoding. +set-max-intset-entries 512 + +# Similarly to hashes and lists, sorted sets are also specially encoded in +# order to save a lot of space. 
This encoding is only used when the length and +# elements of a sorted set are below the following limits: +zset-max-ziplist-entries 128 +zset-max-ziplist-value 64 + +# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in +# order to help rehashing the main Redis hash table (the one mapping top-level +# keys to values). The hash table implementation redis uses (see dict.c) +# performs a lazy rehashing: the more operation you run into an hash table +# that is rhashing, the more rehashing "steps" are performed, so if the +# server is idle the rehashing is never complete and some more memory is used +# by the hash table. +# +# The default is to use this millisecond 10 times every second in order to +# active rehashing the main dictionaries, freeing memory when possible. +# +# If unsure: +# use "activerehashing no" if you have hard latency requirements and it is +# not a good thing in your environment that Redis can reply form time to time +# to queries with 2 milliseconds delay. +# +# use "activerehashing yes" if you don't have such hard requirements but +# want to free memory asap when possible. +activerehashing yes + +################################## INCLUDES ################################### + +# Include one or more other config files here. This is useful if you +# have a standard template that goes to all redis server but also need +# to customize a few per-server settings. Include files can include +# other files, so use this wisely. 
+# +# include /path/to/local.conf +# include /path/to/other.conf diff --git a/test/persistance/abstract.js b/test/persistance/abstract.js index 7a0f28f..f535975 100644 --- a/test/persistance/abstract.js +++ b/test/persistance/abstract.js @@ -4,22 +4,16 @@ var async = require("async"); var EventEmitter = require("events").EventEmitter; module.exports = function(create) { - var opts = { - ttl: { - checkFrequency: 500, - subscriptions: 500, - packets: 500 - } - }; beforeEach(function(done) { var that = this; - create(opts, function(err, result) { + create(function(err, result, opts) { if (err) { return done(err); } that.instance = result; + that.opts = opts; done(); }); }); @@ -323,6 +317,7 @@ module.exports = function(create) { it("should clean up the subscription store after a TTL", function(done) { var instance = this.instance; + var that = this; var client = { id: "my client id - 42", clean: false, @@ -337,7 +332,7 @@ module.exports = function(create) { expect(results).to.eql({}); done(); }); - }, opts.ttl.checkFrequency * 3); + }, that.opts.ttl.checkFrequency + 500); }); }); }); diff --git a/test/persistance/levelup_spec.js b/test/persistance/levelup_spec.js index 7dd8e2a..b744545 100644 --- a/test/persistance/levelup_spec.js +++ b/test/persistance/levelup_spec.js @@ -5,13 +5,22 @@ var LevelUp = require("../../").persistance.LevelUp; var tmp = require("tmp"); describe("mosca.persistance.LevelUp", function() { - abstract(function(opts, cb) { + + var opts = { + ttl: { + checkFrequency: 250, + subscriptions: 250, + packets: 250 + } + }; + + abstract(function(cb) { tmp.dir(function (err, path) { if (err) { return cb(err); } - cb(null, new LevelUp(path, opts)); + cb(null, new LevelUp(path, opts), opts); }); }); }); diff --git a/test/persistance/memory_spec.js b/test/persistance/memory_spec.js index a2242d7..d40625f 100644 --- a/test/persistance/memory_spec.js +++ b/test/persistance/memory_spec.js @@ -4,7 +4,16 @@ var abstract = require("./abstract"); var Memory = 
require("../../").persistance.Memory; describe("mosca.persistance.Memory", function() { - abstract(function(opts, cb) { - cb(null, new Memory(opts)); + + var opts = { + ttl: { + checkFrequency: 250, + subscriptions: 250, + packets: 250 + } + }; + + abstract(function(cb) { + cb(null, new Memory(opts), opts); }); }); diff --git a/test/persistance/redis_spec.js b/test/persistance/redis_spec.js new file mode 100644 index 0000000..1c52c2a --- /dev/null +++ b/test/persistance/redis_spec.js @@ -0,0 +1,26 @@ +"use strict"; + +var abstract = require("./abstract"); +var Redis= require("../../").persistance.Redis; +var redis = require("redis"); + +describe("mosca.persistance.Redis", function() { + + var opts = { + ttl: { + checkFrequency: 1000, + subscriptions: 1000, + packets: 1000 + } + }; + + abstract(function(cb) { + cb(null, new Redis(opts), opts); + }); + + afterEach(function(cb) { + var client = redis.createClient(); + client.flushdb(cb); + client.quit(); + }); +}); From 8ac488ba73ecf4ed96012fff21ee114c268bc3bb Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Fri, 21 Jun 2013 15:34:14 +0100 Subject: [PATCH 11/30] Added support for pattern subscriptions in the persistance. Still there a few todos, mainly due to the restoring of the subscriptions across startup of the persistance engine. 
--- lib/persistance/abstract.js | 1 + lib/persistance/index.js | 1 + lib/persistance/levelup.js | 45 ++++++++++++++++---------- lib/persistance/memory.js | 3 +- lib/persistance/redis.js | 54 +++++++++++++++++++++----------- test/persistance/abstract.js | 31 ++++++++++++++++++ test/persistance/levelup_spec.js | 3 ++ test/persistance/redis_spec.js | 3 ++ 8 files changed, 105 insertions(+), 36 deletions(-) diff --git a/lib/persistance/abstract.js b/lib/persistance/abstract.js index c6c93de..7132948 100644 --- a/lib/persistance/abstract.js +++ b/lib/persistance/abstract.js @@ -1,3 +1,4 @@ +"use strict"; function AbstractPersistence() { diff --git a/lib/persistance/index.js b/lib/persistance/index.js index 77b99bc..3efe25a 100644 --- a/lib/persistance/index.js +++ b/lib/persistance/index.js @@ -1,3 +1,4 @@ +"use strict"; module.exports.Memory = require("./memory"); module.exports.LevelUp = require("./levelup"); diff --git a/lib/persistance/levelup.js b/lib/persistance/levelup.js index d44e9d3..92b05e5 100644 --- a/lib/persistance/levelup.js +++ b/lib/persistance/levelup.js @@ -1,3 +1,4 @@ +"use strict"; var levelup = require("levelup"); var sublevel = require("level-sublevel"); @@ -5,6 +6,8 @@ var AbstractPersistence = require("./abstract"); var util = require("util"); var range = require('level-range'); var ttl = require('level-ttl'); +var Qlobber = require("qlobber").Qlobber; +var async = require("async"); function LevelUpPersistance(path, options) { if (!(this instanceof LevelUpPersistance)) { @@ -29,6 +32,12 @@ function LevelUpPersistance(path, options) { this._clientSubscriptions = this.db.sublevel("clientSubscriptions"); this._subscriptions = this.db.sublevel("subscriptions"); this._offlinePackets = this.db.sublevel("offlinePackets"); + this._subLobber = new Qlobber({ separator: "/" }); + + var that = this; + this._subscriptions.createReadStream().on("data", function(data) { + that._subLobber.add(data.value.topic, data.key); + }); } 
util.inherits(LevelUpPersistance, AbstractPersistence); @@ -65,11 +74,14 @@ LevelUpPersistance.prototype.storeSubscriptions = function(client, done) { if (!client.clean) { this._clientSubscriptions.put(client.id, client.subscriptions, ttl, done); Object.keys(client.subscriptions).forEach(function(key) { - that._subscriptions.put(util.format("%s:%s", key, client.id), { + var sub = { client: client.id, topic: key, qos: client.subscriptions[key] - }, ttl); + }; + var levelKey = util.format("%s:%s", key, client.id); + that._subLobber.add(key, levelKey); + that._subscriptions.put(levelKey, sub, ttl); }); } else if (done) { done(); @@ -84,7 +96,9 @@ LevelUpPersistance.prototype.lookupSubscriptions = function(client, done) { that._clientSubscriptions.del(client.id, function() { that.streamOfflinePackets(client, nop, function() { Object.keys(subscriptions).forEach(function(key) { - that._subscriptions.del(util.format("%s:%s", key, client.id)); + var levelKey = util.format("%s:%s", key, client.id); + that._subLobber.remove(levelKey); + that._subscriptions.del(levelKey); }); if (done) { @@ -106,22 +120,21 @@ LevelUpPersistance.prototype.lookupSubscriptions = function(client, done) { LevelUpPersistance.prototype.storeOfflinePacket = function(packet, done) { var that = this; - var stream = range(that._subscriptions, '%s:', packet.topic); + var subs = this._subLobber.match(packet.topic); var ttl = { - ttl: that.options.ttl.packets + ttl: that.options.ttl.subscriptions }; - stream.on("data", function(data) { - var sub = data.value; - that._offlinePackets.put( - util.format("%s:%s", sub.client, new Date()), - packet, ttl); - }); - - if (done) { - stream.on("error", done); - stream.on("end", done); - } + async.each(subs, function(key, cb) { + that._subscriptions.get(key, function(err, sub) { + if (err) { + return cb(err); + } + that._offlinePackets.put( + util.format("%s:%s", sub.client, new Date()), + packet, ttl, cb); + }); + }, done); }; 
LevelUpPersistance.prototype.streamOfflinePackets = function(client, cb, done) { diff --git a/lib/persistance/memory.js b/lib/persistance/memory.js index c3b2dae..edcf34b 100644 --- a/lib/persistance/memory.js +++ b/lib/persistance/memory.js @@ -1,3 +1,4 @@ +"use strict"; var LevelUpPersistance = require("./levelup"); var util = require("util"); @@ -6,7 +7,7 @@ var factory = function (location) { return new MemDOWN(location); }; function MemoryPersistance(options) { if (!(this instanceof MemoryPersistance)) { - return new MemoryPersistance(path, options); + return new MemoryPersistance(options); } options = options || {}; diff --git a/lib/persistance/redis.js b/lib/persistance/redis.js index 6ca6ebe..89cf052 100644 --- a/lib/persistance/redis.js +++ b/lib/persistance/redis.js @@ -1,3 +1,4 @@ +"use strict"; var AbstractPersistence = require("./abstract"); var redis = require("redis"); @@ -7,7 +8,7 @@ var async = require("async"); function RedisPersistance(options) { if (!(this instanceof RedisPersistance)) { - return new RedisPersistance(path, options); + return new RedisPersistance(options); } options.ttl = options.ttl || {}; @@ -25,9 +26,26 @@ function RedisPersistance(options) { options.host, options.redisOptions); + this._subLobber = new Qlobber({ separator: "/" }); + if (options.password) { this._client.auth(options.password); } + + var that = this; + + this._client.keys("client:sub:*", function(err, keys) { + async.each(keys, function(key, cb) { + that._client.get(key, function(err, subs) { + var id = key.split(":")[2]; + subs = JSON.parse(subs); + Object.keys(subs).forEach(function(sub) { + that._subLobber.add(sub, id); + }); + cb(); + }); + }); + }); } util.inherits(RedisPersistance, AbstractPersistence); @@ -77,22 +95,32 @@ RedisPersistance.prototype.storeSubscriptions = function(client, cb) { return cb(); } var clientSubKey = "client:sub:" + client.id; + var that = this; var op = this._client.multi() .set(clientSubKey, JSON.stringify(client.subscriptions)) 
.expire(clientSubKey, this.options.ttl.subscriptions / 1000); Object.keys(client.subscriptions).forEach(function(e) { - op = op.sadd("sub:client:" + e, client.id); + that._subLobber.add(e, client.id); }); op.exec(cb); }; RedisPersistance.prototype._cleanClient = function(client, cb) { + var that = this; if (client.clean) { + var key = "client:sub:" + client.id; + this._client.multi() - .del("client:sub:" + client.id) + .get(key, function(err, subs) { + subs = JSON.parse(subs); + Object.keys(subs).forEach(function(sub) { + that._subLobber.remove(sub, client.id); + }); + }) + .del(key) .del("packets:" + client.id) .exec(function(err) { if (cb) { @@ -118,23 +146,11 @@ RedisPersistance.prototype.lookupSubscriptions = function(client, cb) { RedisPersistance.prototype.storeOfflinePacket = function(packet, done) { var that = this; - var toDelete = []; - - this._client.smembers("sub:client:" + packet.topic, function(err, clients) { - if (err) { - return cb(err); - } - async.each(clients, function(client, cb) { - that._client.exists("client:sub:" + client, function(err, result) { - if (result) { - that._client.lpush("packets:" + client, JSON.stringify(packet), cb); - } else { - cb(); - } - }); - }, done); - }); + var matches = this._subLobber.match(packet.topic); + async.each(matches, function(client, cb) { + that._client.lpush("packets:" + client, JSON.stringify(packet), cb); + }, done); }; RedisPersistance.prototype.streamOfflinePackets = function(client, cb) { diff --git a/test/persistance/abstract.js b/test/persistance/abstract.js index f535975..3794a0b 100644 --- a/test/persistance/abstract.js +++ b/test/persistance/abstract.js @@ -489,4 +489,35 @@ module.exports = function(create) { }); }); }); + + describe("offline packets pattern", function() { + var client = { + id: "my client id - 42", + clean: false, + subscriptions: { + "hello/#": 1 + } + }; + + var packet = { + topic: "hello/42", + qos: 0, + payload: "world", + messageId: 42 + }; + + beforeEach(function(done) { 
+ this.instance.storeSubscriptions(client, done); + }); + + it("should store and stream an offline packet", function(done) { + var instance = this.instance; + instance.storeOfflinePacket(packet, function() { + instance.streamOfflinePackets(client, function(err, p) { + expect(p).to.eql(packet); + done(); + }); + }); + }); + }); }; diff --git a/test/persistance/levelup_spec.js b/test/persistance/levelup_spec.js index b744545..17974c3 100644 --- a/test/persistance/levelup_spec.js +++ b/test/persistance/levelup_spec.js @@ -23,4 +23,7 @@ describe("mosca.persistance.LevelUp", function() { cb(null, new LevelUp(path, opts), opts); }); }); + + + // TODO test the persistance of two databases }); diff --git a/test/persistance/redis_spec.js b/test/persistance/redis_spec.js index 1c52c2a..52bee64 100644 --- a/test/persistance/redis_spec.js +++ b/test/persistance/redis_spec.js @@ -23,4 +23,7 @@ describe("mosca.persistance.Redis", function() { client.flushdb(cb); client.quit(); }); + + // TODO test the persistance of two concurrent clients + // TODO test the persistance across two runs }); From 48012d8198aa6a2cf831ac0a4dc8b0cb71756658 Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Fri, 21 Jun 2013 17:35:26 +0100 Subject: [PATCH 12/30] Added restoration support of the Qlobber data structure for LevelUp. 
--- lib/persistance/levelup.js | 16 +++++++------- test/persistance/abstract.js | 2 +- test/persistance/levelup_spec.js | 37 +++++++++++++++++++++++++++++++- 3 files changed, 45 insertions(+), 10 deletions(-) diff --git a/lib/persistance/levelup.js b/lib/persistance/levelup.js index 92b05e5..649db7e 100644 --- a/lib/persistance/levelup.js +++ b/lib/persistance/levelup.js @@ -130,9 +130,9 @@ LevelUpPersistance.prototype.storeOfflinePacket = function(packet, done) { if (err) { return cb(err); } + var key = util.format("%s:%s", sub.client, new Date().toISOString()); that._offlinePackets.put( - util.format("%s:%s", sub.client, new Date()), - packet, ttl, cb); + key, packet, ttl, cb); }); }, done); }; @@ -142,12 +142,12 @@ LevelUpPersistance.prototype.streamOfflinePackets = function(client, cb, done) { var that = this; var stream = range(that._offlinePackets, '%s:', client.id); stream.on("data", function(data) { - var key = util.format("%s:%s", client.id, data.key); - that._offlinePackets.del(key); - - if (!client.clean) { - cb(null, data.value); - } + var key = util.format('%s:%s', client.id, data.key); + that._offlinePackets.del(key, function() { + if (!client.clean) { + cb(null, data.value); + } + }); }); if (cb) { diff --git a/test/persistance/abstract.js b/test/persistance/abstract.js index 3794a0b..f909624 100644 --- a/test/persistance/abstract.js +++ b/test/persistance/abstract.js @@ -7,7 +7,7 @@ module.exports = function(create) { beforeEach(function(done) { var that = this; - create(function(err, result, opts) { + create.call(this, function(err, result, opts) { if (err) { return done(err); } diff --git a/test/persistance/levelup_spec.js b/test/persistance/levelup_spec.js index 17974c3..c502085 100644 --- a/test/persistance/levelup_spec.js +++ b/test/persistance/levelup_spec.js @@ -3,6 +3,7 @@ var abstract = require("./abstract"); var LevelUp = require("../../").persistance.LevelUp; var tmp = require("tmp"); +var async = require("async"); 
describe("mosca.persistance.LevelUp", function() { @@ -15,15 +16,49 @@ describe("mosca.persistance.LevelUp", function() { }; abstract(function(cb) { + var that = this; tmp.dir(function (err, path) { if (err) { return cb(err); } + that.path = path; cb(null, new LevelUp(path, opts), opts); }); }); + describe("two instances", function() { + it("support restoring from disk", function(done) { + var client = { + id: "my client id - 42", + clean: false, + subscriptions: { + "hello/#": 1 + } + }; - // TODO test the persistance of two databases + var packet = { + topic: "hello", + qos: 0, + payload: "world", + messageId: 42 + }; + + var that = this; + + this.instance.storeSubscriptions(client, function() { + that.instance.close(function() { + that.instance = new LevelUp(that.path, opts); + setTimeout(function() { + that.instance.storeOfflinePacket(packet, function() { + that.instance.streamOfflinePackets(client, function(err, p) { + expect(p).to.eql(packet); + done(); + }); + }); + }, 10); + }); + }); + }); + }); }); From 53c4d17d660ab644c114d3fd7f477bc7f7d0723f Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Fri, 21 Jun 2013 18:32:10 +0100 Subject: [PATCH 13/30] Added synchronization support for Redis persistance. 
--- lib/persistance/redis.js | 58 +++++++++++++++++-------- test/persistance/levelup_spec.js | 2 +- test/persistance/redis_spec.js | 74 +++++++++++++++++++++++++++++++- 3 files changed, 113 insertions(+), 21 deletions(-) diff --git a/lib/persistance/redis.js b/lib/persistance/redis.js index 89cf052..cf0c856 100644 --- a/lib/persistance/redis.js +++ b/lib/persistance/redis.js @@ -19,37 +19,57 @@ function RedisPersistance(options) { // TTL for packets is 1 hour options.ttl.packets = options.ttl.packets || 60 * 60 * 1000; - this.options = options; + options.channel = options.channel || "moscaSync"; - this._client = redis.createClient( - options.port, - options.host, - options.redisOptions); + this.options = options; this._subLobber = new Qlobber({ separator: "/" }); - if (options.password) { - this._client.auth(options.password); - } + this._client = this._buildClient(); + this._pubSubClient = this._buildClient(); + + var newSub = function(key, cb) { + that._client.get(key, function(err, subs) { + var id = key.split(":")[2]; + subs = JSON.parse(subs); + Object.keys(subs).forEach(function(sub) { + that._subLobber.add(sub, id); + }); + if (cb) { + cb(); + } + }); + }; + + this._pubSubClient.subscribe(options.channel); + this._pubSubClient.on("message", function(channel, message) { + newSub(message); + }); var that = this; this._client.keys("client:sub:*", function(err, keys) { - async.each(keys, function(key, cb) { - that._client.get(key, function(err, subs) { - var id = key.split(":")[2]; - subs = JSON.parse(subs); - Object.keys(subs).forEach(function(sub) { - that._subLobber.add(sub, id); - }); - cb(); - }); - }); + async.each(keys, newSub); }); } util.inherits(RedisPersistance, AbstractPersistence); +RedisPersistance.prototype._buildClient = function() { + var options = this.options; + var client = redis.createClient( + options.port, + options.host, + options.redisOptions); + + if (options.password) { + client.auth(options.password); + } + + return client; + +}; + 
RedisPersistance.prototype.storeRetained = function(packet, cb) { this._client.hset("retained", packet.topic, JSON.stringify(packet), cb); }; @@ -99,6 +119,7 @@ RedisPersistance.prototype.storeSubscriptions = function(client, cb) { var op = this._client.multi() .set(clientSubKey, JSON.stringify(client.subscriptions)) + .publish(this.options.channel, clientSubKey) .expire(clientSubKey, this.options.ttl.subscriptions / 1000); Object.keys(client.subscriptions).forEach(function(e) { @@ -173,6 +194,7 @@ RedisPersistance.prototype.close = function(cb) { this._client.on("end", cb); } + this._pubSubClient.end(); this._client.quit(); }; diff --git a/test/persistance/levelup_spec.js b/test/persistance/levelup_spec.js index c502085..03c1e2e 100644 --- a/test/persistance/levelup_spec.js +++ b/test/persistance/levelup_spec.js @@ -38,7 +38,7 @@ describe("mosca.persistance.LevelUp", function() { }; var packet = { - topic: "hello", + topic: "hello/42", qos: 0, payload: "world", messageId: 42 diff --git a/test/persistance/redis_spec.js b/test/persistance/redis_spec.js index 52bee64..e625d55 100644 --- a/test/persistance/redis_spec.js +++ b/test/persistance/redis_spec.js @@ -19,11 +19,81 @@ describe("mosca.persistance.Redis", function() { }); afterEach(function(cb) { + if (this.secondInstance) { + this.secondInstance.close(); + } + var client = redis.createClient(); client.flushdb(cb); client.quit(); }); - // TODO test the persistance of two concurrent clients - // TODO test the persistance across two runs + describe("two clients", function() { + + it("should support restoring", function(done) { + var client = { + id: "my client id - 42", + clean: false, + subscriptions: { + "hello/#": 1 + } + }; + + var packet = { + topic: "hello/42", + qos: 0, + payload: "world", + messageId: 42 + }; + + var that = this; + + this.instance.storeSubscriptions(client, function() { + that.instance.close(function() { + that.instance = new Redis(opts); + setTimeout(function() { + 
that.instance.storeOfflinePacket(packet, function() { + that.instance.streamOfflinePackets(client, function(err, p) { + expect(p).to.eql(packet); + done(); + }); + }); + }, 10); + }); + }); + }); + + it("should support synchronization", function(done) { + var client = { + id: "my client id - 42", + clean: false, + subscriptions: { + "hello/#": 1 + } + }; + + var packet = { + topic: "hello/42", + qos: 0, + payload: "world", + messageId: 42 + }; + + var that = this; + that.secondInstance = new Redis(opts); + + setTimeout(function() { + that.instance.storeSubscriptions(client, function() { + setTimeout(function() { + that.secondInstance.storeOfflinePacket(packet, function() { + that.instance.streamOfflinePackets(client, function(err, p) { + expect(p).to.eql(packet); + done(); + }); + }); + }, 10); + }); + }, 10); + }); + }); }); From 397f478d507adb1b8a868d60a44c4e0faaf2cad4 Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Fri, 21 Jun 2013 19:27:02 +0100 Subject: [PATCH 14/30] Limiting unclean client handling to QoS 1 subscriptions. 
--- lib/persistance/abstract.js | 2 +- lib/persistance/levelup.js | 10 ++++- lib/persistance/redis.js | 11 ++++- test/persistance/abstract.js | 72 ++++++++++++++++++++++++++------ test/persistance/levelup_spec.js | 4 +- test/persistance/redis_spec.js | 8 +++- test/server_spec.js | 4 +- 7 files changed, 88 insertions(+), 23 deletions(-) diff --git a/lib/persistance/abstract.js b/lib/persistance/abstract.js index 7132948..262a65e 100644 --- a/lib/persistance/abstract.js +++ b/lib/persistance/abstract.js @@ -42,7 +42,7 @@ AbstractPersistence.prototype.wire = function(server) { client.handleAuthorizeSubscribe( null, true, { topic: topic, - qos: subscriptions[topic] + qos: subscriptions[topic].qos }, nop); }); }); diff --git a/lib/persistance/levelup.js b/lib/persistance/levelup.js index 649db7e..05f7600 100644 --- a/lib/persistance/levelup.js +++ b/lib/persistance/levelup.js @@ -70,14 +70,20 @@ LevelUpPersistance.prototype.storeSubscriptions = function(client, done) { var ttl = { ttl: that.options.ttl.subscriptions }; + var subscriptions = {}; if (!client.clean) { - this._clientSubscriptions.put(client.id, client.subscriptions, ttl, done); Object.keys(client.subscriptions).forEach(function(key) { + if (client.subscriptions[key].qos > 0) { + subscriptions[key] = client.subscriptions[key]; + } + }); + this._clientSubscriptions.put(client.id, subscriptions, ttl, done); + Object.keys(subscriptions).forEach(function(key) { var sub = { client: client.id, topic: key, - qos: client.subscriptions[key] + qos: subscriptions[key].qos }; var levelKey = util.format("%s:%s", key, client.id); that._subLobber.add(key, levelKey); diff --git a/lib/persistance/redis.js b/lib/persistance/redis.js index cf0c856..0ba1ddd 100644 --- a/lib/persistance/redis.js +++ b/lib/persistance/redis.js @@ -116,13 +116,20 @@ RedisPersistance.prototype.storeSubscriptions = function(client, cb) { } var clientSubKey = "client:sub:" + client.id; var that = this; + var subscriptions = {}; + + 
Object.keys(client.subscriptions).forEach(function(key) { + if (client.subscriptions[key].qos > 0) { + subscriptions[key] = client.subscriptions[key]; + } + }); var op = this._client.multi() - .set(clientSubKey, JSON.stringify(client.subscriptions)) + .set(clientSubKey, JSON.stringify(subscriptions)) .publish(this.options.channel, clientSubKey) .expire(clientSubKey, this.options.ttl.subscriptions / 1000); - Object.keys(client.subscriptions).forEach(function(e) { + Object.keys(subscriptions).forEach(function(e) { that._subLobber.add(e, client.id); }); diff --git a/test/persistance/abstract.js b/test/persistance/abstract.js index f909624..0818bf3 100644 --- a/test/persistance/abstract.js +++ b/test/persistance/abstract.js @@ -167,7 +167,9 @@ module.exports = function(create) { id: "my client id - 42", clean: false, subscriptions: { - hello: 1 + hello: { + qos: 1 + } } }; this.instance.storeSubscriptions(client, done); @@ -178,7 +180,9 @@ module.exports = function(create) { id: "my client id - 42", clean: false, subscriptions: { - hello: 1 + hello: { + qos: 1 + } } }; this.instance.lookupSubscriptions(client, function(err, results) { @@ -193,7 +197,9 @@ module.exports = function(create) { id: "my client id - 42", clean: false, subscriptions: { - hello: 1 + hello: { + qos: 1 + } } }; @@ -211,7 +217,9 @@ module.exports = function(create) { id: "my client id - 42", clean: true, subscriptions: { - hello: 1 + hello: { + qos: 1 + } } }; @@ -230,7 +238,9 @@ module.exports = function(create) { id: "my client id - 42", clean: false, subscriptions: { - hello: 1 + hello: { + qos: 1 + } } }; @@ -249,7 +259,9 @@ module.exports = function(create) { id: "my client id - 42", clean: false, subscriptions: { - hello: 1 + hello: { + qos: 1 + } } }; @@ -273,7 +285,9 @@ module.exports = function(create) { id: "my client id - 42", clean: false, subscriptions: { - hello: 1 + hello: { + qos: 1 + } }, handleAuthorizeSubscribe: function(err, success, subscription, callback) { 
expect(success).to.eql(true); @@ -300,7 +314,9 @@ module.exports = function(create) { id: "my client id - 42", clean: false, subscriptions: { - hello: 1 + hello: { + qos: 1 + } } }; @@ -322,7 +338,9 @@ module.exports = function(create) { id: "my client id - 42", clean: false, subscriptions: { - hello: 1 + hello: { + qos: 1 + } } }; @@ -335,6 +353,26 @@ module.exports = function(create) { }, that.opts.ttl.checkFrequency + 500); }); }); + + it("should not store a QoS 0 subscription", function(done) { + var instance = this.instance; + var client = { + id: "my client id - 42", + clean: false, + subscriptions: { + hello: { + qos: 0 + } + } + }; + + instance.storeSubscriptions(client, function() { + instance.lookupSubscriptions(client, function(err, results) { + expect(results).to.eql({}); + done(); + }); + }); + }); }); describe("offline packets", function() { @@ -342,7 +380,9 @@ module.exports = function(create) { id: "my client id - 42", clean: false, subscriptions: { - hello: 1 + hello: { + qos: 1 + } } }; @@ -396,7 +436,9 @@ module.exports = function(create) { id: "my client id - 42", clean: false, subscriptions: { - hello: 1 + hello: { + qos: 1 + } } }; @@ -440,7 +482,9 @@ module.exports = function(create) { id: "my client id - 42", clean: false, subscriptions: { - hello: 1 + hello: { + qos: 1 + } } }; @@ -495,7 +539,9 @@ module.exports = function(create) { id: "my client id - 42", clean: false, subscriptions: { - "hello/#": 1 + "hello/#": { + qos: 1 + } } }; diff --git a/test/persistance/levelup_spec.js b/test/persistance/levelup_spec.js index 03c1e2e..be0b118 100644 --- a/test/persistance/levelup_spec.js +++ b/test/persistance/levelup_spec.js @@ -33,7 +33,9 @@ describe("mosca.persistance.LevelUp", function() { id: "my client id - 42", clean: false, subscriptions: { - "hello/#": 1 + "hello/#": { + qos: 1 + } } }; diff --git a/test/persistance/redis_spec.js b/test/persistance/redis_spec.js index e625d55..c599e45 100644 --- a/test/persistance/redis_spec.js +++ 
b/test/persistance/redis_spec.js @@ -35,7 +35,9 @@ describe("mosca.persistance.Redis", function() { id: "my client id - 42", clean: false, subscriptions: { - "hello/#": 1 + "hello/#": { + qos: 1 + } } }; @@ -68,7 +70,9 @@ describe("mosca.persistance.Redis", function() { id: "my client id - 42", clean: false, subscriptions: { - "hello/#": 1 + "hello/#": { + qos: 1 + } } }; diff --git a/test/server_spec.js b/test/server_spec.js index 8a23ae6..9d07f7d 100644 --- a/test/server_spec.js +++ b/test/server_spec.js @@ -1116,7 +1116,7 @@ describe("mosca.Server", function() { buildAndConnect(cb, opts, function(client) { var subscriptions = [{ topic: "hello", - qos: 0 + qos: 1 }]; client.subscribe({ @@ -1174,7 +1174,7 @@ describe("mosca.Server", function() { buildAndConnect(cb, opts, function(client) { var subscriptions = [{ topic: "hello", - qos: 0 + qos: 1 }]; client.subscribe({ From 0ba1b7abbfb8bbc7f54de587c23ce92773627db0 Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Sun, 23 Jun 2013 18:27:13 +0100 Subject: [PATCH 15/30] Updated Ascoltatori to 0.7.0 --- package.json | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/package.json b/package.json index 3089f4f..2fe9b57 100644 --- a/package.json +++ b/package.json @@ -47,22 +47,21 @@ "dependencies": { "mqtt": "~0.2.10", "async": "~0.2.4", - "ascoltatori": "~0.6.0", + "ascoltatori": "~0.7.0", "debug": "~0.7.2", "commander": "~1.1.1", "minimatch": "~0.2.11", "bunyan": "~0.21.3", "memdown": "~0.2.0", - "level": "~0.10.0", "levelup": "~0.10.0", "level-sublevel": "~4.7.0", "tmp": "0.0.20", - "leveldown": "~0.6.1", "level-range": "0.0.0", "level-ttl": "~0.2.0", - "qlobber": "~0.1.1" + "qlobber": "~0.3.0" }, "optionalDependencies": { + "leveldown": "~0.6.1", "zmq": "~2.4.0", "amqp": "~0.1.4", "redis": "~0.8.2" From 0bfc58914222d11b43b0df918b1262a6238b940e Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Sun, 23 Jun 2013 18:32:23 +0100 Subject: [PATCH 16/30] Added MongoDB as an optional dependency. 
--- package.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/package.json b/package.json index 2fe9b57..dc6a14a 100644 --- a/package.json +++ b/package.json @@ -64,6 +64,7 @@ "leveldown": "~0.6.1", "zmq": "~2.4.0", "amqp": "~0.1.4", - "redis": "~0.8.2" + "redis": "~0.8.2", + "mongodb": "~1.3.10" } } From fa72c3c90db93eada6757657b1b450097427c44c Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Sun, 23 Jun 2013 18:33:59 +0100 Subject: [PATCH 17/30] Added mongodb in .travis.yml. --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index 34be760..a2070e6 100644 --- a/.travis.yml +++ b/.travis.yml @@ -17,3 +17,4 @@ node_js: services: - rabbitmq - redis-server + - mongodb From ff5b85927341ff2a281de66aa0be6157b7c5052b Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Mon, 24 Jun 2013 10:39:33 +0100 Subject: [PATCH 18/30] Implemented mongo persistance. --- lib/persistance/index.js | 1 + lib/persistance/levelup.js | 6 +- lib/persistance/mongo.js | 221 +++++++++++++++++++++++++++++++++ test/persistance/mongo_spec.js | 112 +++++++++++++++++ 4 files changed, 337 insertions(+), 3 deletions(-) create mode 100644 lib/persistance/mongo.js create mode 100644 test/persistance/mongo_spec.js diff --git a/lib/persistance/index.js b/lib/persistance/index.js index 3efe25a..14110b9 100644 --- a/lib/persistance/index.js +++ b/lib/persistance/index.js @@ -3,3 +3,4 @@ module.exports.Memory = require("./memory"); module.exports.LevelUp = require("./levelup"); module.exports.Redis = require("./redis"); +module.exports.Mongo = require("./mongo"); diff --git a/lib/persistance/levelup.js b/lib/persistance/levelup.js index 05f7600..3d8685d 100644 --- a/lib/persistance/levelup.js +++ b/lib/persistance/levelup.js @@ -49,7 +49,8 @@ LevelUpPersistance.prototype.storeRetained = function(packet, cb) { LevelUpPersistance.prototype.lookupRetained = function(pattern, cb) { var stream = this._retained.createReadStream(); var matched = []; - 
var regexp = new RegExp(pattern.replace(/(#|\+)/, ".+")); + var qlobber = new Qlobber({ separator: '/' }); + qlobber.add(pattern, true); stream.on("error", cb); @@ -58,8 +59,7 @@ LevelUpPersistance.prototype.lookupRetained = function(pattern, cb) { }); stream.on("data", function(data) { - - if (regexp.test(data.key)) { + if (qlobber.match(data.key).length > 0) { matched.push(data.value); } }); diff --git a/lib/persistance/mongo.js b/lib/persistance/mongo.js new file mode 100644 index 0000000..5ea86b2 --- /dev/null +++ b/lib/persistance/mongo.js @@ -0,0 +1,221 @@ +"use strict"; + +var AbstractPersistence = require("./abstract"); +var MongoClient = require('mongodb').MongoClient; +var util = require("util"); +var async = require("async"); +var Qlobber = require("qlobber").Qlobber; + +function MongoPersistance(options, done) { + if (!(this instanceof MongoPersistance)) { + return new MongoPersistance(options); + } + + options.ttl = options.ttl || {}; + + // TTL for subscriptions is 1 hour + options.ttl.subscriptions = options.ttl.subscriptions || 60 * 60 * 1000; + + // TTL for packets is 1 hour + options.ttl.packets = options.ttl.packets || 60 * 60 * 1000; + + options.mongo = options.mongo || {}; + + options.mongo.safe = true; + + this.options = options; + + var that = this; + + // Connect to the db + var a = MongoClient.connect(options.url, options.mongo, function(err, db) { + if (err) { + if (done) { + return done(err); + } + // we have no way of providing an error handler + throw err; + } + + that.db = db; + that._subscriptions = db.collection("subscriptions"); + that._packets = db.collection("packets"); + that._retained = db.collection("retained"); + async.parallel([ + that._retained.ensureIndex.bind(that._retained, "topic"), + that._subscriptions.ensureIndex.bind(that._subscriptions, "client"), + that._packets.ensureIndex.bind(that._packets, "client"), + that._subscriptions.ensureIndex.bind(that._subscriptions, { "added": 1 }, { expireAfterSeconds: 
Math.round(options.ttl.subscriptions / 1000 )} ) + ], function(err) { + if (done) { + done(err, that); + } + }); + }); +} + +util.inherits(MongoPersistance, AbstractPersistence); + +MongoPersistance.prototype.storeSubscriptions = function(client, done) { + + var subscriptions; + var that = this; + + if (!client.clean) { + subscriptions = Object.keys(client.subscriptions).filter(function(key) { + return client.subscriptions[key].qos > 0; + }); + + async.each(subscriptions, function(key, cb) { + that._subscriptions.insert({ + client: client.id, + topic: key, + qos: client.subscriptions[key].qos, + added: new Date() + }, cb); + }, done); + } else if (done) { + return done(); + } +}; + +MongoPersistance.prototype.lookupSubscriptions = function(client, done) { + var that = this; + if (client.clean) { + async.parallel([ + this._subscriptions.remove.bind(this._subscriptions, { client: client.id }), + this._packets.remove.bind(this._packets, { client: client.id }), + ], function(err) { + done(err, {}); + }); + } else { + this._subscriptions.find({ client: client.id }) + .toArray(function(err, subscriptions) { + + var now = Date.now(); + done(err, (subscriptions || []).reduce(function(obj, sub) { + // mongodb TTL is not precise + if (sub.added.getTime() + that.options.ttl.subscriptions > now) { + obj[sub.topic] = { + qos: sub.qos + }; + } + return obj; + }, {})); + }); + } +}; + +MongoPersistance.prototype.storeRetained = function(packet, cb) { + this._retained.insert(packet, function(err) { + if (cb) { + cb(err); + } + // TODO what to do with an err when there is no cb? 
+ }); +}; + +MongoPersistance.prototype.lookupRetained = function(pattern, cb) { + + var regexp = new RegExp(pattern.replace(/(#|\+)/, ".+")); + var stream = this._retained.find({ topic: { $regex: regexp } }).stream(); + var matched = []; + var qlobber = new Qlobber({ separator: '/' }); + qlobber.add(pattern, true); + + stream.on("error", cb); + + stream.on("end", function() { + cb(null, matched); + }); + + stream.on("data", function(data) { + if (qlobber.match(data.topic).length > 0) { + matched.push(data); + } + }); +}; + +MongoPersistance.prototype.storeOfflinePacket = function(packet, done) { + + var parts = packet.topic.split("/"); + var patterns = []; + var trail = parts.reduce(function(trail, part) { + if (trail.length > 0) { + patterns.push(trail.concat("+").join("/")); + patterns.push(trail.concat("#").join("/")); + } + + trail.push(part); + + return trail; + }, []); + + patterns.push(packet.topic); + + var stream = this._subscriptions.find({ topic: { $in: patterns } }).stream(); + var ended = false; + var completed = 0; + var started = 0; + var that = this; + + if (done) { + stream.on("error", done); + } + + stream.on("data", function(data) { + started++; + + that._packets.insert({ + client: data.client, + packet: packet + }, function(err) { + if (err) { + return stream.emit("error", err); + } + + // TODO handle the err in case of no callback + completed++; + + if (done && ended && started === completed) { + done(); + } + }); + }); + + stream.on("end", function() { + ended = true; + if (done && started === completed) { + done(); + } + }); +}; + +MongoPersistance.prototype.streamOfflinePackets = function(client, cb) { + if (client.clean) { + return; + } + + var stream = this._packets.find({ client: client.id }).stream(); + var that = this; + + stream.on("error", cb); + + stream.on("end", function() { + that._packets.remove({ client: client.id }, function() {}); + }); + + stream.on("data", function(data) { + cb(null, data.packet); + }); +}; + 
+MongoPersistance.prototype.close = function(cb) { + if (this.db) { + this.db.close(cb); + } else { + cb(); + } +}; + +module.exports = MongoPersistance; diff --git a/test/persistance/mongo_spec.js b/test/persistance/mongo_spec.js new file mode 100644 index 0000000..a5ad276 --- /dev/null +++ b/test/persistance/mongo_spec.js @@ -0,0 +1,112 @@ +"use strict"; + +var abstract = require("./abstract"); +var Mongo = require("../../").persistance.Mongo; +var redis = require("redis"); +var MongoClient = require('mongodb').MongoClient; + +describe("mosca.persistance.Mongo", function() { + + var opts = { + url: "mongodb://localhost:27017/moscatests", + ttl: { + checkFrequency: 1000, + subscriptions: 1000, + packets: 1000 + } + }; + + abstract(function(cb) { + new Mongo(opts, function(err, mongo) { + cb(err, mongo, opts); + }); + }); + + afterEach(function(cb) { + if (this.secondInstance) { + this.secondInstance.close(); + } + + // Connect to the db + MongoClient.connect(opts.url, function(err, db) { + if (err) { + return cb(err); + } + + db.dropDatabase(cb); + }); + }); + + describe("two clients", function() { + + it("should support restoring", function(done) { + var client = { + id: "my client id - 42", + clean: false, + subscriptions: { + "hello/#": { + qos: 1 + } + } + }; + + var packet = { + topic: "hello/42", + qos: 0, + payload: "world", + messageId: 42 + }; + + var that = this; + + this.instance.storeSubscriptions(client, function() { + that.instance.close(function() { + that.instance = new Mongo(opts); + setTimeout(function() { + that.instance.storeOfflinePacket(packet, function() { + that.instance.streamOfflinePackets(client, function(err, p) { + expect(p).to.eql(packet); + done(); + }); + }); + }, 10); + }); + }); + }); + + it("should support synchronization", function(done) { + var client = { + id: "my client id - 42", + clean: false, + subscriptions: { + "hello/#": { + qos: 1 + } + } + }; + + var packet = { + topic: "hello/42", + qos: 0, + payload: "world", + 
messageId: 42 + }; + + var that = this; + that.secondInstance = new Mongo(opts); + + setTimeout(function() { + that.instance.storeSubscriptions(client, function() { + setTimeout(function() { + that.secondInstance.storeOfflinePacket(packet, function() { + that.instance.streamOfflinePackets(client, function(err, p) { + expect(p).to.eql(packet); + done(); + }); + }); + }, 10); + }); + }, 10); + }); + }); +}); From 9d12d91f1e50ac6bfb58fa67a2c5bc1b11876f11 Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Mon, 24 Jun 2013 10:50:46 +0100 Subject: [PATCH 19/30] Made Mongo persistance spec faster. --- lib/persistance/mongo.js | 14 ++++++++---- test/persistance/mongo_spec.js | 39 ++++++++++++++++++++++------------ test/persistance/redis_spec.js | 1 + 3 files changed, 37 insertions(+), 17 deletions(-) diff --git a/lib/persistance/mongo.js b/lib/persistance/mongo.js index 5ea86b2..02b6968 100644 --- a/lib/persistance/mongo.js +++ b/lib/persistance/mongo.js @@ -27,8 +27,7 @@ function MongoPersistance(options, done) { var that = this; - // Connect to the db - var a = MongoClient.connect(options.url, options.mongo, function(err, db) { + var connected = function(err, db) { if (err) { if (done) { return done(err); @@ -51,7 +50,14 @@ function MongoPersistance(options, done) { done(err, that); } }); - }); + }; + + // Connect to the db + if (options.connection) { + connected(null, options.connection); + } else { + MongoClient.connect(options.url, options.mongo, connected); + } } util.inherits(MongoPersistance, AbstractPersistence); @@ -211,7 +217,7 @@ MongoPersistance.prototype.streamOfflinePackets = function(client, cb) { }; MongoPersistance.prototype.close = function(cb) { - if (this.db) { + if (this.db && this.options.autoClose !== false) { this.db.close(cb); } else { cb(); diff --git a/test/persistance/mongo_spec.js b/test/persistance/mongo_spec.js index a5ad276..68970c2 100644 --- a/test/persistance/mongo_spec.js +++ b/test/persistance/mongo_spec.js @@ -4,11 +4,13 @@ var 
abstract = require("./abstract"); var Mongo = require("../../").persistance.Mongo; var redis = require("redis"); var MongoClient = require('mongodb').MongoClient; +var async = require("async"); describe("mosca.persistance.Mongo", function() { var opts = { url: "mongodb://localhost:27017/moscatests", + autoClose: false, ttl: { checkFrequency: 1000, subscriptions: 1000, @@ -16,24 +18,35 @@ describe("mosca.persistance.Mongo", function() { } }; - abstract(function(cb) { - new Mongo(opts, function(err, mongo) { - cb(err, mongo, opts); + before(function(done) { + // Connect to the db + MongoClient.connect(opts.url, { safe: true }, function(err, db) { + opts.connection = db; + done(err); }); }); - afterEach(function(cb) { - if (this.secondInstance) { - this.secondInstance.close(); - } - - // Connect to the db - MongoClient.connect(opts.url, function(err, db) { - if (err) { - return cb(err); + beforeEach(function(done) { + async.parallel([ + function(cb) { + opts.connection.collection("subscriptions").drop(cb); + }, + function(cb) { + opts.connection.collection("packets").drop(cb); + }, + function(cb) { + opts.connection.collection("retained").drop(cb); } + ], done); + }); + + afterEach(function() { + this.secondInstance = null; + }); - db.dropDatabase(cb); + abstract(function(cb) { + new Mongo(opts, function(err, mongo) { + cb(err, mongo, opts); }); }); diff --git a/test/persistance/redis_spec.js b/test/persistance/redis_spec.js index c599e45..71defc8 100644 --- a/test/persistance/redis_spec.js +++ b/test/persistance/redis_spec.js @@ -21,6 +21,7 @@ describe("mosca.persistance.Redis", function() { afterEach(function(cb) { if (this.secondInstance) { this.secondInstance.close(); + this.secondInstance = null; } var client = redis.createClient(); From 2cbbe166d0d041492263b5097e71815730d52547 Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Mon, 24 Jun 2013 12:09:13 +0100 Subject: [PATCH 20/30] Added 'before' to the .jshintrc. 
--- .jshintrc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.jshintrc b/.jshintrc index 037052f..3729cb1 100644 --- a/.jshintrc +++ b/.jshintrc @@ -9,6 +9,7 @@ "beforeEach": false, "describe": false, "mosca": false, - "expect": false + "expect": false, + "before": false } } From 3bc35d3c1c5a1476aaaedb2fd6f34307e4c25bf4 Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Tue, 25 Jun 2013 10:41:14 +0100 Subject: [PATCH 21/30] Fixed offline topic support with wildcards in Mongodb. --- lib/persistance/mongo.js | 16 +------ lib/persistance/utils.js | 41 ++++++++++++++++ package.json | 2 +- test/persistance/utils_spec.js | 87 ++++++++++++++++++++++++++++++++++ 4 files changed, 131 insertions(+), 15 deletions(-) create mode 100644 lib/persistance/utils.js create mode 100644 test/persistance/utils_spec.js diff --git a/lib/persistance/mongo.js b/lib/persistance/mongo.js index 02b6968..e44e116 100644 --- a/lib/persistance/mongo.js +++ b/lib/persistance/mongo.js @@ -5,6 +5,7 @@ var MongoClient = require('mongodb').MongoClient; var util = require("util"); var async = require("async"); var Qlobber = require("qlobber").Qlobber; +var topicPatterns = require("./utils").topicPatterns; function MongoPersistance(options, done) { if (!(this instanceof MongoPersistance)) { @@ -144,20 +145,7 @@ MongoPersistance.prototype.lookupRetained = function(pattern, cb) { MongoPersistance.prototype.storeOfflinePacket = function(packet, done) { - var parts = packet.topic.split("/"); - var patterns = []; - var trail = parts.reduce(function(trail, part) { - if (trail.length > 0) { - patterns.push(trail.concat("+").join("/")); - patterns.push(trail.concat("#").join("/")); - } - - trail.push(part); - - return trail; - }, []); - - patterns.push(packet.topic); + var patterns = topicPatterns(packet.topic); var stream = this._subscriptions.find({ topic: { $in: patterns } }).stream(); var ended = false; diff --git a/lib/persistance/utils.js b/lib/persistance/utils.js new file mode 
100644 index 0000000..92adb88 --- /dev/null +++ b/lib/persistance/utils.js @@ -0,0 +1,41 @@ + +module.exports = { + topicPatterns: function(topic) { + var parts = topic.split("/"); + var patterns = [topic]; + var i, a = [], b = [], j, k, h, list = []; + + for (j=1; j < parts.length; j++) { + list.length = 0; // clear the array + + for (i=0; i < parts.length; i++) { + a.length = 0; + b.length = 0; + + list.push(i); + for (h = 1; list.length < j; h++) { + list.unshift(parts.length - h); + } + + for (k=0; k < parts.length; k++) { + if (list.indexOf(k) >= 0) { + a.push(parts[k]); + b.push(parts[k]); + } else { + if (k === 0 || a[a.length - 1] !== "#") { + a.push("#"); + } + b.push("+"); + } + } + + patterns.push(a.join("/")); + patterns.push(b.join("/")); + list.shift(); + } + } + + return patterns; + } + +}; diff --git a/package.json b/package.json index dc6a14a..119e6db 100644 --- a/package.json +++ b/package.json @@ -32,7 +32,7 @@ "license": "MIT", "devDependencies": { "mocha": "~1.8.1", - "chai": "~1.5.0", + "chai": "~1.7.1", "sinon": "~1.4.2", "sinon-chai": "~2.1.2", "async_bench": "0.0.1", diff --git a/test/persistance/utils_spec.js b/test/persistance/utils_spec.js new file mode 100644 index 0000000..8457ae6 --- /dev/null +++ b/test/persistance/utils_spec.js @@ -0,0 +1,87 @@ +"use strict"; + +var abstract = require("./abstract"); +var utilities = require("../../lib/persistance/utils"); +var topicPatterns = utilities.topicPatterns; +var async = require("async"); + +describe("persistance utilities", function() { + + describe("topicPatterns", function() { + + it("should return the topic itself if it is not part of a tree", function() { + expect(topicPatterns("hello")).to.eql(["hello"]); + }); + + it("should return all the possibilities for a 2-level topic", function() { + var members = [ + "hello/world", + "hello/#", + "hello/+", + "#/world", + "+/world" + ]; + var result = topicPatterns("hello/world"); + expect(result).to.include.members(members); + 
expect(result).to.have.property("length", members.length); + }); + + it("should return all the possibilities for a 3-level topic", function() { + var members = [ + "hello/world/42", + "hello/#", + "hello/+/+", + "#/world/#", + "+/world/+", + "#/42", + "+/+/42", + "hello/#/42", + "hello/+/42", + "hello/world/#", + "hello/world/+", + "#/world/42", + "+/world/42" + ]; + var result = topicPatterns("hello/world/42"); + expect(result).to.include.members(members); + expect(result).to.have.property("length", members.length); + }); + + it("should return all the possibilities for a 4-level topic", function() { + var members = [ + 'hello/matteo/and/david', + 'hello/#', + 'hello/+/+/+', + '#/matteo/#', + '+/matteo/+/+', + '#/and/#', + '+/+/and/+', + '#/david', + '+/+/+/david', + 'hello/#/david', + 'hello/+/+/david', + 'hello/matteo/#', + 'hello/matteo/+/+', + '#/matteo/and/#', + '+/matteo/and/+', + '#/and/david', + '+/+/and/david', + 'hello/#/and/david', + 'hello/+/and/david', + 'hello/matteo/#/david', + 'hello/matteo/+/david', + 'hello/matteo/and/#', + 'hello/matteo/and/+', + '#/matteo/and/david', + '+/matteo/and/david' + // FIXME generate the remaining ones + //'+/matteo/+/david', + //'#/matteo/#/david', + //'hello/#/and/#', + //'hello/+/and/+' + ]; + var result = topicPatterns("hello/matteo/and/david"); + expect(result).to.eql(members); + }); + }); +}); From ed772c8a34087df67ed49f9ff52c5ea1efc4d30d Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Tue, 25 Jun 2013 10:47:30 +0100 Subject: [PATCH 22/30] Added memoization of topicPatterns. 
--- lib/persistance/utils.js | 13 +++++++++++++ package.json | 3 ++- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/lib/persistance/utils.js b/lib/persistance/utils.js index 92adb88..8d9bb9f 100644 --- a/lib/persistance/utils.js +++ b/lib/persistance/utils.js @@ -1,6 +1,19 @@ +var LRU = require("lru-cache"); +var cache = LRU({ + max: 10000, + maxAge: 1000 * 60 * 60 +}); module.exports = { topicPatterns: function(topic) { + var result = cache.get(topic); + if (!result) { + result = module.exports._topicPatterns(topic); + } + cache.set(topic, result); + return result; + }, + _topicPatterns: function(topic) { var parts = topic.split("/"); var patterns = [topic]; var i, a = [], b = [], j, k, h, list = []; diff --git a/package.json b/package.json index 119e6db..df521cf 100644 --- a/package.json +++ b/package.json @@ -58,7 +58,8 @@ "tmp": "0.0.20", "level-range": "0.0.0", "level-ttl": "~0.2.0", - "qlobber": "~0.3.0" + "qlobber": "~0.3.0", + "lru-cache": "~2.3.0" }, "optionalDependencies": { "leveldown": "~0.6.1", From 90c167712399af8a185fd99e354d56adbe0a4612 Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Tue, 25 Jun 2013 11:00:25 +0100 Subject: [PATCH 23/30] Reformatting of the persistance utilities module. 
--- lib/persistance/utils.js | 94 +++++++++++++++++++++++----------------- 1 file changed, 54 insertions(+), 40 deletions(-) diff --git a/lib/persistance/utils.js b/lib/persistance/utils.js index 8d9bb9f..97433a8 100644 --- a/lib/persistance/utils.js +++ b/lib/persistance/utils.js @@ -4,51 +4,65 @@ var cache = LRU({ maxAge: 1000 * 60 * 60 }); -module.exports = { - topicPatterns: function(topic) { - var result = cache.get(topic); - if (!result) { - result = module.exports._topicPatterns(topic); - } - cache.set(topic, result); - return result; - }, - _topicPatterns: function(topic) { - var parts = topic.split("/"); - var patterns = [topic]; - var i, a = [], b = [], j, k, h, list = []; - - for (j=1; j < parts.length; j++) { - list.length = 0; // clear the array - - for (i=0; i < parts.length; i++) { - a.length = 0; - b.length = 0; - - list.push(i); - for (h = 1; list.length < j; h++) { - list.unshift(parts.length - h); - } +/** + * Generate the possible patterns that might match a topic. + * + * @param {String} the topic + * @return the list of the patterns + */ +function _topicPatterns(topic) { + var parts = topic.split("/"); + var patterns = [topic]; + var i, a = [], b = [], j, k, h, list = []; + + for (j=1; j < parts.length; j++) { + list.length = 0; // clear the array + + for (i=0; i < parts.length; i++) { + a.length = 0; + b.length = 0; + + list.push(i); + for (h = 1; list.length < j; h++) { + list.unshift(parts.length - h); + } - for (k=0; k < parts.length; k++) { - if (list.indexOf(k) >= 0) { - a.push(parts[k]); - b.push(parts[k]); - } else { - if (k === 0 || a[a.length - 1] !== "#") { - a.push("#"); - } - b.push("+"); + for (k=0; k < parts.length; k++) { + if (list.indexOf(k) >= 0) { + a.push(parts[k]); + b.push(parts[k]); + } else { + if (k === 0 || a[a.length - 1] !== "#") { + a.push("#"); } + b.push("+"); } - - patterns.push(a.join("/")); - patterns.push(b.join("/")); - list.shift(); } + + patterns.push(a.join("/")); + patterns.push(b.join("/")); + 
list.shift(); } + } - return patterns; + return patterns; +} + +/** + * Generate the possible patterns that might match a topic. + * Memozied version. + * + * @param {String} the topic + * @return the list of the patterns + */ +function topicPatterns(topic) { + var result = cache.get(topic); + if (!result) { + result = _topicPatterns(topic); } + cache.set(topic, result); + return result; +} + -}; +module.exports.topicPatterns = topicPatterns; From 0f4bebc7235d8e790ea927f1758f7867dec3529e Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Tue, 25 Jun 2013 11:20:46 +0100 Subject: [PATCH 24/30] Implemtened CLI support for persistance. --- lib/cli.js | 12 +++++++++++- lib/persistance/abstract.js | 1 + lib/persistance/levelup.js | 6 +++--- lib/persistance/memory.js | 3 ++- lib/server.js | 4 ++++ test/cli_spec.js | 24 ++++++++++++++++++++++++ test/persistance/levelup_spec.js | 5 +++-- 7 files changed, 48 insertions(+), 7 deletions(-) diff --git a/lib/cli.js b/lib/cli.js index 84b54e5..4450edb 100644 --- a/lib/cli.js +++ b/lib/cli.js @@ -6,6 +6,7 @@ var fs = require("fs"); var async = require("async"); var bunyan = require("bunyan"); var Server = require("./server"); +var persistance = require("./persistance"); /** * Load a new Authorizer @@ -51,7 +52,10 @@ function start(program, callback) { var opts = { backend: {}, - logger: {} + logger: {}, + persistance: { + factory: persistance.Memory + } }; opts.port = program.port; @@ -81,6 +85,11 @@ function start(program, callback) { opts.logger.level = 20; } + if (program.db) { + opts.persistance.path = program.db; + opts.persistance.factory = persistance.LevelUp; + } + var setupAuthorizer = function(cb) { process.on("SIGHUP", setupAuthorizer); server.on("closed", function() { @@ -146,6 +155,7 @@ module.exports = function cli(argv, callback) { .option("--authorize-publish ", "the pattern for publishing to topics for the added user") .option("--authorize-subscribe ", "the pattern for subscribing to topics for the added user") 
.option("-c, --config ", "the config file to use (override every other option)") + .option("-d, --db ", "the path were to store the database") .option("-v, --verbose", "set the bunyan log to INFO") .option("--very-verbose", "set the bunyan log to DEBUG"); diff --git a/lib/persistance/abstract.js b/lib/persistance/abstract.js index 262a65e..3f8c98f 100644 --- a/lib/persistance/abstract.js +++ b/lib/persistance/abstract.js @@ -7,6 +7,7 @@ function AbstractPersistence() { AbstractPersistence.prototype.wire = function(server) { var that = this; var nop = function() {}; + server.persistance = this; server.on("published", function(packet) { if (packet.retain) { diff --git a/lib/persistance/levelup.js b/lib/persistance/levelup.js index 3d8685d..428a89d 100644 --- a/lib/persistance/levelup.js +++ b/lib/persistance/levelup.js @@ -9,9 +9,9 @@ var ttl = require('level-ttl'); var Qlobber = require("qlobber").Qlobber; var async = require("async"); -function LevelUpPersistance(path, options) { +function LevelUpPersistance(options) { if (!(this instanceof LevelUpPersistance)) { - return new LevelUpPersistance(path, options); + return new LevelUpPersistance(options); } options = options || {}; options.valueEncoding = "json"; @@ -27,7 +27,7 @@ function LevelUpPersistance(path, options) { options.ttl.checkFrequency = options.ttl.checkFrequency || 60 * 1000; this.options = options; - this.db = ttl(levelup(path, options), options.ttl); + this.db = ttl(levelup(options.path, options), options.ttl); this._retained = this.db.sublevel("retained"); this._clientSubscriptions = this.db.sublevel("clientSubscriptions"); this._subscriptions = this.db.sublevel("subscriptions"); diff --git a/lib/persistance/memory.js b/lib/persistance/memory.js index edcf34b..646aa0c 100644 --- a/lib/persistance/memory.js +++ b/lib/persistance/memory.js @@ -12,7 +12,8 @@ function MemoryPersistance(options) { options = options || {}; options.db = factory; - LevelUpPersistance.call(this, "RAM", options); + 
options.path = "RAM"; + LevelUpPersistance.call(this, options); } util.inherits(MemoryPersistance, LevelUpPersistance); diff --git a/lib/server.js b/lib/server.js index 993f3d9..270b6ac 100644 --- a/lib/server.js +++ b/lib/server.js @@ -53,6 +53,10 @@ function Server(opts, callback) { packet: packetSerializer }; + if (this.opts.persistance && this.opts.persistance.factory) { + this.opts.persistance.factory(this.opts.persistance).wire(this); + } + callback = callback || function() {}; this.clients = {}; diff --git a/test/cli_spec.js b/test/cli_spec.js index f0359a4..2a0262d 100644 --- a/test/cli_spec.js +++ b/test/cli_spec.js @@ -389,4 +389,28 @@ describe("mosca.cli", function() { }); }); }); + + it("should create a memory persistance object", function(done) { + var s = startServer(done, function(server) { + expect(server.persistance).to.be.instanceOf(mosca.persistance.Memory); + }); + }); + + it("should create a leveldb with the --db flag", function(done) { + + tmp.dir(function (err, path, fd) { + if (err) { + done(err); + return; + } + + args.push("--db"); + args.push(path); + + startServer(done, function(server) { + expect(server.persistance).to.be.instanceOf(mosca.persistance.LevelUp); + expect(server.persistance.options.path).to.eql(path); + }); + }); + }); }); diff --git a/test/persistance/levelup_spec.js b/test/persistance/levelup_spec.js index be0b118..7d2c277 100644 --- a/test/persistance/levelup_spec.js +++ b/test/persistance/levelup_spec.js @@ -23,7 +23,8 @@ describe("mosca.persistance.LevelUp", function() { } that.path = path; - cb(null, new LevelUp(path, opts), opts); + opts.path = path; + cb(null, new LevelUp(opts), opts); }); }); @@ -50,7 +51,7 @@ describe("mosca.persistance.LevelUp", function() { this.instance.storeSubscriptions(client, function() { that.instance.close(function() { - that.instance = new LevelUp(that.path, opts); + that.instance = new LevelUp(opts); setTimeout(function() { that.instance.storeOfflinePacket(packet, function() { 
that.instance.streamOfflinePackets(client, function(err, p) { From 5ec3c83dc068a8c4a172e30e73562ae47122723e Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Tue, 25 Jun 2013 11:35:41 +0100 Subject: [PATCH 25/30] Added benchmark support for uncleaned clients. --- .gitignore | 1 + benchmarks/single_pub_sub.js | 3 +- db-mosquitto.conf | 675 +++++++++++++++++++++++++++++++++++ 3 files changed, 678 insertions(+), 1 deletion(-) create mode 100644 db-mosquitto.conf diff --git a/.gitignore b/.gitignore index 4f5ad6b..5633570 100644 --- a/.gitignore +++ b/.gitignore @@ -14,3 +14,4 @@ results npm-debug.log node_modules docs +mosquitto.db diff --git a/benchmarks/single_pub_sub.js b/benchmarks/single_pub_sub.js index 2bd3a76..2defe19 100755 --- a/benchmarks/single_pub_sub.js +++ b/benchmarks/single_pub_sub.js @@ -8,7 +8,7 @@ var mqtt = require("mqtt"); function setup(done) { - var client = mqtt.createClient(1883, "localhost", { clean: true }); + var client = mqtt.createClient(1883, "localhost", { clean: program.clean }); client.on("connect", function () { client.subscribe("hello", { qos: program.qos }, function () { @@ -41,6 +41,7 @@ function bench(client, done) { } program + .option("--clean", "use clean clients") .option("--header", "add header") .option("-r, --runs ", "the number of runs to execute", parseInt, 10) .option("-q, --qos ", "the QoS level (0, 1, 2)", parseInt, 0) diff --git a/db-mosquitto.conf b/db-mosquitto.conf new file mode 100644 index 0000000..38a6eee --- /dev/null +++ b/db-mosquitto.conf @@ -0,0 +1,675 @@ +# Config file for mosquitto +# +# See mosquitto.conf(5) for more information. +# +# Default values are shown, uncomment to change. +# +# Use the # character to indicate a comment, but only if it is the +# very first character on the line. 
+ +# ================================================================= +# General configuration +# ================================================================= + +# Time in seconds to wait before resending an outgoing QoS=1 or +# QoS=2 message. +#retry_interval 20 + +# Time in seconds between updates of the $SYS tree. +#sys_interval 10 + +# Time in seconds between cleaning the internal message store of +# unreferenced messages. Lower values will result in lower memory +# usage but more processor time, higher values will have the +# opposite effect. +# Setting a value of 0 means the unreferenced messages will be +# disposed of as quickly as possible. +#store_clean_interval 10 + +# Write process id to a file. Default is a blank string which means +# a pid file shouldn't be written. +# This should be set to /var/run/mosquitto.pid if mosquitto is +# being run automatically on boot with an init script and +# start-stop-daemon or similar. +#pid_file + +# When run as root, drop privileges to this user and its primary +# group. +# Leave blank to stay as root, but this is not recommended. +# If run as a non-root user, this setting has no effect. +# Note that on Windows this has no effect and so mosquitto should +# be started by the user you wish it to run as. +#user mosquitto + +# The maximum number of QoS 1 and 2 messages currently inflight per +# client. +# This includes messages that are partway through handshakes and +# those that are being retried. Defaults to 20. Set to 0 for no +# maximum. Setting to 1 will guarantee in-order delivery of QoS 1 +# and 2 messages. +#max_inflight_messages 20 + +# The maximum number of QoS 1 and 2 messages to hold in a queue +# above those that are currently in-flight. Defaults to 100. Set +# to 0 for no maximum (not recommended). +# See also queue_qos0_messages. +#max_queued_messages 100 + +# Set to true to queue messages with QoS 0 when a persistent client is +# disconnected. 
These messages are included in the limit imposed by +# max_queued_messages. +# Defaults to false. +# Note that the MQTT v3.1 spec states that only QoS 1 and 2 messages +# should be saved in this situation so this is a non-standard option. +#queue_qos0_messages false + +# This option allows persistent clients (those with clean session set to false) +# to be removed if they do not reconnect within a certain time frame. This is a +# non-standard option. As far as the MQTT spec is concerned, persistent clients +# persist forever. +# Badly designed clients may set clean session to false whilst using a randomly +# generated client id. This leads to persistent clients that will never +# reconnect. This option allows these clients to be removed. +# +# The expiration period should be an integer followed by one of d w m y for +# day, week, month and year respectively. For example +# +# persistent_client_expiration 2m +# persistent_client_expiration 14d +# persistent_client_expiration 1y +# +# As this is a non-standard option, the default if not set is to never expire +# persistent clients. +#persistent_client_expiration + +# If a client is subscribed to multiple subscriptions that overlap, e.g. foo/# +# and foo/+/baz , then MQTT expects that when the broker receives a message on +# a topic that matches both subscriptions, such as foo/bar/baz, then the client +# should only receive the message once. +# Mosquitto keeps track of which clients a message has been sent to in order to +# meet this requirement. The allow_duplicate_messages option allows this +# behaviour to be disabled, which may be useful if you have a large number of +# clients subscribed to the same set of topics and are very concerned about +# minimising memory usage. +# It can be safely set to true if you know in advance that your clients will +# never have overlapping subscriptions, otherwise your clients must be able to +# correctly deal with duplicate messages even when then have QoS=2. 
+#allow_duplicate_messages false + +# ================================================================= +# Default listener +# ================================================================= + +# IP address/hostname to bind the default listener to. If not +# given, the default listener will not be bound to a specific +# address and so will be accessible to all network interfaces. +# bind_address ip-address/host name +#bind_address + +# Port to use for the default listener. +#port 1883 + +# The maximum number of client connections to allow. This is +# a per listener setting. +# Default is -1, which means unlimited connections. +# Note that other process limits mean that unlimited connections +# are not really possible. Typically the default maximum number of +# connections possible is around 1024. +#max_connections -1 + +# ----------------------------------------------------------------- +# Certificate based SSL/TLS support +# ----------------------------------------------------------------- +# The following options can be used to enable SSL/TLS support for +# this listener. Note that the recommended port for MQTT over TLS +# is 8883, but this must be set manually. +# +# See also the mosquitto-tls man page. + +# At least one of cafile or capath must be defined. They both +# define methods of accessing the PEM encoded Certificate +# Authority certificates that have signed your server certificate +# and that you wish to trust. +# cafile defines the path to a file containing the CA certificates. +# capath defines a directory that will be searched for files +# containing the CA certificates. For capath to work correctly, the +# certificate files must have ".crt" as the file ending and you must run +# "c_rehash " each time you add/remove a certificate. +#cafile +#capath + +# Path to the PEM encoded server certificate. +#certfile + +# Path to the PEM encoded keyfile. 
+#keyfile + +# By default a TLS enabled listener will operate in a similar fashion to a +# https enabled web server, in that the server has a certificate signed by a CA +# and the client will verify that it is a trusted certificate. The overall aim +# is encryption of the network traffic. By setting require_certificate to true, +# the client must provide a valid certificate in order for the network +# connection to proceed. This allows access to the broker to be controlled +# outside of the mechanisms provided by MQTT. +#require_certificate false + +# If require_certificate is true, you may set use_identity_as_username to true +# to use the CN value from the client certificate as a username. If this is +# true, the password_file option will not be used for this listener. +#use_identity_as_username false + +# If you have require_certificate set to true, you can create a certificate +# revocation list file to revoke access to particular client certificates. If +# you have done this, use crlfile to point to the PEM encoded revocation file. +#crlfile + +# If you wish to control which encryption ciphers are used, use the ciphers +# option. The list of available ciphers can be optained using the "openssl +# ciphers" command and should be provided in the same format as the output of +# that command. +#ciphers + +# ----------------------------------------------------------------- +# Pre-shared-key based SSL/TLS support +# ----------------------------------------------------------------- +# The following options can be used to enable PSK based SSL/TLS support for +# this listener. Note that the recommended port for MQTT over TLS is 8883, but +# this must be set manually. +# +# See also the mosquitto-tls man page and the "Certificate based SSL/TLS +# support" section. Only one of certificate or PSK encryption support can be +# enabled for any listener. 
+ +# The psk_hint option enables pre-shared-key support for this listener and also +# acts as an identifier for this listener. The hint is sent to clients and may +# be used locally to aid authentication. The hint is a free form string that +# doesn't have much meaning in itself, so feel free to be creative. +# If this option is provided, see psk_file to define the pre-shared keys to be +# used or create a security plugin to handle them. +#psk_hint + +# Set use_identity_as_username to have the psk identity sent by the client used +# as its username. Authentication will be carried out using the PSK rather than +# the MQTT username/password and so password_file will not be used for this +# listener. +#use_identity_as_username false + +# When using PSK, the encryption ciphers used will be chosen from the list of +# available PSK ciphers. If you want to control which ciphers are available, +# use the "ciphers" option. The list of available ciphers can be optained +# using the "openssl ciphers" command and should be provided in the same format +# as the output of that command. +#ciphers + +# ================================================================= +# Extra listeners +# ================================================================= + +# Listen on a port/ip address combination. By using this variable +# multiple times, mosquitto can listen on more than one port. If +# this variable is used and neither bind_address nor port given, +# then the default listener will not be started. +# The port number to listen on must be given. Optionally, an ip +# address or host name may be supplied as a second argument. In +# this case, mosquitto will attempt to bind the listener to that +# address and so restrict access to the associated network and +# interface. By default, mosquitto will listen on all interfaces. +# listener port-number [ip address/host name] +#listener + +# The maximum number of client connections to allow. This is +# a per listener setting. 
+# Default is -1, which means unlimited connections. +# Note that other process limits mean that unlimited connections +# are not really possible. Typically the default maximum number of +# connections possible is around 1024. +#max_connections -1 + +# The listener can be restricted to operating within a topic hierarchy using +# the mount_point option. This is achieved be prefixing the mount_point string +# to all topics for any clients connected to this listener. This prefixing only +# happens internally to the broker; the client will not see the prefix. +#mount_point + +# ----------------------------------------------------------------- +# Certificate based SSL/TLS support +# ----------------------------------------------------------------- +# The following options can be used to enable certificate based SSL/TLS support +# for this listener. Note that the recommended port for MQTT over TLS is 8883, +# but this must be set manually. +# +# See also the mosquitto-tls man page and the "Pre-shared-key based SSL/TLS +# support" section. Only one of certificate or PSK encryption support can be +# enabled for any listener. + +# At least one of cafile or capath must be defined to enable certificate based +# TLS encryption. They both define methods of accessing the PEM encoded +# Certificate Authority certificates that have signed your server certificate +# and that you wish to trust. +# cafile defines the path to a file containing the CA certificates. +# capath defines a directory that will be searched for files +# containing the CA certificates. For capath to work correctly, the +# certificate files must have ".crt" as the file ending and you must run +# "c_rehash " each time you add/remove a certificate. +#cafile +#capath + +# Path to the PEM encoded server certificate. +#certfile + +# Path to the PEM encoded keyfile. 
+#keyfile + +# By default an TLS enabled listener will operate in a similar fashion to a +# https enabled web server, in that the server has a certificate signed by a CA +# and the client will verify that it is a trusted certificate. The overall aim +# is encryption of the network traffic. By setting require_certificate to true, +# the client must provide a valid certificate in order for the network +# connection to proceed. This allows access to the broker to be controlled +# outside of the mechanisms provided by MQTT. +#require_certificate false + +# If require_certificate is true, you may set use_identity_as_username to true +# to use the CN value from the client certificate as a username. If this is +# true, the password_file option will not be used for this listener. +#use_identity_as_username false + +# If you have require_certificate set to true, you can create a certificate +# revocation list file to revoke access to particular client certificates. If +# you have done this, use crlfile to point to the PEM encoded revocation file. +#crlfile + +# If you wish to control which encryption ciphers are used, use the ciphers +# option. The list of available ciphers can be optained using the "openssl +# ciphers" command and should be provided in the same format as the output of +# that command. +#ciphers + +# ----------------------------------------------------------------- +# Pre-shared-key based SSL/TLS support +# ----------------------------------------------------------------- +# The following options can be used to enable PSK based SSL/TLS support for +# this listener. Note that the recommended port for MQTT over TLS is 8883, but +# this must be set manually. +# +# See also the mosquitto-tls man page and the "Certificate based SSL/TLS +# support" section. Only one of certificate or PSK encryption support can be +# enabled for any listener. 
+ +# The psk_hint option enables pre-shared-key support for this listener and also +# acts as an identifier for this listener. The hint is sent to clients and may +# be used locally to aid authentication. The hint is a free form string that +# doesn't have much meaning in itself, so feel free to be creative. +# If this option is provided, see psk_file to define the pre-shared keys to be +# used or create a security plugin to handle them. +#psk_hint + +# Set use_identity_as_username to have the psk identity sent by the client used +# as its username. Authentication will be carried out using the PSK rather than +# the MQTT username/password and so password_file will not be used for this +# listener. +#use_identity_as_username false + +# When using PSK, the encryption ciphers used will be chosen from the list of +# available PSK ciphers. If you want to control which ciphers are available, +# use the "ciphers" option. The list of available ciphers can be optained +# using the "openssl ciphers" command and should be provided in the same format +# as the output of that command. +#ciphers + +# ================================================================= +# Persistence +# ================================================================= + +# If persistence is enabled, save the in-memory database to disk +# every autosave_interval seconds. If set to 0, the persistence +# database will only be written when mosquitto exits. See also +# autosave_on_changes. +# Note that writing of the persistence database can be forced by +# sending mosquitto a SIGUSR1 signal. +#autosave_interval 1800 + +# If true, mosquitto will count the number of subscription changes, retained +# messages received and queued messages and if the total exceeds +# autosave_interval then the in-memory database will be saved to disk. +# If false, mosquitto will save the in-memory database to disk by treating +# autosave_interval as a time in seconds. 
+#autosave_on_changes false + +# Save persistent message data to disk (true/false). +# This saves information about all messages, including +# subscriptions, currently in-flight messages and retained +# messages. +# retained_persistence is a synonym for this option. +persistence true + +# The filename to use for the persistent database, not including +# the path. +persistence_file mosquitto.db + +# Location for persistent database. Must include trailing / +# Default is an empty string (current directory). +# Set to /var/lib/mosquitto/ if running as a proper service. +#persistence_location + +# ================================================================= +# Logging +# ================================================================= + +# Places to log to. Use multiple log_dest lines for multiple +# logging destinations. +# Possible destinations are: stdout stderr syslog topic +# stdout and stderr log to the console on the named output. +# syslog uses the userspace syslog facility which usually ends up +# in /var/log/messages or similar. +# topic logs to the broker topic '$SYS/broker/log/', +# where severity is one of D, E, W, N, I which are debug, error, +# warning, notice and information. +# Note that if the broker is running as a Windows service it will default to +# "log_dest none" and neither stdout nor stderr logging is available. +# Use "log_dest none" if you wish to disable logging. +#log_dest stderr + +# Types of messages to log. Use multiple log_type lines for logging +# multiple types of messages. +# Possible types are: debug, error, warning, notice, information, +# none. +# Note that debug type messages are for decoding the incoming +# network packets. +# They are not logged in syslog. +#log_type error +#log_type warning +#log_type notice +#log_type information +log_type none + +# If set to true, client connection and disconnection messages will be included +# in the log. 
+#connection_messages true + +# If set to true, add a timestamp value to each log message. +#log_timestamp true + +# ================================================================= +# Security +# ================================================================= + +# If set, only clients that have a matching prefix on their +# clientid will be allowed to connect to the broker. By default, +# all clients may connect. +# For example, setting "secure-" here would mean a client "secure- +# client" could connect but another with clientid "mqtt" couldn't. +#clientid_prefixes + +# Boolean value that determines whether clients that connect +# without providing a username are allowed to connect. If set to +# false then a password file should be created (see the +# password_file option) to control authenticated client access. +# Defaults to true. +#allow_anonymous true + +# In addition to the clientid_prefixes, allow_anonymous and TLS +# authentication options, username based authentication is also +# possible. The default support is described in "Default +# authentication and topic access control" below. The auth_plugin +# allows another authentication method to be used. +# Specify the path to the loadable plugin and see the +# "Authentication and topic access plugin options" section below. +#auth_plugin + +# ----------------------------------------------------------------- +# Default authentication and topic access control +# ----------------------------------------------------------------- + +# Control access to the broker using a password file. This file can be +# generated using the mosquitto_passwd utility. If TLS support is not compiled +# into mosquitto (it is recommended that TLS support should be included) then +# plain text passwords are used, in which case the file should be a text file +# with lines in the format: +# username:password +# The password (and colon) may be omitted if desired, although this +# offers very little in the way of security. 
+# +# See the TLS client require_certificate and use_identity_as_username options +# for alternative authentication options. +#password_file + +# Access may also be controlled using a pre-shared-key file. This requires +# TLS-PSK support and a listener configured to use it. The file should be text +# lines in the format: +# identity:key +# The key should be in hexadecimal format without a leading "0x". +#psk_file + +# Control access to topics on the broker using an access control list +# file. If this parameter is defined then only the topics listed will +# have access. +# If the first character of a line of the ACL file is a # it is treated as a +# comment. +# Topic access is added with lines of the format: +# +# topic [read|write] +# +# The access type is controlled using "read" or "write". This parameter +# is optional - if not given then the access is read/write. +# can contain the + or # wildcards as in subscriptions. +# +# The first set of topics are applied to anonymous clients, assuming +# allow_anonymous is true. User specific topic ACLs are added after a +# user line as follows: +# +# user +# +# The username referred to here is the same as in password_file. It is +# not the clientid. +# +# +# If is also possible to define ACLs based on pattern substitution within the +# topic. The patterns available for substition are: +# +# %c to match the client id of the client +# %u to match the username of the client +# +# The substitution pattern must be the only text for that level of hierarchy. +# +# The form is the same as for the topic keyword, but using pattern as the +# keyword. +# Pattern ACLs apply to all users even if the "user" keyword has previously +# been given. 
+# +# pattern [read|write] +# +# Example: +# +# pattern write sensor/%u/data +# +#acl_file + +# ----------------------------------------------------------------- +# Authentication and topic access plugin options +# ----------------------------------------------------------------- + +# If the auth_plugin option above is used, define options to pass to the +# plugin here as described by the plugin instructions. All options named +# using the format auth_opt_* will be passed to the plugin, for example: +# +# auth_opt_db_host +# auth_opt_db_port +# auth_opt_db_username +# auth_opt_db_password + + +# ================================================================= +# Bridges +# ================================================================= + +# A bridge is a way of connecting multiple MQTT brokers together. +# Create a new bridge using the "connection" option as described below. Set +# options for the bridges using the remaining parameters. You must specify the +# address and at least one topic to subscribe to. +# Each connection must have a unique name. +# Only a single address per configuration is currently supported, +# unlike in rsmb. +# The direction that the topic will be shared can be chosen by +# specifying out, in or both, where the default value is out. +# The QoS level of the bridged communication can be specified with the next +# topic option. The default QoS level is 0, to change the QoS the topic +# direction must also be given. +# The local and remote prefix options allow a topic to be remapped when it is +# bridged to/from the remote broker. This provides the ability to place a topic +# tree in an appropriate location. +# For more details see the mosquitto.conf man page. +# Multiple topics can be specified per connection, but be careful +# not to create any loops. +# If you are using bridges with cleansession set to false (the default), then +# you may get unexpected behaviour from incoming topics if you change what +# topics you are subscribing to. 
This is because the remote broker keeps the +# subscription for the old topic. If you have this problem, connect your bridge +# with cleansession set to true, then reconnect with cleansession set to false +# as normal. +#connection +#address [:] +#topic [[[out | in | both] qos-level] local-prefix remote-prefix] + +# Set the client id for this bridge connection. If not defined, +# this defaults to 'name.hostname' where name is the connection +# name and hostname is the hostname of this computer. +#clientid + +# Set the clean session variable for this bridge. +# When set to true, when the bridge disconnects for any reason, all +# messages and subscriptions will be cleaned up on the remote +# broker. Note that with cleansession set to true, there may be a +# significant amount of retained messages sent when the bridge +# reconnects after losing its connection. +# When set to false, the subscriptions and messages are kept on the +# remote broker, and delivered when the bridge reconnects. +#cleansession false + +# If set to true, publish notification messages to the local and remote brokers +# giving information about the state of the bridge connection. Retained +# messages are published to the topic $SYS/broker/connection//state +# unless the notification_topic option is used. +# If the message is 1 then the connection is active, or 0 if the connection has +# failed. +#notifications true + +# Choose the topic on which notification messages for this bridge are +# published. If not set, messages are published on the topic +# $SYS/broker/connection//state +#notification_topic + +# Set the keepalive interval for this bridge connection, in +# seconds. +#keepalive_interval 60 + +# Set the start type of the bridge. This controls how the bridge starts and +# can be one of three types: automatic, lazy and once. Note that RSMB provides +# a fourth start type "manual" which isn't currently supported by mosquitto. 
+# +# "automatic" is the default start type and means that the bridge connection +# will be started automatically when the broker starts and also restarted +# after a short delay (30 seconds) if the connection fails. +# +# Bridges using the "lazy" start type will be started automatically when the +# number of queued messages exceeds the number set with the "threshold" +# parameter. It will be stopped automatically after the time set by the +# "idle_timeout" parameter. Use this start type if you wish the connection to +# only be active when it is needed. +# +# A bridge using the "once" start type will be started automatically when the +# broker starts but will not be restarted if the connection fails. +#start_type automatic + +# Set the amount of time a bridge using the automatic start type will wait +# until attempting to reconnect. Defaults to 30 seconds. +#restart_timeout 30 + +# Set the amount of time a bridge using the lazy start type must be idle before +# it will be stopped. Defaults to 60 seconds. +#idle_timeout 60 + +# Set the number of messages that need to be queued for a bridge with lazy +# start type to be restarted. Defaults to 10 messages. +# Must be less than max_queued_messages. +#threshold 10 + +# If try_private is set to true, the bridge will attempt to indicate to the +# remote broker that it is a bridge not an ordinary client. If successful, this +# means that loop detection will be more effective and that retained messages +# will be propagated correctly. Not all brokers support this feature so it may +# be necessary to set try_private to false if your bridge does not connect +# properly. +#try_private true + +# Set the username to use when connecting to an MQTT v3.1 broker +# that requires authentication. +#username + +# Set the password to use when connecting to an MQTT v3.1 broker +# that requires authentication. This option is only used if +# username is also set. 
+#password + +# ----------------------------------------------------------------- +# Certificate based SSL/TLS support +# ----------------------------------------------------------------- +# Either bridge_cafile or bridge_capath must be defined to enable TLS support +# for this bridge. +# bridge_cafile defines the path to a file containing the +# Certificate Authority certificates that have signed the remote broker +# certificate. +# bridge_capath defines a directory that will be searched for files containing +# the CA certificates. For bridge_capath to work correctly, the certificate +# files must have ".crt" as the file ending and you must run "c_rehash " each time you add/remove a certificate. +#bridge_cafile +#bridge_capath + +# Path to the PEM encoded client certificate, if required by the remote broker. +#bridge_certfile + +# Path to the PEM encoded client private key, if required by the remote broker. +#bridge_keyfile + +# ----------------------------------------------------------------- +# PSK based SSL/TLS support +# ----------------------------------------------------------------- +# Pre-shared-key encryption provides an alternative to certificate based +# encryption. A bridge can be configured to use PSK with the bridge_identity +# and bridge_psk options. These are the client PSK identity, and pre-shared-key +# in hexadecimal format with no "0x". Only one of certificate and PSK based +# encryption can be used on one +# bridge at once. +#bridge_identity +#bridge_psk + + +# ================================================================= +# External config files +# ================================================================= + +# External configuration files may be included by using the +# include_dir option. This defines a directory that will be searched +# for config files. All files that end in '.conf' will be loaded as +# a configuration file. It is best to have this as the last option +# in the main file. 
This option will only be processed from the main +# configuration file. The directory specified must not contain the +# main configuration file. +#include_dir + +# ================================================================= +# Unsupported rsmb options - for the future +# ================================================================= + +#addresses +#round_robin + +# ================================================================= +# rsmb options - unlikely to ever be supported +# ================================================================= + +#ffdc_output +#max_log_entries +#trace_level +#trace_output From b93bf045860a5ec1b7cd8385ffabe4e2412048a9 Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Tue, 25 Jun 2013 13:15:38 +0100 Subject: [PATCH 26/30] Refactored inflight support in Client to support storing them on disconnection. --- lib/client.js | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/lib/client.js b/lib/client.js index c02e723..81f89ae 100644 --- a/lib/client.js +++ b/lib/client.js @@ -116,6 +116,7 @@ Client.prototype.setUpTimer = function() { */ Client.prototype.actualSend = function(packet, retry) { var that = this; + var timer; if (that._closed) { this.logger.warn({ packet: packet, retry: retry }, "tryint to send a packet to a disconnected client"); @@ -130,12 +131,18 @@ Client.prototype.actualSend = function(packet, retry) { if (packet.qos === 1) { this.logger.debug({ packet: packet, retry: retry }, "setting up the resend timer"); - this.inflight[packet.messageId] = setTimeout(function() { + + timer = setTimeout(function() { retry++; that.actualSend(packet, retry); // exponential backoff algorithm }, this.server.opts.baseRetryTimeout * Math.pow(2, retry)); + + this.inflight[packet.messageId] = { + packet: packet, + timer: timer + }; } } }; @@ -257,7 +264,7 @@ Client.prototype.handlePuback = function(packet) { logger.debug({ packet: packet }, "puback"); if (this.inflight[packet.messageId]) { - 
clearTimeout(this.inflight[packet.messageId]); + clearTimeout(this.inflight[packet.messageId].timer); delete this.inflight[packet.messageId]; } else { logger.warn({ packet: packet }, "no such packet"); @@ -438,6 +445,9 @@ Client.prototype.close = function(callback) { var cleanup = function() { that._closed = true; + that.connection.removeAllListeners(); + that.server.emit("clientDisconnected", that); + // clears the inflights timeout here // as otherwise there might be one issued // after calling end() @@ -446,8 +456,6 @@ Client.prototype.close = function(callback) { delete that.inflight[id]; }); - that.connection.removeAllListeners(); - that.server.emit("clientDisconnected", that); if (callback) { callback(); } From 722af400935494189743af8e7f886b6a863bd2fc Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Tue, 25 Jun 2013 13:47:00 +0100 Subject: [PATCH 27/30] Added support for storing inflight packets for client. --- lib/persistance/abstract.js | 12 +++++++ lib/persistance/levelup.js | 16 ++++++---- lib/persistance/mongo.js | 12 ++++--- lib/persistance/redis.js | 6 +++- test/persistance/abstract.js | 61 ++++++++++++++++++++++++++++++++++++ 5 files changed, 96 insertions(+), 11 deletions(-) diff --git a/lib/persistance/abstract.js b/lib/persistance/abstract.js index 3f8c98f..294df4b 100644 --- a/lib/persistance/abstract.js +++ b/lib/persistance/abstract.js @@ -1,5 +1,7 @@ "use strict"; +var async = require("async"); + function AbstractPersistence() { } @@ -60,7 +62,17 @@ AbstractPersistence.prototype.wire = function(server) { server.on("clientDisconnecting", function(client) { that.storeSubscriptions(client); + that.storeInflightPackets(client); }); }; +AbstractPersistence.prototype.storeInflightPackets = function(client, done) { + if (client.inflight) { + var that = this; + async.each(Object.keys(client.inflight), function(key, cb) { + that._storePacket(client.id, client.inflight[key].packet, cb); + }, done); + } +}; + module.exports = AbstractPersistence; diff 
--git a/lib/persistance/levelup.js b/lib/persistance/levelup.js index 428a89d..6f27511 100644 --- a/lib/persistance/levelup.js +++ b/lib/persistance/levelup.js @@ -127,18 +127,13 @@ LevelUpPersistance.prototype.lookupSubscriptions = function(client, done) { LevelUpPersistance.prototype.storeOfflinePacket = function(packet, done) { var that = this; var subs = this._subLobber.match(packet.topic); - var ttl = { - ttl: that.options.ttl.subscriptions - }; async.each(subs, function(key, cb) { that._subscriptions.get(key, function(err, sub) { if (err) { return cb(err); } - var key = util.format("%s:%s", sub.client, new Date().toISOString()); - that._offlinePackets.put( - key, packet, ttl, cb); + that._storePacket(sub.client, packet, cb); }); }, done); }; @@ -165,6 +160,15 @@ LevelUpPersistance.prototype.streamOfflinePackets = function(client, cb, done) { } }; +LevelUpPersistance.prototype._storePacket = function(client, packet, cb) { + var key = util.format("%s:%s", client, new Date().toISOString()); + var ttl = { + ttl: this.options.ttl.subscriptions + }; + this._offlinePackets.put( + key, packet, ttl, cb); +}; + LevelUpPersistance.prototype.close = function(cb) { this.db.close(cb); }; diff --git a/lib/persistance/mongo.js b/lib/persistance/mongo.js index e44e116..066b0cd 100644 --- a/lib/persistance/mongo.js +++ b/lib/persistance/mongo.js @@ -160,10 +160,7 @@ MongoPersistance.prototype.storeOfflinePacket = function(packet, done) { stream.on("data", function(data) { started++; - that._packets.insert({ - client: data.client, - packet: packet - }, function(err) { + that._storePacket(data.client, packet, function(err) { if (err) { return stream.emit("error", err); } @@ -185,6 +182,13 @@ MongoPersistance.prototype.storeOfflinePacket = function(packet, done) { }); }; +MongoPersistance.prototype._storePacket = function(client, packet, cb) { + this._packets.insert({ + client: client, + packet: packet + }, cb); +}; + MongoPersistance.prototype.streamOfflinePackets = 
function(client, cb) { if (client.clean) { return; diff --git a/lib/persistance/redis.js b/lib/persistance/redis.js index 0ba1ddd..7ab367b 100644 --- a/lib/persistance/redis.js +++ b/lib/persistance/redis.js @@ -177,10 +177,14 @@ RedisPersistance.prototype.storeOfflinePacket = function(packet, done) { var matches = this._subLobber.match(packet.topic); async.each(matches, function(client, cb) { - that._client.lpush("packets:" + client, JSON.stringify(packet), cb); + that._storePacket(client, packet, cb); }, done); }; +RedisPersistance.prototype._storePacket = function(client, packet, cb) { + this._client.lpush("packets:" + client, JSON.stringify(packet), cb); +}; + RedisPersistance.prototype.streamOfflinePackets = function(client, cb) { var that = this; diff --git a/test/persistance/abstract.js b/test/persistance/abstract.js index 0818bf3..ca5eb3a 100644 --- a/test/persistance/abstract.js +++ b/test/persistance/abstract.js @@ -566,4 +566,65 @@ module.exports = function(create) { }); }); }); + + describe("inflight packets", function() { + var packet = { + topic: "hello", + qos: 0, + payload: "world", + messageId: 42 + }; + var client = { + id: "my client id - 42", + clean: false, + subscriptions: { + hello: { + qos: 1 + } + }, + inflight: { + 42: { packet: packet } + } + }; + + it("should store one inflight packet", function(done) { + this.instance.storeInflightPackets(client, done); + }); + + it("should store and stream an inflight packet", function(done) { + var instance = this.instance; + instance.storeInflightPackets(client, function() { + instance.streamOfflinePackets(client, function(err, p) { + expect(p).to.eql(packet); + done(); + }); + }); + }); + + it("should delete the offline packets once streamed", function(done) { + var instance = this.instance; + instance.storeInflightPackets(client, function() { + instance.streamOfflinePackets(client, function(err, p) { + instance.streamOfflinePackets(client, function(err, p2) { + done(new Error("this should never be 
called")); + }); + done(); + }); + }); + }); + + it("should wire itself up to the 'clientDisconnecting' event of a Server", function(done) { + var em = new EventEmitter(); + var instance = this.instance; + instance.wire(em); + + em.emit("clientDisconnecting", client); + + setTimeout(function() { + instance.streamOfflinePackets(client, function(err, packet) { + done(); + }); + }, 20); // 20ms will suffice + }); + }); }; From 89a136e998ad6b43f2bfd5469a2db8c3d6e0c83b Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Wed, 26 Jun 2013 16:08:36 +0100 Subject: [PATCH 28/30] Added logging to the persistance wiring. --- .jshintrc | 3 ++- lib/persistance/abstract.js | 5 ++++- test/persistance/abstract.js | 17 +++++++++++++++++ test/persistance/levelup_spec.js | 1 + 4 files changed, 24 insertions(+), 2 deletions(-) diff --git a/.jshintrc b/.jshintrc index 3729cb1..fdee7df 100644 --- a/.jshintrc +++ b/.jshintrc @@ -10,6 +10,7 @@ "describe": false, "mosca": false, "expect": false, - "before": false + "before": false, + "moscaSettings": false } } diff --git a/lib/persistance/abstract.js b/lib/persistance/abstract.js index 294df4b..5d8eb1c 100644 --- a/lib/persistance/abstract.js +++ b/lib/persistance/abstract.js @@ -42,6 +42,7 @@ AbstractPersistence.prototype.wire = function(server) { } Object.keys(subscriptions).forEach(function(topic) { + client.logger.info({ topic: topic, qos: subscriptions[topic].qos }, "restoring subscription"); client.handleAuthorizeSubscribe( null, true, { topic: topic, @@ -55,13 +56,15 @@ AbstractPersistence.prototype.wire = function(server) { client.emit("error", err); return; } - + client.logger.info({ packet: packet }, "Forwarding offline packet"); client.forward(packet.topic, packet.payload, packet, packet.topic); }); }); server.on("clientDisconnecting", function(client) { + client.logger.info("Storing offline subscriptions"); that.storeSubscriptions(client); + client.logger.info("Storing inflight packets"); that.storeInflightPackets(client); }); 
}; diff --git a/test/persistance/abstract.js b/test/persistance/abstract.js index ca5eb3a..579142c 100644 --- a/test/persistance/abstract.js +++ b/test/persistance/abstract.js @@ -140,6 +140,7 @@ module.exports = function(create) { }; var client = { + logger: moscaSettings().logger, forward: function(topic, payload, options, pattern) { expect(topic).to.eql(packet1.topic); expect(payload).to.eql(packet1.payload); @@ -166,6 +167,7 @@ module.exports = function(create) { var client = { id: "my client id - 42", clean: false, + logger: moscaSettings().logger, subscriptions: { hello: { qos: 1 @@ -179,6 +181,7 @@ module.exports = function(create) { var client = { id: "my client id - 42", clean: false, + logger: moscaSettings().logger, subscriptions: { hello: { qos: 1 @@ -196,6 +199,7 @@ module.exports = function(create) { var client = { id: "my client id - 42", clean: false, + logger: moscaSettings().logger, subscriptions: { hello: { qos: 1 @@ -216,6 +220,7 @@ module.exports = function(create) { var client = { id: "my client id - 42", clean: true, + logger: moscaSettings().logger, subscriptions: { hello: { qos: 1 @@ -237,6 +242,7 @@ module.exports = function(create) { var client = { id: "my client id - 42", clean: false, + logger: moscaSettings().logger, subscriptions: { hello: { qos: 1 @@ -258,6 +264,7 @@ module.exports = function(create) { var client = { id: "my client id - 42", clean: false, + logger: moscaSettings().logger, subscriptions: { hello: { qos: 1 @@ -284,6 +291,7 @@ module.exports = function(create) { var client = { id: "my client id - 42", clean: false, + logger: moscaSettings().logger, subscriptions: { hello: { qos: 1 @@ -313,6 +321,7 @@ module.exports = function(create) { var client = { id: "my client id - 42", clean: false, + logger: moscaSettings().logger, subscriptions: { hello: { qos: 1 @@ -337,6 +346,7 @@ module.exports = function(create) { var client = { id: "my client id - 42", clean: false, + logger: moscaSettings().logger, subscriptions: { hello: 
{ qos: 1 @@ -359,6 +369,7 @@ module.exports = function(create) { var client = { id: "my client id - 42", clean: false, + logger: moscaSettings().logger, subscriptions: { hello: { qos: 0 @@ -379,6 +390,7 @@ module.exports = function(create) { var client = { id: "my client id - 42", clean: false, + logger: moscaSettings().logger, subscriptions: { hello: { qos: 1 @@ -435,6 +447,7 @@ module.exports = function(create) { var client = { id: "my client id - 42", clean: false, + logger: moscaSettings().logger, subscriptions: { hello: { qos: 1 @@ -459,6 +472,7 @@ module.exports = function(create) { var client = { id: "my client id - 42", clean: false, + logger: moscaSettings().logger, subscriptions: { hello: 1 } @@ -481,6 +495,7 @@ module.exports = function(create) { var client = { id: "my client id - 42", clean: false, + logger: moscaSettings().logger, subscriptions: { hello: { qos: 1 @@ -538,6 +553,7 @@ module.exports = function(create) { var client = { id: "my client id - 42", clean: false, + logger: moscaSettings().logger, subscriptions: { "hello/#": { qos: 1 @@ -577,6 +593,7 @@ module.exports = function(create) { var client = { id: "my client id - 42", clean: false, + logger: moscaSettings().logger, subscriptions: { hello: { qos: 1 diff --git a/test/persistance/levelup_spec.js b/test/persistance/levelup_spec.js index 7d2c277..b0111ef 100644 --- a/test/persistance/levelup_spec.js +++ b/test/persistance/levelup_spec.js @@ -33,6 +33,7 @@ describe("mosca.persistance.LevelUp", function() { var client = { id: "my client id - 42", clean: false, + logger: moscaSettings().logger, subscriptions: { "hello/#": { qos: 1 From a0fac09024829bbaeb91d90dfd250f3bbb12d264 Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Wed, 26 Jun 2013 16:39:10 +0100 Subject: [PATCH 29/30] Added API comments. 
--- lib/persistance/abstract.js | 39 +++++++++++++++++++++++++++++++++++++ lib/persistance/index.js | 5 +++++ lib/persistance/levelup.js | 29 +++++++++++++++++++++++++++ lib/persistance/memory.js | 14 +++++++++++++ lib/persistance/mongo.js | 30 ++++++++++++++++++++++++++++ lib/persistance/redis.js | 30 ++++++++++++++++++++++++++++ 6 files changed, 147 insertions(+) diff --git a/lib/persistance/abstract.js b/lib/persistance/abstract.js index 5d8eb1c..fdd7c17 100644 --- a/lib/persistance/abstract.js +++ b/lib/persistance/abstract.js @@ -2,10 +2,22 @@ var async = require("async"); +/** + * An Abstract Mosca persistance implementation + * + * @api public + */ function AbstractPersistence() { } +/** + * This wires the Mosca server to a persistance, plugging in + * the persistance into the relevant Mosca events. + * + * @api public + * @param {Server} server The Mosca Server. + */ AbstractPersistence.prototype.wire = function(server) { var that = this; var nop = function() {}; @@ -69,13 +81,40 @@ AbstractPersistence.prototype.wire = function(server) { }); }; +/** + * Store the current in-flight packets for the given client in the Offline Packets + * store. + * + * @api private + * @param {Client} client the Mosca client + * @param {Function} done the callback that will be called after everything is done + */ AbstractPersistence.prototype.storeInflightPackets = function(client, done) { if (client.inflight) { var that = this; async.each(Object.keys(client.inflight), function(key, cb) { that._storePacket(client.id, client.inflight[key].packet, cb); }, done); + } else if (done) { + done(); } }; +/** + * Close the persistance. 
+ * + * @api public + * @param {Function} done the callback + */ +AbstractPersistence.prototype.close = function(done) { + if (done) { + done(new Error("not implemented yet")); + } +}; + +/** + * Export it as a module + * + * @api public + */ module.exports = AbstractPersistence; diff --git a/lib/persistance/index.js b/lib/persistance/index.js index 14110b9..e96a347 100644 --- a/lib/persistance/index.js +++ b/lib/persistance/index.js @@ -1,5 +1,10 @@ "use strict"; +/** + * Module exports + * + * @api public + */ module.exports.Memory = require("./memory"); module.exports.LevelUp = require("./levelup"); module.exports.Redis = require("./redis"); diff --git a/lib/persistance/levelup.js b/lib/persistance/levelup.js index 6f27511..c50ec08 100644 --- a/lib/persistance/levelup.js +++ b/lib/persistance/levelup.js @@ -9,6 +9,24 @@ var ttl = require('level-ttl'); var Qlobber = require("qlobber").Qlobber; var async = require("async"); +/** + * A LevelUp-based persistance. + * + * The current options include: + * - `path`, the path to the database + * - `ttl`, an object containing three values: + * * `checkFrequency`, the frequency at which the + * the expiration will be checked. It defaults to 1 minute. + * * `subscriptions`, the time (ms) after which subscriptions + * will expire. It defaults to 1 hour. + * * `packets`, the time (ms) after which packets will expire. + * It defaults to 1 hour. + * - `db`, the AbstractLevelDown implementation. + * - all other `levelup` otions. 
+ * + * @api public + * @param {Object} options The options to create this persistance + */ function LevelUpPersistance(options) { if (!(this instanceof LevelUpPersistance)) { return new LevelUpPersistance(options); @@ -42,6 +60,12 @@ function LevelUpPersistance(options) { util.inherits(LevelUpPersistance, AbstractPersistence); +/** + * Private methods, not inteded to be called from outside + * + * @api private + */ + LevelUpPersistance.prototype.storeRetained = function(packet, cb) { this._retained.put(packet.topic, packet, cb); }; @@ -173,4 +197,9 @@ LevelUpPersistance.prototype.close = function(cb) { this.db.close(cb); }; +/** + * Export it as a module + * + * @api public + */ module.exports = LevelUpPersistance; diff --git a/lib/persistance/memory.js b/lib/persistance/memory.js index 646aa0c..947b211 100644 --- a/lib/persistance/memory.js +++ b/lib/persistance/memory.js @@ -5,6 +5,15 @@ var util = require("util"); var MemDOWN = require("memdown"); var factory = function (location) { return new MemDOWN(location); }; +/** + * A persistance based in memory that uses LevelUp with + * MemDOWN. + * + * It exposes the same options of the LevelUpPersistance, + * minus the `db`, which is set to MemDOWN for convenience. + * + * @api public + */ function MemoryPersistance(options) { if (!(this instanceof MemoryPersistance)) { return new MemoryPersistance(options); @@ -18,4 +27,9 @@ function MemoryPersistance(options) { util.inherits(MemoryPersistance, LevelUpPersistance); +/** + * Export it as a module + * + * @api public + */ module.exports = MemoryPersistance; diff --git a/lib/persistance/mongo.js b/lib/persistance/mongo.js index 066b0cd..60b30b9 100644 --- a/lib/persistance/mongo.js +++ b/lib/persistance/mongo.js @@ -7,6 +7,25 @@ var async = require("async"); var Qlobber = require("qlobber").Qlobber; var topicPatterns = require("./utils").topicPatterns; +/** + * A persistance based on MongoDB. + * It currently performs in save mode. 
+ * + * The current options include: + * - `url`, the connection URL of the database + * - `ttl`, an object containing three values: + * * `subscriptions`, the time (ms) after which subscriptions + * will expire. It defaults to 1 hour. + * * `packets`, the time (ms) after which packets will expire. + * It defaults to 1 hour. + * - `mongo`, all the options for the MongoDB driver. + * - `connection`, a MongoDB client to be reused + * + * @api public + * @param {Object} options The options, as describe above. + * @param {Function} done The callback that will be called + * when the persistance is ready + */ function MongoPersistance(options, done) { if (!(this instanceof MongoPersistance)) { return new MongoPersistance(options); @@ -63,6 +82,12 @@ function MongoPersistance(options, done) { util.inherits(MongoPersistance, AbstractPersistence); +/** + * Private methods, not inteded to be called from outside + * + * @api private + */ + MongoPersistance.prototype.storeSubscriptions = function(client, done) { var subscriptions; @@ -216,4 +241,9 @@ MongoPersistance.prototype.close = function(cb) { } }; +/** + * Export it as a module + * + * @api public + */ module.exports = MongoPersistance; diff --git a/lib/persistance/redis.js b/lib/persistance/redis.js index 7ab367b..8b1ac37 100644 --- a/lib/persistance/redis.js +++ b/lib/persistance/redis.js @@ -6,6 +6,25 @@ var util = require("util"); var Qlobber = require("qlobber").Qlobber; var async = require("async"); +/** + * A Redis-based persistance. + * + * The current options include: + * - `port`, the Redis' port. + * - `host`, the Redis' host. + * - `password`, the Redis' password. + * - `redisOpts`, the options for the Redis client. + * - `channel`, the pub/sub channel that will be used to synchronize + * the various clients. Defaults to `'moscaSync'`. + * - `ttl`, an object containing three values: + * * `subscriptions`, the time (ms) after which subscriptions + * will expire. It defaults to 1 hour. 
+ * * `packets`, the time (ms) after which packets will expire. + * - all other `levelup` otions. It defaults to 1 hour. + * + * @api public + * @param {Object} options The options to create this persistance + */ function RedisPersistance(options) { if (!(this instanceof RedisPersistance)) { return new RedisPersistance(options); @@ -55,6 +74,12 @@ function RedisPersistance(options) { util.inherits(RedisPersistance, AbstractPersistence); +/** + * Private methods, not inteded to be called from outside + * + * @api private + */ + RedisPersistance.prototype._buildClient = function() { var options = this.options; var client = redis.createClient( @@ -209,4 +234,9 @@ RedisPersistance.prototype.close = function(cb) { this._client.quit(); }; +/** + * Export it as a module + * + * @api public + */ module.exports = RedisPersistance; From 70bb77ff64da9a0778f6d644a7e23011a8b60727 Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Wed, 26 Jun 2013 17:01:05 +0100 Subject: [PATCH 30/30] Updated the README with the persistance support. --- README.md | 50 ++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 38 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index 58c07e2..b6b1e1d 100644 --- a/README.md +++ b/README.md @@ -41,7 +41,9 @@ client. ## Features * MQTT 3.1 compliant -* QoS 0 and QoS 1, but without storage +* QoS 0 and QoS 1 +* Various storage options for QoS 1 offline packets, + and subscriptions. * Built on top on node.js * As fast as it is possible * Usable inside ANY other node.js app, see the @@ -52,20 +54,29 @@ client. 
Mosca supports some command line options: ``` -Usage: mosca [options] + Usage: mosca [options] [command] + + Commands: + + adduser Add a user to the given credentials file + rmuser Removes a user from the given credentials file + start start the server (optional) Options: - -h, --help output usage information - -V, --version output the version number - -p, --port the port to listen to - --parent-port the parent port to connect to - --parent-host the parent host to connect to - --parent-prefix the prefix to use in the parent broker - -c, --config the config file to use (override every - other options) - -v, --verbose set the bunyan log to INFO - --very-verbose set the bunyan log to DEBUG + -h, --help output usage information + -V, --version output the version number + -p, --port the port to listen to + --parent-port the parent port to connect to + --parent-host the parent host to connect to + --parent-prefix the prefix to use in the parent broker + --credentials the file containing the credentials + --authorize-publish the pattern for publishing to topics for the added user + --authorize-subscribe the pattern for subscribing to topics for the added user + -c, --config the config file to use (override every other option) + -d, --db the path were to store the database + -v, --verbose set the bunyan log to INFO + --very-verbose set the bunyan log to DEBUG ``` However you can only use a MQTT backend with the command line options. @@ -126,6 +137,21 @@ The patterns are checked and validated using The credentials file can be automatically reladed by __Mosca__ if it receives a `SIGHUP`. +## Persistance + +The MQTT specification requires a persistent storage for offline QoS 1 +subscription that has been done by an unclean client. 
+__Mosca__ offers several persistance options: + +* [Memory](http://mcollina.github.com/mosca/docs/lib/persistance/memory.js.html), +* [LevelUp](http://mcollina.github.com/mosca/docs/lib/persistance/levelup.js.html), +* [Redis](http://mcollina.github.com/mosca/docs/lib/persistance/redis.js.html), +* [MongoDB](http://mcollina.github.com/mosca/docs/lib/persistance/mongo.js.html), + +All of them can be configured from the configuration file, under the +`persistance` key. The only exception is LevelUp, which can be specified +by using the `--db` option from the command line. + +## Contributing to Mosca + +* Check out the latest master to make sure the feature hasn't been