@@ -16,10 +16,14 @@
opt.code.from = opt.code.from || '!';
opt.jsonify = true;

+
function ename(t){ return encodeURIComponent(t).replace(/\*/g, '%2A') }
function atomic(v){ return u !== v && (!v || 'object' != typeof v) }
+ var timediate = (typeof setImmediate === "undefined")? setTimeout : setImmediate;
+ var puff = setTimeout.puff || timediate;
var map = Gun.obj.map;
- var LOG = console.LOG;
+ var obj_empty = Gun.obj.empty;
+ var ST = 0;

if(!opt.store){
return opt.log("ERROR: Radisk needs `opt.store` interface with `{get: fn, put: fn (, list: fn)}`!");
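For reference, a minimal in-memory `opt.store` that satisfies this `{get: fn, put: fn (, list: fn)}` interface might look like the sketch below (a hypothetical adapter for illustration only; `memdisk` and the usage line are assumptions, not part of this change):

var memdisk = {}; // hypothetical in-memory backing, keyed by file name
var store = {
	get: function(file, cb){ cb(null, memdisk[file]) }, // read the raw text of one file (undefined if missing)
	put: function(file, data, cb){ memdisk[file] = data; cb(null, 1) }, // overwrite one file
	list: function(cb){ // optional: call cb once per file name,
		Object.keys(memdisk).forEach(function(file){ cb(file) });
		cb(); // then once with nothing, to signal the end of the directory
	}
};
// usage sketch: Radisk({store: store, file: 'radata', log: function(){}})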
@@ -39,112 +43,96 @@
1. Because writing to disk takes time, we should batch data to disk. This improves performance, and reduces potential disk corruption.
2. If a batch exceeds a certain number of writes, we should immediately write to disk when physically possible. This caps total performance, but reduces potential loss.
*/
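The two rules above amount to a debounce with a hard cap. A condensed sketch of that pattern, mirroring the old `r.thrash` scheduling it replaces (`opt.batch` and `opt.until` as used in this file; `flush` is a hypothetical stand-in for the actual disk write):

var count = 0, timer;
function enqueue(){
	if(++count >= opt.batch){ return flush() } // (2) cap hit: write as soon as physically possible
	if(timer){ return } // (1) a flush is already scheduled, keep batching
	timer = setTimeout(flush, opt.until || 1); // (1) wait briefly so more writes can join the batch
}
function flush(){
	clearTimeout(timer); timer = null; count = 0;
	// ... hand the accumulated batch to the storage engine here ...
}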
- var r = function(key, val, cb){
- key = ''+key;
- if(val instanceof Function){
+ var r = function(key, data, cb, tag, DBG){
+ if('function' === typeof data){
var o = cb || {};
- cb = val;
- var S; LOG && (S = +new Date);
- val = r.batch(key);
- LOG && opt.log(S, +new Date - S, 'rad mem');
- if(u !== val){
- cb(u, r.range(val, o), o);
- if(atomic(val)){ return }
- // if a node is requested and some of it is cached... the other parts might not be.
- }
- if(r.thrash.at){
- val = r.thrash.at(key);
- if(u !== val){
- cb(u, r.range(val, o), o);
- if(atomic(val)){ cb(u, val, o); return }
- // if a node is requested and some of it is cached... the other parts might not be.
- }
- }
- return r.read(key, cb, o);
+ cb = data;
+ r.read(key, cb, o, DBG || tag);
+ return;
}
- r.batch(key, val);
- if(cb){ r.batch.acks.push(cb) }
- if(++r.batch.ed >= opt.batch){ return r.thrash() } // (2)
- if(r.batch.to){ return }
- //clearTimeout(r.batch.to); // (1) // THIS LINE IS EVIL! NEVER USE IT! ALSO NEVER DELETE THIS SO WE NEVER MAKE THE SAME MISTAKE AGAIN!
- r.batch.to = setTimeout(r.thrash, opt.until || 1);
- }
-
- r.batch = Radix();
- r.batch.acks = [];
- r.batch.ed = 0;
-
- r.thrash = function(){
- var thrash = r.thrash;
- if(thrash.ing){ return thrash.more = true }
- LOG = console.LOG; // dirty place to cheaply update LOG settings over time.
- thrash.more = false;
- thrash.ing = true;
- var batch = thrash.at = r.batch, i = 0;
- clearTimeout(r.batch.to);
- r.batch = null;
- r.batch = Radix();
- r.batch.acks = [];
- r.batch.ed = 0;
- //console.debug(99); var ID = Gun.text.random(2), S = (+new Date); console.log("[[[[[[[[", ID, batch.acks.length);
- r.save(batch, function(err, ok){
- if(++i > 1){ opt.log('RAD ERR: Radisk has callbacked multiple times, please report this as a BUG at github.com/amark/gun/issues ! ' + i); return }
- if(err){ opt.log('err', err) }
- //console.debug(99); var TMP; console.log("]]]]]]]]", ID, batch.acks.length, (TMP = +new Date) - S, 'more?', thrash.more);
- map(batch.acks, function(cb){ cb(err, ok) });
- //console.log("][", +new Date - TMP, thrash.more);
- thrash.at = null;
- thrash.ing = false;
- if(thrash.more){ thrash() }
- });
+ //var tmp = (tmp = r.batch = r.batch || {})[key] = tmp[key] || {};
+ //var tmp = (tmp = r.batch = r.batch || {})[key] = data;
+ r.save(key, data, cb, tag, DBG);
}
-
- /*
- 1. Find the first radix item in memory.
- 2. Use that as the starting index in the directory of files.
- 3. Find the first file that is lexically larger than it,
- 4. Read the previous file to that into memory
- 5. Scan through the in memory radix for all values lexically less than the limit.
- 6. Merge and write all of those to the in-memory file and back to disk.
- 7. If file too large, split. More details needed here.
- */
- r.save = function(rad, cb){
- var s = function Span(){};
- s.find = function(tree, key){
- if(key < s.start){ return }
- s.start = key;
- r.list(s.lex);
- return true;
+ r.save = function(key, data, cb, tag, DBG){
+ var s = {key: key}, tags, f, d, q;
+ s.find = function(file){ var tmp;
+ s.file = file || (file = opt.code.from);
+ DBG && (DBG = DBG[file] = DBG[file] || {});
+ DBG && (DBG.sf = DBG.sf || +new Date);
+ if(tmp = r.disk[file]){ s.mix(u, tmp); return }
+ r.parse(file, s.mix, u, DBG);
}
- s.lex = function(file){
- file = (u === file)? u : decodeURIComponent(file);
- if(!file || file > s.start){
- s.mix(s.file || opt.code.from, s.start, s.end = file);
- return true;
+ s.mix = function(err, disk){
+ DBG && (DBG.sml = +new Date);
+ DBG && (DBG.sm = DBG.sm || +new Date);
+ if(s.err = err || s.err){ cb(err); return } // TODO: HANDLE BATCH EMIT
+ var file = s.file = (disk||'').file || s.file, tmp;
+ if(!disk && file !== opt.code.from){ // corrupt file?
+ r.find.bad(file); // remove from dir list
+ r.save(key, data, cb, tag); // try again
+ return;
+ }
+ (disk = r.disk[file] || (r.disk[file] = disk || Radix())).file || (disk.file = file);
+ if(opt.compare){
+ data = opt.compare(disk(key), data, key, file);
+ if(u === data){ cb(err, -1); return } // TODO: HANDLE BATCH EMIT
}
- s.file = file;
+ (s.disk = disk)(key, data);
+ if(tag){
+ (tmp = (tmp = disk.tags || (disk.tags = {}))[tag] || (tmp[tag] = r.tags[tag] || (r.tags[tag] = {})))[file] || (tmp[file] = r.one[tag] || (r.one[tag] = cb));
+ cb = null;
+ }
+ DBG && (DBG.st = DBG.st || +new Date);
+ if(disk.Q){ cb && disk.Q.push(cb); return } disk.Q = (cb? [cb] : []);
+ disk.to = setTimeout(s.write, opt.until);
}
- s.mix = function(file, start, end){
- s.start = s.end = s.file = u;
- r.parse(file, function(err, disk){
- if(err){ return cb(err) }
- disk = disk || Radix();
- Radix.map(rad, function(val, key){
- if(key < start){ return }
- if(end && end < key){ return s.start = key }
- // PLUGIN: consider adding HAM as an extra layer of protection
- disk(key, val); // merge batch[key] -> disk[key]
- });
- r.write(file, disk, s.next);
- });
+ s.write = function(){
+ DBG && (DBG.sto = DBG.sto || +new Date);
+ var file = f = s.file, disk = d = s.disk;
+ q = s.q = disk.Q;
+ tags = s.tags = disk.tags;
+ delete disk.Q;
+ delete r.disk[file];
+ delete disk.tags;
+ r.write(file, disk, s.ack, u, DBG);
}
- s.next = function(err, ok){
- if(s.err = err){ return cb(err) }
- if(s.start){ return Radix.map(rad, s.find) }
- cb(err, ok);
+ s.ack = function(err, ok){
+ DBG && (DBG.sa = DBG.sa || +new Date);
+ DBG && (DBG.sal = q.length);
+ var ack, tmp;
+ // TODO!!!! CHANGE THIS INTO PUFF!!!!!!!!!!!!!!!!
+ for(var id in r.tags){
+ if(!r.tags.hasOwnProperty(id)){ continue } var tag = r.tags[id];
+ if((tmp = r.disk[f]) && (tmp = tmp.tags) && tmp[id]){ continue }
+ ack = tag[f];
+ delete tag[f];
+ if(!obj_empty(tag)){ continue }
+ delete r.tags[id]; // key by the id string, not the tag object
+ ack && ack(err, ok);
+ }
+ !q && (q = '');
+ var l = q.length, i = 0;
+ // TODO: PERF: Why is acks so slow, what work do they do??? CHECK THIS!!
+ var S = +new Date;
+ for(;i < l; i++){ (ack = q[i]) && ack(err, ok) }
+ console.STAT && console.STAT(S, +new Date - S, 'rad acks', ename(s.file));
+ console.STAT && console.STAT(S, q.length, 'rad acks #', ename(s.file));
}
- Radix.map(rad, s.find);
- }
+ cb || (cb = function(err, ok){ // test delete!
+ if(!err){ return }
+ });
+ r.find(key, s.find);
+ }
+ r.disk = {};
+ r.one = {};
+ r.tags = {};

/*
Any storage engine at some point will have to do a read in order to write.
@@ -152,66 +140,99 @@
Therefore it is unavoidable that a read will have to happen,
the question is just how long you delay it.
*/
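Concretely, every write here is a read-modify-write cycle over a whole file; a condensed sketch (with hypothetical `parse` and `serialize` helpers standing in for the radix encode/decode in this file):

function upsert(file, key, val, cb){
	opt.store.get(ename(file), function(err, text){
		if(err){ return cb(err) }
		var tree = parse(text) || Radix(); // read: decode what is already on disk
		tree(key, val); // modify: merge the new record into the tree
		opt.store.put(ename(file), serialize(tree), cb); // write: flush the whole file back
	});
}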
- r.write = function(file, rad, cb, o){
+ var RWC = 0;
+ r.write = function(file, rad, cb, o, DBG){
+ if(!rad){ cb('No radix!'); return }
o = ('object' == typeof o)? o : {force: o};
- var f = function Fractal(){};
+ var f = function Fractal(){}, a, b;
f.text = '';
- f.count = 0;
- f.file = file;
- f.each = function(val, key, k, pre){
- //console.log("RAD:::", JSON.stringify([val, key, k, pre]));
- if(u !== val){ f.count++ }
- if(opt.pack <= (val||'').length){ return cb("Record too big!"), true }
- var enc = Radisk.encode(pre.length) +'#'+ Radisk.encode(k) + (u === val? '' : ':'+ Radisk.encode(val)) +'\n';
- if((opt.chunk < f.text.length + enc.length) && (1 < f.count) && !o.force){
- f.text = '';
- f.limit = Math.ceil(f.count/2);
- f.count = 0;
- f.sub = Radix();
- Radix.map(rad, f.slice);
- return true;
- }
- f.text += enc;
- }
+ f.file = file = rad.file || (rad.file = file);
+ if(!file){ cb('What file?'); return }
f.write = function(){
- var tmp = ename(file);
- var S; LOG && (S = +new Date);
- opt.store.put(tmp, f.text, function(err){
- LOG && opt.log(S, +new Date - S, "wrote disk", tmp);
- if(err){ return cb(err) }
- r.list.add(tmp, cb);
+ var text = rad.raw = f.text;
+ r.disk[file = rad.file || f.file || file] = rad;
+ var S = +new Date;
+ DBG && (DBG.wd = S);
+ r.find.add(file, function add(err){
+ DBG && (DBG.wa = +new Date);
+ if(err){ cb(err); return }
+ opt.store.put(ename(file), text, function safe(err, ok){
+ DBG && (DBG.wp = +new Date);
+ console.STAT && console.STAT(S, ST = +new Date - S, "wrote disk", JSON.stringify(file), ++RWC, 'total all writes.');
+ cb(err, ok || 1);
+ if(!rad.Q){ delete r.disk[file] } // VERY IMPORTANT! Clean up memory, but not if there are already queued writes on it!
});
});
}
- f.slice = function(val, key){
- if(key < f.file){ return }
- if(f.limit < (++f.count)){
- var name = f.file;
- f.file = key;
- f.count = 0;
- r.write(name, f.sub, f.next, o);
- return true;
+ f.split = function(){
+ var S = +new Date;
+ DBG && (DBG.wf = S);
+ f.text = '';
+ if(!f.count){ f.count = 0;
+ Radix.map(rad, function count(){ f.count++ }); // TODO: Perf? Any faster way to get total length?
}
- f.sub(key, val);
- }
- f.next = function(err){
- if(err){ return cb(err) }
+ DBG && (DBG.wfc = f.count);
+ f.limit = Math.ceil(f.count/2);
+ var SC = f.count;
+ f.count = 0;
+ DBG && (DBG.wf1 = +new Date);
f.sub = Radix();
- if(!Radix.map(rad, f.slice)){
- r.write(f.file, f.sub, cb, o);
+ Radix.map(rad, f.slice, {reverse: 1}); // IMPORTANT: DO THIS IN REVERSE, SO THE LAST HALF OF THE DATA IS MOVED TO THE NEW FILE BEFORE IT IS DROPPED FROM THE CURRENT FILE.
+ DBG && (DBG.wf2 = +new Date);
+ r.write(f.end, f.sub, f.both, o);
+ DBG && (DBG.wf3 = +new Date);
+ f.hub = Radix();
+ Radix.map(rad, f.stop);
+ DBG && (DBG.wf4 = +new Date);
+ r.write(rad.file, f.hub, f.both, o);
+ DBG && (DBG.wf5 = +new Date);
+ console.STAT && console.STAT(S, +new Date - S, "rad split", ename(rad.file), SC);
+ return true;
+ }
+ f.slice = function(val, key){
+ f.sub(f.end = key, val);
+ if(f.limit <= (++f.count)){ return true }
+ }
+ f.stop = function(val, key){
+ if(key >= f.end){ return true }
+ f.hub(key, val);
+ }
+ f.both = function(err, ok){
+ DBG && (DBG.wfd = +new Date);
+ if(b){ cb(err || b); return }
+ if(a){ cb(err, ok); return }
+ a = true;
+ b = err;
+ }
+ f.each = function(val, key, k, pre){
+ if(u !== val){ f.count++ }
+ if(opt.pack <= (val||'').length){ return cb("Data too big!"), true }
+ var enc = Radisk.encode(pre.length) +'#'+ Radisk.encode(k) + (u === val? '' : ':'+ Radisk.encode(val)) +'\n';
+ if((opt.chunk < f.text.length + enc.length) && (1 < f.count) && !o.force){
+ return f.split();
}
+ f.text += enc;
}
- if(opt.jsonify){ return r.write.jsonify(f, file, rad, cb, o) } // temporary testing idea
+ if(opt.jsonify){ r.write.jsonify(f, rad, cb, o, DBG); return } // temporary testing idea
if(!Radix.map(rad, f.each, true)){ f.write() }
}

- r.write.jsonify = function(f, file, rad, cb, o){
+ r.write.jsonify = function(f, rad, cb, o, DBG){
var raw;
- var S; LOG && (S = +new Date);
+ var S = +new Date;
+ DBG && (DBG.w = S);
try{raw = JSON.stringify(rad.$);
- }catch(e){ return cb("Record too big!") }
- LOG && opt.log(S, +new Date - S, "rad stringified JSON");
+ }catch(e){ cb("Cannot radisk!"); return }
+ DBG && (DBG.ws = +new Date);
+ console.STAT && console.STAT(S, +new Date - S, "rad stringified JSON");
if(opt.chunk < raw.length && !o.force){
- if(Radix.map(rad, f.each, true)){ return }
+ var c = 0;
+ Radix.map(rad, function(){
+ if(c++){ return true } // more than 1 item
+ });
+ if(c > 1){
+ return f.split();
+ }
}
f.text = raw;
f.write();
@@ -222,81 +243,85 @@
if(u === o.start && u === o.end){ return tree }
if(atomic(tree)){ return tree }
var sub = Radix();
- Radix.map(tree, function(v,k){
- sub(k,v);
- }, o);
+ Radix.map(tree, function(v,k){ sub(k,v) }, o); // ONLY PLACE THAT TAKES TREE, maybe reduce API for better perf?
return sub('');
}

;(function(){
- var Q = {};
- r.read = function(key, cb, o){
+ r.read = function(key, cb, o, DBG){
o = o || {};
- if(RAD && !o.next){ // cache
- var S; LOG && (S = +new Date);
- var val = RAD(key);
- LOG && opt.log(S, +new Date - S, 'rad cached');
- //if(u !== val){
- //cb(u, val, o);
- if(atomic(val)){ cb(u, val, o); return }
- // if a node is requested and some of it is cached... the other parts might not be.
- //}
+ var g = {key: key};
+ g.find = function(file){ var tmp;
+ g.file = file || (file = opt.code.from);
+ DBG && (DBG = DBG[file] = DBG[file] || {});
+ DBG && (DBG.rf = DBG.rf || +new Date);
+ if(tmp = r.disk[g.file = file]){ g.check(u, tmp); return }
+ r.parse(file, g.check, u, DBG);
}
- o.span = (u !== o.start) || (u !== o.end); // is there a start or end?
- var g = function Get(){};
- g.lex = function(file){ var tmp;
- file = (u === file)? u : decodeURIComponent(file);
- tmp = o.next || key || (o.reverse? o.end || '\uffff' : o.start || '');
- if(!file || (o.reverse? file < tmp : file > tmp)){
- LOG && opt.log(S, +new Date - S, 'rad read lex'); S = +new Date;
- if(o.next || o.reverse){ g.file = file }
- if(tmp = Q[g.file]){
- tmp.push({key: key, ack: cb, file: g.file, opt: o});
- return true;
- }
- Q[g.file] = [{key: key, ack: cb, file: g.file, opt: o}];
- if(!g.file){
- g.it(null, u, {});
- return true;
- }
- r.parse(g.file, g.it);
- return true;
+ g.get = function(err, disk, info){
+ DBG && (DBG.rgl = +new Date);
+ DBG && (DBG.rg = DBG.rg || +new Date);
+ if(g.err = err || g.err){ cb(err); return }
+ var file = g.file = (disk||'').file || g.file;
+ if(!disk && file !== opt.code.from){ // corrupt file?
+ r.find.bad(file); // remove from dir list
+ r.read(key, cb, o); // try again
+ return;
}
- g.file = file;
- }
- g.it = function(err, disk, info){
- if(g.err = err){ opt.log('err', err) }
- g.info = info;
- if(disk){ RAD = g.disk = disk }
- disk = Q[g.file]; delete Q[g.file];
- LOG && opt.log(S, +new Date - S, 'rad read in, ack', disk.length); S = +new Date;
- map(disk, g.ack);
- LOG && opt.log(S, +new Date - S, 'rad read acked');
- }
- g.ack = function(as){
- if(!as.ack){ return }
- var key = as.key, o = as.opt, info = g.info, rad = g.disk || noop, data = r.range(rad(key), o), last = rad.last || Radix.map(rad, rev, revo);
- o.parsed = (o.parsed || 0) + (info.parsed||0);
+ disk = r.disk[file] || (r.disk[file] = disk);
+ if(!disk){ cb(file === opt.code.from? u : "No file!"); return }
+ disk.file || (disk.file = file);
+ var data = r.range(disk(key), o);
+ DBG && (DBG.rr = +new Date);
+ o.unit = disk.unit;
o.chunks = (o.chunks || 0) + 1;
- o.more = true;
- if((!as.file) // if no more places to look
- || (!o.span && last === key) // if our key exactly matches the very last atomic record
- || (!o.span && last && last > key && 0 != last.indexOf(key)) // 'zach' may be lexically larger than 'za', but there still might be more, like 'zane' in the 'za' prefix bucket so do not end here.
- ){
- o.more = u;
- as.ack(g.err, data, o);
- return
+ o.parsed = (o.parsed || 0) + ((info||'').parsed||(o.chunks*opt.chunk));
+ o.more = 1;
+ o.next = u;
+ Radix.map(r.list, function next(v,f){
+ if(!v || file === f){ return }
+ o.next = f;
+ return 1;
+ }, o.reverse? {reverse: 1, end: file} : {start: file});
+ DBG && (DBG.rl = +new Date);
+ if(!o.next){ o.more = 0 }
+ if(o.next){
+ if(!o.reverse && (key < o.next && 0 != o.next.indexOf(key)) || (u !== o.end && (o.end || '\uffff') < o.next)){ o.more = 0 }
+ if(o.reverse && (key > o.next && 0 != key.indexOf(o.next)) || (u !== o.start && (o.start || '') > o.next)){ o.more = 0 }
}
- if(u !== data){
- as.ack(g.err, data, o); // more might be coming!
- if(o.parsed >= o.limit){ return } // even if more, we've hit our limit, asking peer will need to make a new ask with a new starting point.
- }
- o.next = as.file;
- r.read(key, as.ack, o);
+ if(!o.more){ cb(g.err, data, o); return }
+ if(data){ cb(g.err, data, o) }
+ if(o.parsed >= o.limit){ return }
+ var S = +new Date;
+ DBG && (DBG.rm = S);
+ var next = o.next;
+ timediate(function(){
+ console.STAT && console.STAT(S, +new Date - S, 'rad more');
+ r.parse(next, g.check);
+ },0);
}
- if(o.reverse){ g.lex.reverse = true }
- LOG && (S = +new Date);
- r.list(g.lex);
+ g.check = function(err, disk, info){
+ g.get(err, disk, info);
+ if(!disk || disk.check){ return } disk.check = 1;
+ var S = +new Date;
+ (info || (info = {})).file || (info.file = g.file);
+ Radix.map(disk, function(val, key){
+ // assume in memory for now, since both write/read already call r.find which will init it.
+ r.find(key, function(file){
+ if((file || (file = opt.code.from)) === info.file){ return }
+ var id = Gun.text.random(3);
+ puff(function(){
+ r.save(key, val, function ack(err, ok){
+ if(err){ r.save(key, val, ack); return } // ad infinitum???
+ // TODO: NOTE!!! Mislocated data could be caused by a synchronous `put` from the `g.get(` path; aside from perf, shouldn't we do the check first before acking?
+ console.STAT && console.STAT("MISLOCATED DATA CORRECTED", id, ename(key), ename(info.file), ename(file));
+ });
+ },0);
+ })
+ });
+ console.STAT && console.STAT(S, +new Date - S, "rad check");
+ }
+ r.find(key, g.find);
}
function rev(a,b){ return b }
var revo = {reverse: true};
@@ -310,18 +335,19 @@
with how much performance and scale we can get out of only one.
Then we can work on the harder problem of being multi-process.
*/
+ var RPC = 0;
var Q = {}, s = String.fromCharCode(31);
- r.parse = function(file, cb, raw){ var q;
- if(q = Q[file]){ return q.push(cb) } q = Q[file] = [cb];
- var p = function Parse(){}, info = {};
- p.disk = Radix();
+ r.parse = function(file, cb, raw, DBG){ var q;
+ if(!file){ return cb(); }
+ if(q = Q[file]){ q.push(cb); return } q = Q[file] = [cb];
+ var p = function Parse(){}, info = {file: file};
+ (p.disk = Radix()).file = file;
p.read = function(err, data){ var tmp;
- LOG && opt.log(S, +new Date - S, 'read disk', ename(file));
+ DBG && (DBG.rpg = +new Date);
+ console.STAT && console.STAT(S, +new Date - S, 'read disk', JSON.stringify(file), ++RPC, 'total all parses.');
delete Q[file];
- if((p.err = err) || (p.not = !data)){
- return map(q, p.ack);
- }
- if(typeof data !== 'string'){
+ if((p.err = err) || (p.not = !data)){ p.map(q, p.ack); return }
+ if('string' !== typeof data){
try{
if(opt.pack <= data.length){
p.err = "Chunk too big!";
@@ -329,29 +355,55 @@
data = data.toString(); // If it crashes, it crashes here. How!?? We check size first!
}
}catch(e){ p.err = e }
- if(p.err){ return map(q, p.ack) }
+ if(p.err){ p.map(q, p.ack); return }
}
info.parsed = data.length;
-
- LOG && (S = +new Date);
- if(opt.jsonify || '{' === data[0]){ // temporary testing idea
+ DBG && (DBG.rpl = info.parsed);
+ DBG && (DBG.rpa = q.length);
+ S = +new Date;
+ if(opt.jsonify || '{' === data[0]){
try{
- var json = JSON.parse(data);
+ var json = JSON.parse(data); // TODO: this caused an out-of-memory crash!
p.disk.$ = json;
- LOG && opt.log(S, +new Date - S, 'rad parsed JSON');
- map(q, p.ack);
+ console.STAT && (ST = +new Date - S) > 9 && console.STAT(S, ST, 'rad parsed JSON');
+ DBG && (DBG.rpd = +new Date);
+ p.map(q, p.ack); // hmmm, v8 profiler can't see into this because of try/catch?
return;
}catch(e){ tmp = e }
if('{' === data[0]){
p.err = tmp || "JSON error!";
- return map(q, p.ack);
+ p.map(q, p.ack);
+ return;
}
}
- LOG && (S = +new Date);
+ p.radec(err, data);
+ }
+ p.map = function(){
+ if(!q || !q.length){ return }
+ //var i = 0, l = q.length, ack;
+ var S = +new Date;
+ var err = p.err, data = p.not? u : p.disk;
+ var i = 0, ack; while(i < 9 && (ack = q[i++])){ ack(err, data, info) } // too much?
+ console.STAT && console.STAT(S, +new Date - S, 'rad packs', ename(file));
+ console.STAT && console.STAT(S, i, 'rad packs #', ename(file));
+ if(!(q = q.slice(i)).length){ return }
+ puff(p.map, 0);
+ }
+ p.ack = function(cb){
+ if(!cb){ return }
+ if(p.err || p.not){
+ cb(p.err, u, info);
+ return;
+ }
+ cb(u, p.disk, info);
+ }
+ p.radec = function(err, data){
+ S = +new Date;
var tmp = p.split(data), pre = [], i, k, v;
if(!tmp || 0 !== tmp[1]){
p.err = "File '"+file+"' does not have root radix! ";
- return map(q, p.ack);
+ p.map(q, p.ack);
+ return;
}
while(tmp){
k = v = u;
@@ -370,9 +422,8 @@
if(u !== k && u !== v){ p.disk(pre.join(''), v) }
tmp = p.split(tmp[2]);
}
- LOG && opt.log(S, +new Date - S, 'parsed RAD');
- //cb(err, p.disk);
- map(q, p.ack);
+ console.STAT && console.STAT(S, +new Date - S, 'parsed RAD');
+ p.map(q, p.ack);
};
p.split = function(t){
if(!t){ return }
@@ -385,81 +436,76 @@
l[2] = t.slice(i + o.i);
return l;
}
- p.ack = function(cb){
- if(!cb){ return }
- if(p.err || p.not){ return cb(p.err, u, info) }
- cb(u, p.disk, info);
- }
- var S; LOG && (S = +new Date);
- if(raw){ return p.read(null, raw) }
+ if(r.disk){ raw || (raw = (r.disk[file]||'').raw) }
+ var S = +new Date, SM, SL;
+ DBG && (DBG.rp = S);
+ if(raw){ return puff(function(){ p.read(u, raw) }, 0) }
opt.store.get(ename(file), p.read);
+ // TODO: What if memory disk gets filled with updates, and we get an old one back?
}
}());

;(function(){
- var dir, q, f = String.fromCharCode(28), ef = ename(f);
- r.list = function(cb){
- if(dir){
- var tmp = {reverse: (cb.reverse)? 1 : 0};
- Radix.map(dir, function(val, key){
- return cb(key);
- }, tmp) || cb();
+ var dir, f = String.fromCharCode(28), Q;
+ r.find = function(key, cb){
+ if(!dir){
+ if(Q){ Q.push([key, cb]); return } Q = [[key, cb]];
+ r.parse(f, init);
return;
}
- if(q){ return q.push(cb) } q = [cb];
- r.parse(f, r.list.init);
+ Radix.map(r.list = dir, function(val, key){
+ if(!val){ return }
+ return cb(key) || true;
+ }, {reverse: 1, end: key}) || cb(opt.code.from);
}
- r.list.add = function(file, cb){
+ r.find.add = function(file, cb){
var has = dir(file);
- if(has || file === ef){
- return cb(u, 1);
- }
- dir(file, true);
- cb.listed = (cb.listed || 0) + 1;
+ if(has || file === f){ cb(u, 1); return }
+ dir(file, 1);
+ cb.found = (cb.found || 0) + 1;
r.write(f, dir, function(err, ok){
- if(err){ return cb(err) }
- cb.listed = (cb.listed || 0) - 1;
- if(cb.listed !== 0){ return }
+ if(err){ cb(err); return }
+ cb.found = (cb.found || 0) - 1;
+ if(0 !== cb.found){ return }
cb(u, 1);
}, true);
}
- r.list.init = function(err, disk){
+ r.find.bad = function(file, cb){
+ dir(file, 0);
+ r.write(f, dir, cb||noop);
+ }
+ function init(err, disk){
if(err){
opt.log('list', err);
- setTimeout(function(){ r.parse(f, r.list.init) }, 1000);
- return;
- }
- if(disk){
- r.list.drain(disk);
- return;
- }
- if(!opt.store.list){
- r.list.drain(Radix());
+ setTimeout(function(){ r.parse(f, init) }, 1000);
return;
}
+ if(disk){ drain(disk); return }
+ dir = dir || disk || Radix();
+ if(!opt.store.list){ drain(dir); return }
// import directory.
opt.store.list(function(file){
- dir = dir || Radix();
- if(!file){ return r.list.drain(dir) }
- r.list.add(file, noop);
+ if(!file){ drain(dir); return }
+ r.find.add(file, noop);
});
}
- r.list.drain = function(rad, tmp){
- r.list.dir = dir = rad;
- tmp = q; q = null;
- Gun.list.map(tmp, function(cb){
- r.list(cb);
+ function drain(rad, tmp){
+ dir = dir || rad;
+ dir.file = f;
+ tmp = Q; Q = null;
+ Gun.list.map(tmp, function(arg){
+ r.find(arg[0], arg[1]);
});
}
}());

+ try{ !Gun.window && require('./radmigtmp')(r) }catch(e){}
+
var noop = function(){}, RAD, u;
Radisk.has[opt.file] = r;
return r;
}

-
-
;(function(){
var _ = String.fromCharCode(31), u;
Radisk.encode = function(d, o, s){ s = s || _;