monotone Mtn Source Tree

Root/cert.cc

1// copyright (C) 2002, 2003 graydon hoare <graydon@pobox.com>
2// all rights reserved.
3// licensed to the public under the terms of the GNU GPL (>= 2)
4// see the file COPYING for details
5
6#include "cert.hh"
7#include "packet.hh"
8#include "app_state.hh"
9#include "interner.hh"
10#include "keys.hh"
11#include "mac.hh"
12#include "netio.hh"
13#include "sanity.hh"
14#include "patch_set.hh"
15#include "transforms.hh"
16#include "ui.hh"
17
18#include <boost/date_time/gregorian/gregorian.hpp>
19#include <boost/date_time/posix_time/posix_time.hpp>
20#include <boost/dynamic_bitset.hpp>
21#include <boost/lexical_cast.hpp>
22#include <boost/shared_ptr.hpp>
23#include <boost/tuple/tuple.hpp>
24#include <boost/tuple/tuple_comparison.hpp>
25
26#include <string>
27#include <limits>
28#include <sstream>
29#include <vector>
30
31using namespace std;
32using namespace boost;
33
34// cert destroyer!
35
36struct bogus_cert_p
37{
38 app_state & app;
39 bogus_cert_p(app_state & a) : app(a) {};
40
41 bool cert_is_bogus(cert const & c) const
42 {
43 if (check_cert(app, c))
44 {
45 L(F("cert ok\n"));
46 return false;
47 }
48 else
49 {
50 string txt;
51 cert_signable_text(c, txt);
52 W(F("bad signature by '%s' on '%s'\n") % c.key() % txt);
53 return true;
54 }
55 }
56
57 bool operator()(manifest<cert> const & c) const
58 {
59 return cert_is_bogus(c.inner());
60 }
61
62 bool operator()(file<cert> const & c) const
63 {
64 return cert_is_bogus(c.inner());
65 }
66};
67
68
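// erase_bogus_certs: first drop any cert whose signature fails check_cert,
// then group the survivors by (ident, name, value) and keep one
// representative per group only when the lua trust hook accepts that
// group's set of signing keys.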
69void erase_bogus_certs(vector< manifest<cert> > & certs,
70 app_state & app)
71{
72 typedef vector< manifest<cert> >::iterator it;
73 it e = remove_if(certs.begin(), certs.end(), bogus_cert_p(app));
74 certs.erase(e, certs.end());
75
76 vector< manifest<cert> > tmp_certs;
77
78 // sorry, this is a crazy data structure
79 typedef tuple< hexenc<id>, cert_name, base64<cert_value> > trust_key;
80 typedef map< trust_key, pair< shared_ptr< set<rsa_keypair_id> >, it > > trust_map;
81 trust_map trust;
82
83 for (it i = certs.begin(); i != certs.end(); ++i)
84 {
85 trust_key key = trust_key(i->inner().ident, i->inner().name, i->inner().value);
86 trust_map::iterator j = trust.find(key);
87 shared_ptr< set<rsa_keypair_id> > s;
88 if (j == trust.end())
89{
90 s.reset(new set<rsa_keypair_id>());
91 trust.insert(make_pair(key, make_pair(s, i)));
92}
93 else
94s = j->second.first;
95 s->insert(i->inner().key);
96 }
97
98 for (trust_map::const_iterator i = trust.begin();
99 i != trust.end(); ++i)
100 {
101 cert_value decoded_value;
102 decode_base64(get<2>(i->first), decoded_value);
103 if (app.lua.hook_get_manifest_cert_trust(*(i->second.first),
104 get<0>(i->first),
105 get<1>(i->first),
106 decoded_value))
107{
108 L(F("trust function liked %d signers of cert '%s'='%s' on manifest '%s'\n")
109 % i->second.first->size() % get<1>(i->first) % decoded_value % get<0>(i->first));
110 tmp_certs.push_back(*(i->second.second));
111}
112 else
113{
114 L(F("trust function disliked %d signers of cert '%s'='%s' on manifest '%s'\n")
115 % i->second.first->size() % get<1>(i->first) % decoded_value % get<0>(i->first));
116}
117 }
118 certs = tmp_certs;
119}
120
121void erase_bogus_certs(vector< file<cert> > & certs,
122 app_state & app)
123{
124 typedef vector< file<cert> >::iterator it;
125 it e = remove_if(certs.begin(), certs.end(), bogus_cert_p(app));
126 certs.erase(e, certs.end());
127
128 vector< file<cert> > tmp_certs;
129
130 // sorry, this is a crazy data structure
131 typedef tuple< hexenc<id>, cert_name, base64<cert_value> > trust_key;
132 typedef map< trust_key, pair< shared_ptr< set<rsa_keypair_id> >, it > > trust_map;
133 trust_map trust;
134
135 for (it i = certs.begin(); i != certs.end(); ++i)
136 {
137 trust_key key = trust_key(i->inner().ident, i->inner().name, i->inner().value);
138 trust_map::iterator j = trust.find(key);
139 shared_ptr< set<rsa_keypair_id> > s;
140 if (j == trust.end())
141{
142 s.reset(new set<rsa_keypair_id>());
143 trust.insert(make_pair(key, make_pair(s, i)));
144}
145 else
146s = j->second.first;
147 s->insert(i->inner().key);
148 }
149
150 for (trust_map::const_iterator i = trust.begin();
151 i != trust.end(); ++i)
152 {
153 cert_value decoded_value;
154 decode_base64(get<2>(i->first), decoded_value);
155 if (app.lua.hook_get_manifest_cert_trust(*(i->second.first),
156 get<0>(i->first),
157 get<1>(i->first),
158 decoded_value))
159{
160 L(F("trust function liked %d signers of cert '%s'='%s' on file '%s'\n")
161 % i->second.first->size() % get<1>(i->first) % decoded_value % get<0>(i->first));
162 tmp_certs.push_back(*(i->second.second));
163}
164 else
165{
166 L(F("trust function disliked %d signers of cert '%s'='%s' on file '%s'\n")
167 % i->second.first->size() % get<1>(i->first) % decoded_value % get<0>(i->first));
168}
169 }
170 certs = tmp_certs;
171}
172
173
174// cert-managing routines
175
176cert::cert()
177{}
178
179cert::cert(hexenc<id> const & ident,
180 cert_name const & name,
181 base64<cert_value> const & value,
182 rsa_keypair_id const & key)
183 : ident(ident), name(name), value(value), key(key)
184{}
185
186cert::cert(hexenc<id> const & ident,
187 cert_name const & name,
188 base64<cert_value> const & value,
189 rsa_keypair_id const & key,
190 base64<rsa_sha1_signature> const & sig)
191 : ident(ident), name(name), value(value), key(key), sig(sig)
192{}
193
194bool cert::operator<(cert const & other) const
195{
196 return (ident < other.ident)
197 || ((ident == other.ident) && value < other.value)
198 || (((ident == other.ident) && value == other.value) && key < other.key)
199 || ((((ident == other.ident) && value == other.value) && key == other.key) && sig < other.sig);
200}
201
202bool cert::operator==(cert const & other) const
203{
204 return
205 (ident == other.ident)
206 && (value == other.value)
207 && (key == other.key)
208 && (sig == other.sig);
209}
210
211// netio support
212
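// wire format: a raw cert hash and raw ident (each
// constants::merkle_hash_length_in_bytes long), followed by four
// variable-length strings: name, value, key and signature. read_cert
// recomputes the hash and rejects a packet whose hash does not match.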
213void read_cert(string const & in, cert & t)
214{
215 size_t pos = 0;
216 id hash = extract_substring(in, pos,
217 constants::merkle_hash_length_in_bytes,
218 "cert hash");
219 id ident = extract_substring(in, pos,
220 constants::merkle_hash_length_in_bytes,
221 "cert ident");
222 string name, val, key, sig;
223 extract_variable_length_string(in, name, pos, "cert name");
224 extract_variable_length_string(in, val, pos, "cert val");
225 extract_variable_length_string(in, key, pos, "cert key");
226 extract_variable_length_string(in, sig, pos, "cert sig");
227 assert_end_of_buffer(in, pos, "cert");
228
229 hexenc<id> hid;
230 base64<cert_value> bval;
231 base64<rsa_sha1_signature> bsig;
232
233 encode_hexenc(ident, hid);
234 encode_base64(cert_value(val), bval);
235 encode_base64(rsa_sha1_signature(sig), bsig);
236
237 cert tmp(hid, cert_name(name), bval, rsa_keypair_id(key), bsig);
238
239 hexenc<id> hcheck;
240 id check;
241 cert_hash_code(tmp, hcheck);
242 decode_hexenc(hcheck, check);
243 if (!(check == hash))
244 {
245 hexenc<id> hhash;
246 encode_hexenc(hash, hhash);
247 throw bad_decode(F("calculated cert hash '%s' does not match '%s'")
248 % hcheck % hhash);
249 }
250 t = tmp;
251}
252
253void write_cert(cert const & t, string & out)
254{
255 string name, key;
256 hexenc<id> hash;
257 id ident_decoded, hash_decoded;
258 rsa_sha1_signature sig_decoded;
259 cert_value value_decoded;
260
261 cert_hash_code(t, hash);
262 decode_base64(t.value, value_decoded);
263 decode_base64(t.sig, sig_decoded);
264 decode_hexenc(t.ident, ident_decoded);
265 decode_hexenc(hash, hash_decoded);
266
267 out.append(hash_decoded());
268 out.append(ident_decoded());
269 insert_variable_length_string(t.name(), out);
270 insert_variable_length_string(value_decoded(), out);
271 insert_variable_length_string(t.key(), out);
272 insert_variable_length_string(sig_decoded(), out);
273}
274
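// the signable text of a cert is "[name@ident:value]", with whitespace
// stripped from the base64 value; the hash code covers the same fields
// plus the key and signature, colon-separated.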
275void cert_signable_text(cert const & t,
276 string & out)
277{
278 out = (F("[%s@%s:%s]") % t.name % t.ident % remove_ws(t.value())).str();
279 L(F("cert: signable text %s\n") % out);
280}
281
282void cert_hash_code(cert const & t, hexenc<id> & out)
283{
284 string tmp(t.ident()
285 + ":" + t.name()
286 + ":" + remove_ws(t.value())
287 + ":" + t.key()
288 + ":" + remove_ws(t.sig()));
289 data tdat(tmp);
290 calculate_ident(tdat, out);
291}
292
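// calculate_cert: sign the cert's signable text with the private key named
// in t.key, keeping a static cache of keys fetched from the database when
// the persist-phrase hook allows it.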
293void calculate_cert(app_state & app, cert & t)
294{
295 string signed_text;
296 base64< arc4<rsa_priv_key> > priv;
297 cert_signable_text(t, signed_text);
298
299 static std::map<rsa_keypair_id, base64< arc4<rsa_priv_key> > > privkeys;
300 bool persist_ok = (!privkeys.empty()) || app.lua.hook_persist_phrase_ok();
301
302 if (persist_ok
303 && privkeys.find(t.key) != privkeys.end())
304 {
305 priv = privkeys[t.key];
306 }
307 else
308 {
309 N(app.db.private_key_exists(t.key),
310F("no private key '%s' found in database") % t.key);
311 app.db.get_key(t.key, priv);
312 if (persist_ok)
313privkeys.insert(make_pair(t.key, priv));
314 }
315
316 make_signature(app.lua, t.key, priv, signed_text, t.sig);
317}
318
319bool check_cert(app_state & app, cert const & t)
320{
321
322 base64< rsa_pub_key > pub;
323
324 static std::map<rsa_keypair_id, base64< rsa_pub_key > > pubkeys;
325 bool persist_ok = (!pubkeys.empty()) || app.lua.hook_persist_phrase_ok();
326
327 if (persist_ok
328 && pubkeys.find(t.key) != pubkeys.end())
329 {
330 pub = pubkeys[t.key];
331 }
332 else
333 {
334 if (!app.db.public_key_exists(t.key))
335return false;
336 app.db.get_key(t.key, pub);
337 if (persist_ok)
338pubkeys.insert(make_pair(t.key, pub));
339 }
340
341 string signed_text;
342 cert_signable_text(t, signed_text);
343 return check_signature(app.lua, t.key, pub, signed_text, t.sig);
344}
345
346
347// "special certs"
348
349string const ancestor_cert_name("ancestor");
350string const branch_cert_name("branch");
351
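// guess_default_key: prefer an explicitly chosen signing key, then
// whatever key the branch-key hook nominates for the current branch, and
// finally the database's private key if it holds exactly one.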
352bool guess_default_key(rsa_keypair_id & key, app_state & app)
353{
354
355 if (app.signing_key() != "")
356 {
357 key = app.signing_key;
358 return true;
359 }
360
361 if (app.branch_name() != "")
362 {
363 cert_value branch(app.branch_name());
364 if (app.lua.hook_get_branch_key(branch, key))
365return true;
366 }
367
368 vector<rsa_keypair_id> all_privkeys;
369 app.db.get_private_keys(all_privkeys);
370 if (all_privkeys.size() != 1)
371 return false;
372 else
373 {
374 key = all_privkeys[0];
375 return true;
376 }
377}
378
379void guess_branch(manifest_id const & id,
380 app_state & app,
381 cert_value & branchname)
382{
383 if (app.branch_name() != "")
384 {
385 branchname = app.branch_name();
386 }
387 else
388 {
389 vector< manifest<cert> > certs;
390 cert_name branch(branch_cert_name);
391 app.db.get_manifest_certs(id, branch, certs);
392 erase_bogus_certs(certs, app);
393
394 N(certs.size() != 0,
395F("no branch certs found for manifest %s, "
396 "please provide a branch name") % id);
397
398 N(certs.size() == 1,
399F("multiple branch certs found for manifest %s, "
400 "please provide a branch name") % id);
401
402 decode_base64(certs[0].inner().value, branchname);
403 }
404}
405
406void make_simple_cert(hexenc<id> const & id,
407 cert_name const & nm,
408 cert_value const & cv,
409 app_state & app,
410 cert & c)
411{
412 rsa_keypair_id key;
413 N(guess_default_key(key,app),
414 F("no unique private key for cert construction"));
415 base64<cert_value> encoded_val;
416 encode_base64(cv, encoded_val);
417 cert t(id, nm, encoded_val, key);
418 calculate_cert(app, t);
419 c = t;
420}
421
422
423static void put_simple_manifest_cert(manifest_id const & id,
424 cert_name const & nm,
425 cert_value const & val,
426 app_state & app,
427 packet_consumer & pc)
428{
429 cert t;
430 make_simple_cert(id.inner(), nm, val, app, t);
431 manifest<cert> cc(t);
432 pc.consume_manifest_cert(cc);
433}
434
435static void put_simple_file_cert(file_id const & id,
436 cert_name const & nm,
437 cert_value const & val,
438 app_state & app,
439 packet_consumer & pc)
440{
441 cert t;
442 make_simple_cert(id.inner(), nm, val, app, t);
443 file<cert> fc(t);
444 pc.consume_file_cert(fc);
445}
446
447void cert_manifest_in_branch(manifest_id const & man,
448 cert_value const & branchname,
449 app_state & app,
450 packet_consumer & pc)
451{
452 put_simple_manifest_cert (man, branch_cert_name,
453 branchname, app, pc);
454}
455
456
457static void get_parents(manifest_id const & child,
458set<manifest_id> & parents,
459app_state & app)
460{
461 vector< manifest<cert> > certs;
462 parents.clear();
463 app.db.get_manifest_certs(child, ancestor_cert_name, certs);
464 erase_bogus_certs(certs, app);
465 for(vector< manifest<cert> >::const_iterator i = certs.begin();
466 i != certs.end(); ++i)
467 {
468 cert_value tv;
469 decode_base64(i->inner().value, tv);
470 manifest_id parent(tv());
471 vector< manifest<cert> > disapprove_certs;
472 app.db.get_manifest_certs(child, disapproval_cert_name,
473i->inner().value, disapprove_certs);
474 erase_bogus_certs(disapprove_certs, app);
475 if (disapprove_certs.empty())
476parents.insert(parent);
477 }
478}
479
480
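// find_relevant_edges: walk from child back towards ancestor, recording
// every ancestry edge that lies on some path reaching ancestor;
// visited_nodes remembers dead ends so no node is expanded twice.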
481static bool find_relevant_edges(manifest_id const & ancestor,
482manifest_id const & child,
483app_state & app,
484multimap <manifest_id, manifest_id> & relevant_edges,
485set<manifest_id> & visited_nodes)
486{
487 if (ancestor == child)
488 return true;
489
490 visited_nodes.insert(child);
491
492 set<manifest_id> parents;
493 get_parents(child, parents, app);
494 if (parents.size() == 0)
495 return false;
496
497 bool relevant_child = false;
498
499 for(set<manifest_id>::const_iterator i = parents.begin();
500 i != parents.end(); ++i)
501 {
502 if (relevant_edges.find(*i) != relevant_edges.end())
503{
504 // edge was already deemed relevant; don't re-traverse!
505 relevant_child = true;
506}
507 else if (visited_nodes.find(*i) != visited_nodes.end())
508{
509 // node was visited (and presumably deemed irrelevant);
510 // don't re-traverse!
511}
512 else if (find_relevant_edges(ancestor, *i, app,
513 relevant_edges, visited_nodes))
514{
515 relevant_child = true;
516 relevant_edges.insert(make_pair(child, *i));
517}
518 }
519
520 return relevant_child;
521}
522
523
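// write_ancestry_paths: gather the edges relevant to reaching ancestor,
// then walk a frontier of manifests outward from begin, emitting each
// parent's certs and the patch set from parent to child, reconstructing
// parents from stored deltas when they exist.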
524void write_ancestry_paths(manifest_id const & ancestor,
525 manifest_id const & begin,
526 app_state & app,
527 packet_consumer & pc)
528{
529
530 typedef multimap < manifest_id, manifest_id > emap;
531 typedef pair< shared_ptr<data>, shared_ptr<manifest_map> > frontier_entry;
532 typedef map<manifest_id, frontier_entry> fmap;
533
534 shared_ptr<fmap> frontier(new fmap());
535 shared_ptr<fmap> next_frontier(new fmap());
536 emap relevant_edges;
537 set<manifest_id> visited;
538
539 find_relevant_edges(ancestor, begin, app, relevant_edges, visited);
540
541 shared_ptr<data> begin_data(new data());
542 shared_ptr<manifest_map> begin_map(new manifest_map());
543 {
544 manifest_data mdat;
545 app.db.get_manifest_version(begin, mdat);
546 unpack(mdat.inner(), *begin_data);
547 }
548 read_manifest_map(*begin_data, *begin_map);
549
550 P(F("writing %d historical edges\n") % relevant_edges.size());
551 ticker n_edges("edges");
552
553 frontier->insert(make_pair(begin, make_pair(begin_data, begin_map)));
554
555 while (!frontier->empty())
556 {
557 for (fmap::const_iterator child = frontier->begin();
558 child != frontier->end(); ++child)
559{
560 manifest_id child_id = child->first;
561
562 pair<emap::const_iterator, emap::const_iterator> range;
563 shared_ptr<data> child_data = child->second.first;
564 shared_ptr<manifest_map> child_map = child->second.second;
565
566 range = relevant_edges.equal_range(child_id);
567
568 for (emap::const_iterator edge = range.first;
569 edge != range.second; ++edge)
570 {
571 manifest_id parent_id = edge->second;
572
573 L(F("queueing edge %s -> %s\n") % parent_id % child_id);
574
575 // queue all the certs for this parent
576 vector< manifest<cert> > certs;
577 app.db.get_manifest_certs(parent_id, certs);
578 for(vector< manifest<cert> >::const_iterator cert = certs.begin();
579 cert != certs.end(); ++cert)
580pc.consume_manifest_cert(*cert);
581
582 // construct the parent
583 shared_ptr<data> parent_data(new data());
584 shared_ptr<manifest_map> parent_map(new manifest_map());
585
586 if (app.db.manifest_delta_exists(child_id, parent_id))
587app.db.compute_older_version(child_id, parent_id,
588 *child_data, *parent_data);
589 else
590{
591 manifest_data mdata;
592 app.db.get_manifest_version(parent_id, mdata);
593 unpack(mdata.inner(), *parent_data);
594}
595
596 ++n_edges;
597
598 read_manifest_map(*parent_data, *parent_map);
599
600 // queue the delta to the parent
601 patch_set ps;
602 manifests_to_patch_set(*parent_map, *child_map, app, ps);
603 patch_set_to_packets(ps, app, pc);
604
605 // store the parent for the next cycle
606 next_frontier->insert
607(make_pair(parent_id, make_pair(parent_data, parent_map)));
608 }
609}
610 swap(frontier, next_frontier);
611 next_frontier->clear();
612 }
613}
614
615// nb: "heads" only makes sense in the context of manifests (at the
616// moment). we'll see if anyone cares to try branch certs on files. it
617// doesn't sound terribly useful, but who knows.
618
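// get_branch_heads: start from every manifest carrying the branch cert,
// then discard any manifest named as a parent by a non-disapproved
// ancestor cert, leaving only the childless members of the branch.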
619void get_branch_heads(cert_value const & branchname,
620 app_state & app,
621 set<manifest_id> & heads)
622{
623 heads.clear();
624 vector< manifest<cert> > branch_certs, ancestor_certs;
625 base64<cert_value> branch_encoded;
626 encode_base64(branchname, branch_encoded);
627
628 P(F("fetching heads of branch '%s'\n") % branchname);
629
630 app.db.get_head_candidates(branch_encoded(), branch_certs, ancestor_certs);
631 erase_bogus_certs(branch_certs, app);
632 erase_bogus_certs(ancestor_certs, app);
633
634 for (vector< manifest<cert> >::const_iterator i = branch_certs.begin();
635 i != branch_certs.end(); ++i)
636 {
637 heads.insert(i->inner().ident);
638 }
639
640 L(F("began with %d candidate heads\n") % heads.size());
641
 642 // Remove every manifest with descendants.
643 for (vector< manifest<cert> >::const_iterator i = ancestor_certs.begin();
644 i != ancestor_certs.end(); ++i)
645 {
646 // skip those invalidated by a specific disapproval
647 vector< manifest<cert> > disapprove_certs;
648 app.db.get_manifest_certs(i->inner().ident, disapproval_cert_name,
649i->inner().value, disapprove_certs);
650 erase_bogus_certs(disapprove_certs, app);
651 if (! disapprove_certs.empty())
652continue;
653
654 cert_value tv;
655 decode_base64(i->inner().value, tv);
656 manifest_id parent(tv());
657 set<manifest_id>::const_iterator j = heads.find(parent);
658 if (j != heads.end())
659{
660 heads.erase(j);
661}
662 }
663
664 L(F("reduced to %d heads\n") % heads.size());
665}
666
667void cert_file_ancestor(file_id const & parent,
668file_id const & child,
669app_state & app,
670packet_consumer & pc)
671{
672 if (parent == child)
673 {
674 W(F("parent file %d is same as child, skipping edge\n") % parent);
675 return;
676 }
677 put_simple_file_cert (child, ancestor_cert_name,
678parent.inner()(), app, pc);
679}
680
681void cert_manifest_ancestor(manifest_id const & parent,
682 manifest_id const & child,
683 app_state & app,
684 packet_consumer & pc)
685{
686 if (parent == child)
687 {
688 W(F("parent manifest %d is same as child, skipping edge\n") % parent);
689 return;
690 }
691 put_simple_manifest_cert (child, ancestor_cert_name,
692 parent.inner()(), app, pc);
693}
694
695
696// calculating least common ancestors is a delicate thing.
697//
698// it turns out that we cannot choose the simple "least common ancestor"
699// for purposes of a merge, because it is possible that there are two
700// equally reachable common ancestors, and this produces ambiguity in the
701// merge. the result -- in a pathological case -- is silently accepting one
702// set of edits while discarding another; not exactly what you want a
703// version control tool to do.
704//
705// a conservative approximation is what we'll call a "subgraph recurring"
706// LCA algorithm. this is somewhat like locating the least common dominator
707// node, but not quite. it is actually just a vanilla LCA search, except
708// that any time there's a fork (a historical merge looks like a fork from
709// our perspective, working backwards from children to parents) it reduces
710// the fork to a common parent via a sequence of pairwise recursive calls
711// to itself before proceeding. this will always resolve to a common parent
712// with no ambiguity, unless it falls off the root of the graph.
713//
714// unfortunately the subgraph recurring algorithm sometimes goes too far
715// back in history -- for example if there is an unambiguous propagate from
716// one branch to another, the entire subgraph preceding the propagate on
717// the recipient branch is elided, since it is a merge.
718//
719// our current hypothesis is that the *exact* condition we're looking for,
720// when doing a merge, is the least node which dominates one side of the
721// merge and is an ancestor of the other.
722
723static void
724ensure_parents_loaded(unsigned long man,
725 map< unsigned long, shared_ptr< dynamic_bitset<> > > & parents,
726 interner<unsigned long> & intern,
727 app_state & app)
728{
729 if (parents.find(man) != parents.end())
730 return;
731
732 L(F("loading parents for node %d\n") % man);
733
734 set<manifest_id> imm_parents;
735 get_parents(manifest_id(intern.lookup(man)), imm_parents, app);
736
737 shared_ptr< dynamic_bitset<> > bits =
738 shared_ptr< dynamic_bitset<> >(new dynamic_bitset<>(parents.size()));
739
740 for (set<manifest_id>::const_iterator p = imm_parents.begin();
741 p != imm_parents.end(); ++p)
742 {
743 unsigned long pn = intern.intern(p->inner()());
744 L(F("parent %s -> node %d\n") % *p % pn);
745 if (pn >= bits->size())
746bits->resize(pn+1);
747 bits->set(pn);
748 }
749
750 parents.insert(make_pair(man, bits));
751}
752
753static bool
754expand_dominators(map< unsigned long, shared_ptr< dynamic_bitset<> > > & parents,
755 map< unsigned long, shared_ptr< dynamic_bitset<> > > & dominators,
756 interner<unsigned long> & intern,
757 app_state & app)
758{
759 bool something_changed = false;
760 vector<unsigned long> nodes;
761
762 nodes.reserve(dominators.size());
763
764 // pass 1, pull out all the node numbers we're going to scan this time around
765 for (map< unsigned long, shared_ptr< dynamic_bitset<> > >::const_iterator e = dominators.begin();
766 e != dominators.end(); ++e)
767 nodes.push_back(e->first);
768
769 // pass 2, update any of the dominator entries we can
770 for (vector<unsigned long>::const_iterator n = nodes.begin(); n != nodes.end(); ++n)
771 {
772 shared_ptr< dynamic_bitset<> > bits = dominators[*n];
773 dynamic_bitset<> saved(*bits);
774 if (bits->size() <= *n)
775bits->resize(*n + 1);
776 bits->set(*n);
777
778 ensure_parents_loaded(*n, parents, intern, app);
779 shared_ptr< dynamic_bitset<> > n_parents = parents[*n];
780
781 dynamic_bitset<> intersection(bits->size());
782
783 bool first = true;
784 for (unsigned long parent = 0; parent != n_parents->size(); ++parent)
785{
786 if (! n_parents->test(parent))
787 continue;
788
789 if (dominators.find(parent) == dominators.end())
790 dominators.insert(make_pair(parent,
791shared_ptr< dynamic_bitset<> >(new dynamic_bitset<>())));
792 shared_ptr< dynamic_bitset<> > pbits = dominators[parent];
793 if (bits->size() > pbits->size())
794 pbits->resize(bits->size());
795 if (first)
796 {
797 intersection = (*pbits);
798 first = false;
799 }
800 else
801 intersection &= (*pbits);
802}
803
804 (*bits) |= intersection;
805 if (*bits != saved)
806something_changed = true;
807 }
808 return something_changed;
809}
810
811
812static bool
813expand_ancestors(map< unsigned long, shared_ptr< dynamic_bitset<> > > & parents,
814 map< unsigned long, shared_ptr< dynamic_bitset<> > > & ancestors,
815 interner<unsigned long> & intern,
816 app_state & app)
817{
818 bool something_changed = false;
819 vector<unsigned long> nodes;
820
821 nodes.reserve(ancestors.size());
822
823 // pass 1, pull out all the node numbers we're going to scan this time around
824 for (map< unsigned long, shared_ptr< dynamic_bitset<> > >::const_iterator e = ancestors.begin();
825 e != ancestors.end(); ++e)
826 nodes.push_back(e->first);
827
828 // pass 2, update any of the ancestor entries we can
829 for (vector<unsigned long>::const_iterator n = nodes.begin(); n != nodes.end(); ++n)
830 {
831 shared_ptr< dynamic_bitset<> > bits = ancestors[*n];
832 dynamic_bitset<> saved(*bits);
833 if (bits->size() <= *n)
834bits->resize(*n + 1);
835 bits->set(*n);
836
837 ensure_parents_loaded(*n, parents, intern, app);
838 shared_ptr< dynamic_bitset<> > n_parents = parents[*n];
839 for (unsigned long parent = 0; parent != n_parents->size(); ++parent)
840{
841 if (! n_parents->test(parent))
842 continue;
843
844 if (bits->size() <= parent)
845 bits->resize(parent + 1);
846 bits->set(parent);
847
848 if (ancestors.find(parent) == ancestors.end())
849 ancestors.insert(make_pair(parent,
850shared_ptr< dynamic_bitset<> >(new dynamic_bitset<>())));
851 shared_ptr< dynamic_bitset<> > pbits = ancestors[parent];
852 if (bits->size() > pbits->size())
853 pbits->resize(bits->size());
854 (*bits) |= (*pbits);
855}
856
857 if (*bits != saved)
858something_changed = true;
859 }
860 return something_changed;
861}
862
863static bool
864find_intersecting_node(dynamic_bitset<> & fst,
865 dynamic_bitset<> & snd,
866 interner<unsigned long> const & intern,
867 manifest_id & anc)
868{
869
870 if (fst.size() > snd.size())
871 snd.resize(fst.size());
872 else if (snd.size() > fst.size())
873 fst.resize(snd.size());
874
875 dynamic_bitset<> intersection = fst & snd;
876 if (intersection.any())
877 {
 878 L(F("found %d intersecting nodes\n") % intersection.count());
879 for (unsigned long i = 0; i < intersection.size(); ++i)
880{
881 if (intersection.test(i))
882 {
883 anc = manifest_id(intern.lookup(i));
884 return true;
885 }
886}
887 }
888 return false;
889}
890
891// static void
892// dump_bitset_map(string const & hdr,
893// map< unsigned long, shared_ptr< dynamic_bitset<> > > const & mm)
894// {
895// L(F("dumping [%s] (%d entries)\n") % hdr % mm.size());
896// for (map< unsigned long, shared_ptr< dynamic_bitset<> > >::const_iterator i = mm.begin();
897// i != mm.end(); ++i)
898// {
899// L(F("dump [%s]: %d -> %s\n") % hdr % i->first % (*(i->second)));
900// }
901// }
902
903bool find_common_ancestor(manifest_id const & left,
904 manifest_id const & right,
905 manifest_id & anc,
906 app_state & app)
907{
908 interner<unsigned long> intern;
909 map< unsigned long, shared_ptr< dynamic_bitset<> > >
910 parents, ancestors, dominators;
911
912 unsigned long ln = intern.intern(left.inner()());
913 unsigned long rn = intern.intern(right.inner()());
914
915 shared_ptr< dynamic_bitset<> > lanc = shared_ptr< dynamic_bitset<> >(new dynamic_bitset<>());
916 shared_ptr< dynamic_bitset<> > ranc = shared_ptr< dynamic_bitset<> >(new dynamic_bitset<>());
917 shared_ptr< dynamic_bitset<> > ldom = shared_ptr< dynamic_bitset<> >(new dynamic_bitset<>());
918 shared_ptr< dynamic_bitset<> > rdom = shared_ptr< dynamic_bitset<> >(new dynamic_bitset<>());
919
920 ancestors.insert(make_pair(ln, lanc));
921 ancestors.insert(make_pair(rn, ranc));
922 dominators.insert(make_pair(ln, ldom));
923 dominators.insert(make_pair(rn, rdom));
924
925 L(F("searching for common ancestor, left=%s right=%s\n") % left % right);
926
927 while (expand_ancestors(parents, ancestors, intern, app) ||
928 expand_dominators(parents, dominators, intern, app))
929 {
930 L(F("common ancestor scan [par=%d,anc=%d,dom=%d]\n") %
931parents.size() % ancestors.size() % dominators.size());
932
933 if (find_intersecting_node(*lanc, *rdom, intern, anc))
934{
935 L(F("found node %d, ancestor of left %s and dominating right %s\n")
936 % anc % left % right);
937 return true;
938}
939
940 else if (find_intersecting_node(*ranc, *ldom, intern, anc))
941{
942 L(F("found node %d, ancestor of right %s and dominating left %s\n")
943 % anc % right % left);
944 return true;
945}
946 }
947 // dump_bitset_map("ancestors", ancestors);
948 // dump_bitset_map("dominators", dominators);
949 // dump_bitset_map("parents", parents);
950 return false;
951}
952
953
954// stuff for handling rename certs / rename edges
955
956// rename edges associate a particular name mapping with a particular edge
957// in the ancestry graph. they assist the algorithm in patch_set.cc in
958// determining which add/del pairs count as moves.
959
960rename_edge::rename_edge(rename_edge const & other)
961{
962 parent = other.parent;
963 child = other.child;
964 mapping = other.mapping;
965}
966
967static void include_rename_edge(rename_edge const & in,
968rename_edge & out)
969{
970 L(F("merging rename edge %s -> %s with %s -> %s\n")
971 % in.parent % in.child % out.parent % out.child);
972
973 set<file_path> rename_targets;
974 for (rename_set::const_iterator i = out.mapping.begin();
975 i != out.mapping.end(); ++i)
976 {
977 I(rename_targets.find(i->second) == rename_targets.end());
978 rename_targets.insert(i->second);
979 }
980
981 for (rename_set::const_iterator i = in.mapping.begin();
982 i != in.mapping.end(); ++i)
983 {
984 rename_set::const_iterator other = out.mapping.find(i->first);
985 if (other == out.mapping.end())
986I(rename_targets.find(i->second) == rename_targets.end());
987 else
988N(other->second == i->second,
989 F("impossible historical record of renames: %s renamed to both %s and %s")
990 % i->first % i->second % other->second);
991
992 L(F("merged in rename of %s -> %s\n")
993% i->first % i->second);
994 rename_targets.insert(i->second);
995 out.mapping.insert(*i);
996 }
997}
998
999static void compose_rename_edges(rename_edge const & a,
1000 rename_edge const & b,
1001 rename_edge & out)
1002{
1003 I(a.child == b.parent);
1004 out.mapping.clear();
1005 out.parent = a.parent;
1006 out.child = b.child;
1007 set<file_path> rename_targets;
1008
1009 L(F("composing rename edges %s -> %s and %s -> %s\n")
1010 % a.parent % a.child % b.parent % b.child);
1011
1012 for (rename_set::const_iterator i = a.mapping.begin();
1013 i != a.mapping.end(); ++i)
1014 {
1015 I(rename_targets.find(i->second) == rename_targets.end());
1016 I(out.mapping.find(i->first) == out.mapping.end());
1017 rename_targets.insert(i->second);
1018
1019 rename_set::const_iterator j = b.mapping.find(i->second);
1020 if (j != b.mapping.end())
1021{
1022 L(F("composing rename %s -> %s with %s -> %s\n")
1023 % i->first % i->second % j->first % j->second);
1024 out.mapping.insert(make_pair(i->first, j->second));
1025}
1026 else
1027{
1028 L(F("composing lone rename %s -> %s\n")
1029 % i->first % i->second);
1030 out.mapping.insert(*i);
1031}
1032 }
1033}
1034
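// a rename edge is stored in a cert value as a gzipped text block: the
// parent manifest id on the first line, then alternating source and
// destination paths, one per line.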
1035static void write_rename_edge(rename_edge const & edge,
1036 string & val)
1037{
1038 ostringstream oss;
1039 gzip<data> compressed;
1040 oss << edge.parent << "\n";
1041 for (rename_set::const_iterator i = edge.mapping.begin();
1042 i != edge.mapping.end(); ++i)
1043 {
1044 oss << i->first << "\n" << i->second << "\n";
1045 }
1046 encode_gzip(data(oss.str()), compressed);
1047 val = compressed();
1048}
1049
1050static void read_rename_edge(hexenc<id> const & node,
1051 base64<cert_value> const & val,
1052 rename_edge & edge)
1053{
1054 edge.child = manifest_id(node);
1055 cert_value decoded;
1056 data decompressed_data;
1057 string decompressed;
1058 decode_base64(val, decoded);
1059 decode_gzip(gzip<data>(decoded()), decompressed_data);
1060 decompressed = decompressed_data();
1061
1062 vector<string> lines;
1063 split_into_lines(decompressed, lines);
1064
1065 N(lines.size() >= 1 && lines.size() % 2 == 1,
1066 F("malformed rename cert"));
1067
1068 edge.parent = manifest_id(idx(lines, 0));
1069 set<file_path> rename_targets;
1070
1071 for (size_t i = 1; i+1 < lines.size(); ++i)
1072 {
1073 std::string src(idx(lines, i));
1074 ++i;
1075 std::string dst(idx(lines, i));
1076 N(edge.mapping.find(file_path(src)) == edge.mapping.end(),
1077 F("duplicate rename src entry for %s") % src);
1078 N(rename_targets.find(file_path(dst)) == rename_targets.end(),
1079 F("duplicate rename dst entry for %s") % dst);
1080 rename_targets.insert(file_path(dst));
1081 edge.mapping.insert(make_pair(file_path(src), file_path(dst)));
1082 }
1083}
1084
1085/*
1086 * The idea with this algorithm is to walk from child up to ancestor,
1087 * recursively, accumulating all the rename_edges associated with
1088 * intermediate nodes into *one big rename_edge*. don't confuse an ancestry
1089 * edge with a rename edge here: when we get_parents, that's loading
1090 * ancestry edges. rename edges are a secondary graph overlaid on some --
1091 * but not all -- edges in the ancestry graph. I know, it's a real party.
1092 *
1093 * clever readers will realize this is an overlapping-subproblem type
1094 * situation (as is the relevant_edges algorithm later on) and thus needs
1095 * to keep a dynamic programming map to keep itself in linear complexity.
1096 *
1097 * in fact, we keep two: one which maps to computed results (partial_edges)
1098 * and one which just keeps a set of all nodes we traversed
1099 * (visited_nodes). in theory it could be one map with an extra bool stuck
1100 * on each entry, but I think that would make it even less readable. it's
1101 * already atrocious.
1102 */
1103
1104static bool calculate_renames_recursive(manifest_id const & ancestor,
1105manifest_id const & child,
1106app_state & app,
1107rename_edge & edge,
1108map<manifest_id, shared_ptr<rename_edge> > & partial_edges,
1109set<manifest_id> & visited_nodes)
1110{
1111
1112 if (ancestor == child)
1113 return false;
1114
1115 visited_nodes.insert(child);
1116
1117 set<manifest_id> parents;
1118 get_parents(child, parents, app);
1119 bool relevant_child = false;
1120
1121 edge.child = child;
1122 map<manifest_id, rename_edge> incident_edges;
1123
1124 rename_edge child_edge;
1125 vector< manifest<cert> > certs;
1126 app.db.get_manifest_certs(child, cert_name(rename_cert_name), certs);
1127 erase_bogus_certs(certs, app);
1128
1129 L(F("found %d incident rename edges at node %s\n")
1130 % certs.size() % child);
1131
1132 for(vector< manifest<cert> >::const_iterator j = certs.begin();
1133 j != certs.end(); ++j)
1134 {
1135 rename_edge curr_child_edge;
1136 read_rename_edge(j->inner().ident, j->inner().value, curr_child_edge);
1137 incident_edges.insert(make_pair(curr_child_edge.parent, curr_child_edge));
1138 relevant_child = true;
1139 }
1140
1141 L(F("exploring renames from parents of %s, seeking towards %s\n")
1142 % child % ancestor);
1143
1144 for(set<manifest_id>::const_iterator i = parents.begin();
1145 i != parents.end(); ++i)
1146 {
1147
1148 bool relevant_parent = false;
1149 rename_edge curr_parent_edge;
1150 map<manifest_id, shared_ptr<rename_edge> >::const_iterator
1151j = partial_edges.find(*i);
1152 if (j != partial_edges.end())
1153{
1154 // a recursive call has traversed this parent before and found an
1155 // existing rename edge. just reuse that rather than re-traversing
1156 curr_parent_edge = *(j->second);
1157 relevant_parent = true;
1158}
1159 else if (visited_nodes.find(*i) != visited_nodes.end())
1160{
1161 // a recursive call has traversed this parent, but there was no
1162 // rename edge on it, so the parent is irrelevant. skip.
1163 relevant_parent = false;
1164}
1165 else
1166relevant_parent = calculate_renames_recursive(ancestor, *i, app,
1167 curr_parent_edge,
1168 partial_edges,
1169 visited_nodes);
1170
1171 if (relevant_parent)
1172{
1173 map<manifest_id, rename_edge>::const_iterator inc = incident_edges.find(*i);
1174 if (inc != incident_edges.end())
1175 {
1176 L(F("ancestor edge %s -> %s is relevant, composing with edge %s -> %s\n")
1177% curr_parent_edge.parent % curr_parent_edge.child
1178% inc->second.parent % inc->second.child);
1179 rename_edge tmp;
1180 compose_rename_edges(curr_parent_edge, inc->second, tmp);
1181 include_rename_edge(tmp, edge);
1182 incident_edges.erase(*i);
1183 }
1184 else
1185 {
1186 L(F("ancestor edge %s -> %s is relevant, merging with current\n") % (*i) % child);
1187 include_rename_edge(curr_parent_edge, edge);
1188 }
1189 relevant_child = true;
1190}
1191 }
1192
1193 // copy any remaining incident edges
1194 for (map<manifest_id, rename_edge>::const_iterator i = incident_edges.begin();
1195 i != incident_edges.end(); ++i)
1196 {
1197 relevant_child = true;
1198 L(F("adding lone incident edge %s -> %s\n")
1199% i->second.parent % i->second.child);
1200 include_rename_edge(i->second, edge);
1201 }
1202
1203 // store the partial edge from ancestor -> child, so that if anyone
1204 // re-traverses this edge they'll just fetch from the partial_edges
1205 // cache.
1206 if (relevant_child)
1207 partial_edges.insert(make_pair(child, shared_ptr<rename_edge>(new rename_edge(edge))));
1208
1209 return relevant_child;
1210}
1211
1212void calculate_renames(manifest_id const & ancestor,
1213 manifest_id const & child,
1214 app_state & app,
1215 rename_edge & edge)
1216{
1217 // it's ok if we can't find any paths
1218 set<manifest_id> visited;
1219 map<manifest_id, shared_ptr<rename_edge> > partial;
1220 calculate_renames_recursive(ancestor, child, app, edge, partial, visited);
1221}
1222
1223
1224// "standard certs"
1225
1226string const date_cert_name = "date";
1227string const author_cert_name = "author";
1228string const tag_cert_name = "tag";
1229string const changelog_cert_name = "changelog";
1230string const comment_cert_name = "comment";
1231string const disapproval_cert_name = "disapproval";
1232string const testresult_cert_name = "testresult";
1233string const rename_cert_name = "rename";
1234string const vcheck_cert_name = "vcheck";
1235
1236
1237static
1238void cert_manifest_date(manifest_id const & m,
1239boost::posix_time::ptime t,
1240app_state & app,
1241packet_consumer & pc)
1242{
1243 string val = boost::posix_time::to_iso_extended_string(t);
1244 put_simple_manifest_cert(m, date_cert_name, val, app, pc);
1245}
1246
1247void cert_manifest_date_time(manifest_id const & m,
1248 time_t t,
1249 app_state & app,
1250 packet_consumer & pc)
1251{
1252 // make sure you do all your CVS conversions by 2038!
1253 boost::posix_time::ptime tmp(boost::gregorian::date(1970,1,1),
1254 boost::posix_time::seconds(static_cast<long>(t)));
1255 cert_manifest_date(m, tmp, app, pc);
1256}
1257
1258void cert_manifest_date_now(manifest_id const & m,
1259 app_state & app,
1260 packet_consumer & pc)
1261{
1262 cert_manifest_date(m, boost::posix_time::second_clock::universal_time(), app, pc);
1263}
1264
1265void cert_manifest_author(manifest_id const & m,
1266 string const & author,
1267 app_state & app,
1268 packet_consumer & pc)
1269{
1270 put_simple_manifest_cert(m, author_cert_name, author, app, pc);
1271}
1272
1273void cert_manifest_author_default(manifest_id const & m,
1274 app_state & app,
1275 packet_consumer & pc)
1276{
1277 string author;
1278 N(app.lua.hook_get_author(app.branch_name(), author),
1279 F("no default author name for branch '%s'") % app.branch_name);
1280 put_simple_manifest_cert(m, author_cert_name, author, app, pc);
1281}
1282
1283void cert_manifest_tag(manifest_id const & m,
1284 string const & tagname,
1285 app_state & app,
1286 packet_consumer & pc)
1287{
1288 put_simple_manifest_cert(m, tag_cert_name, tagname, app, pc);
1289}
1290
1291
1292void cert_manifest_changelog(manifest_id const & m,
1293 string const & changelog,
1294 app_state & app,
1295 packet_consumer & pc)
1296{
1297 put_simple_manifest_cert(m, changelog_cert_name, changelog, app, pc);
1298}
1299
1300void cert_file_comment(file_id const & f,
1301 string const & comment,
1302 app_state & app,
1303 packet_consumer & pc)
1304{
1305 put_simple_file_cert(f, comment_cert_name, comment, app, pc);
1306}
1307
1308void cert_manifest_comment(manifest_id const & m,
1309 string const & comment,
1310 app_state & app,
1311 packet_consumer & pc)
1312{
1313 put_simple_manifest_cert(m, comment_cert_name, comment, app, pc);
1314}
1315
1316void cert_file_approval(file_id const & f1,
1317file_id const & f2,
1318bool const approval,
1319app_state & app,
1320packet_consumer & pc)
1321{
1322 if (approval)
1323 put_simple_file_cert(f2, ancestor_cert_name, f1.inner()(), app, pc);
1324 else
1325 put_simple_file_cert(f2, disapproval_cert_name, f1.inner()(), app, pc);
1326}
1327
1328void cert_manifest_approval(manifest_id const & m1,
1329 manifest_id const & m2,
1330 bool const approval,
1331 app_state & app,
1332 packet_consumer & pc)
1333{
1334 if (approval)
1335 put_simple_manifest_cert(m2, ancestor_cert_name, m1.inner()(), app, pc);
1336 else
1337 put_simple_manifest_cert(m2, disapproval_cert_name, m1.inner()(), app, pc);
1338}
1339
1340void cert_manifest_testresult(manifest_id const & m,
1341 string const & results,
1342 app_state & app,
1343 packet_consumer & pc)
1344{
1345 bool passed = false;
1346 try
1347 {
1348 passed = lexical_cast<bool>(results);
1349 }
1350 catch (boost::bad_lexical_cast & e)
1351 {
1352 throw oops("test results must be a boolean value: '0' or '1'");
1353 }
1354 put_simple_manifest_cert(m, testresult_cert_name, lexical_cast<string>(passed), app, pc);
1355}
1356
1357void cert_manifest_rename(manifest_id const & m,
1358 rename_edge const & re,
1359 app_state & app,
1360 packet_consumer & pc)
1361{
1362 string val;
1363 write_rename_edge(re, val);
1364 put_simple_manifest_cert(m, rename_cert_name, val, app, pc);
1365}
1366
1367
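// vcheck certs: mac every file version in the manifest with a random
// seed, build a manifest of those macs and mac that in turn; the cert
// value is "seed:mac", so anyone holding the same file contents can
// recompute the mac and compare.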
1368static void calculate_vcheck_mac(manifest_id const & m,
1369 string const & seed,
1370 string & mac,
1371 app_state & app)
1372{
1373 L(F("calculating vcheck cert on %s with seed %s\n") % m % seed);
1374
1375 manifest_data mdat;
1376 manifest_map mm, mm_mac;
1377 app.db.get_manifest_version(m, mdat);
1378 read_manifest_map(mdat, mm);
1379 for (manifest_map::const_iterator i = mm.begin(); i != mm.end(); ++i)
1380 {
1381 path_id_pair pip(i);
1382 N(app.db.file_version_exists(pip.ident()),
1383F("missing file version %s for %s") % pip.ident() % pip.path());
1384
1385 file_data fdat;
1386 data dat;
1387 string fmac;
1388
1389 app.db.get_file_version(pip.ident(), fdat);
1390 unpack(fdat.inner(), dat);
1391 calculate_mac(seed, dat(), fmac);
1392 mm_mac.insert(make_pair(pip.path(), file_id(fmac)));
1393 L(F("mac of %s (seed=%s, id=%s) is %s\n") % pip.path() % seed % pip.ident() % fmac);
1394 }
1395
1396 data dat;
1397 write_manifest_map(mm_mac, dat);
1398 calculate_mac(seed, dat(), mac);
1399 L(F("mac of %d entry mac-manifest is %s\n") % mm_mac.size() % mac);
1400}
1401
1402void cert_manifest_vcheck(manifest_id const & m,
1403 app_state & app,
1404 packet_consumer & pc)
1405{
1406 string mac;
1407 string seed;
1408 make_random_seed(app, seed);
1409 P(F("calculating vcheck packet for %s with seed %s\n") % m % seed);
1410 calculate_vcheck_mac(m, seed, mac, app);
1411 string val = seed + ":" + mac;
1412 put_simple_manifest_cert(m, vcheck_cert_name, val, app, pc);
1413}
1414
1415void check_manifest_vcheck(manifest_id const & m,
1416 app_state & app)
1417{
1418
1419 vector< manifest<cert> > certs;
1420 app.db.get_manifest_certs(m, cert_name(vcheck_cert_name), certs);
1421 erase_bogus_certs(certs, app);
1422 N(certs.size() != 0,
1423 F("missing non-bogus vcheck certs on %s") % m);
1424
1425 for (vector< manifest<cert> >::const_iterator cert = certs.begin();
1426 cert != certs.end(); ++cert)
1427 {
1428 cert_value tv;
1429 decode_base64(cert->inner().value, tv);
1430
1431 string cv = tv();
1432 string::size_type colon_pos = cv.find(':');
1433
1434 N(colon_pos != string::npos ||
1435colon_pos +1 >= cv.size(),
1436F("malformed vcheck cert on %s: %s") % m % cv);
1437
1438 string seed = cv.substr(0, colon_pos);
1439 string their_mac = cv.substr(colon_pos + 1);
1440 string our_mac;
1441
1442 P(F("confirming vcheck packet on %s from %s (%d bit seed)\n")
1443% m % cert->inner().key % (seed.size() * 4));
1444
1445 calculate_vcheck_mac(m, seed, our_mac, app);
1446
1447 if (their_mac != our_mac)
1448{
1449 W(F("vcheck FAILED: key %s, id %s\n") % cert->inner().key % m);
1450 W(F("seed: %s\n") % seed);
1451 W(F("their mac: %s\n") % their_mac);
1452 W(F("our mac: %s\n") % our_mac);
1453 W(F("you should investigate the contents of manifest %s immediately\n") % m);
1454}
1455 else
1456P(F("vcheck OK: key %s, id %s\n") % cert->inner().key % m);
1457 }
1458}
1459
1460#ifdef BUILD_UNIT_TESTS
1461#include "unit_tests.hh"
1462
1463#endif // BUILD_UNIT_TESTS
