rcs_import.cc

1// -*- mode: C++; c-file-style: "gnu"; indent-tabs-mode: nil -*-
2// copyright (C) 2002, 2003, 2004 graydon hoare <graydon@pobox.com>
3// all rights reserved.
4// licensed to the public under the terms of the GNU GPL (>= 2)
5// see the file COPYING for details
6
7#include <algorithm>
8#include <iostream>
9#include <iterator>
10#include <list>
11#include <map>
12#include <set>
13#include <sstream>
14#include <stack>
15#include <stdexcept>
16#include <string>
17#include <vector>
18
19#include <unistd.h>
20
21#include <boost/shared_ptr.hpp>
22#include <boost/scoped_ptr.hpp>
23#include <boost/lexical_cast.hpp>
24#include <boost/tokenizer.hpp>
25
26#include "app_state.hh"
27#include "cert.hh"
28#include "constants.hh"
29#include "cycle_detector.hh"
30#include "database.hh"
31#include "file_io.hh"
32#include "keys.hh"
33#include "interner.hh"
34#include "manifest.hh"
35#include "packet.hh"
36#include "rcs_file.hh"
37#include "sanity.hh"
38#include "transforms.hh"
39#include "ui.hh"
40#include "platform.hh"
41#include "paths.hh"
42
43using namespace std;
44using boost::shared_ptr;
45using boost::scoped_ptr;
46
47// cvs history recording stuff
48
49typedef unsigned long cvs_branchname;
50typedef unsigned long cvs_author;
51typedef unsigned long cvs_changelog;
52typedef unsigned long cvs_version;
53typedef unsigned long cvs_path;
54typedef unsigned long cvs_tag;
55
56struct cvs_history;
57
58struct
59cvs_commit
60{
61 cvs_commit(rcs_file const & r,
62 string const & rcs_version,
63 file_id const & ident,
64 cvs_history & cvs);
65
66 bool is_synthetic_branch_root;
67 time_t time;
68 bool alive;
69 cvs_author author;
70 cvs_changelog changelog;
71 cvs_version version;
72 cvs_path path;
73 vector<cvs_tag> tags;
74
75 bool operator<(cvs_commit const & other) const
76 {
77 return time < other.time;
78 }
79};
80
81struct
82cvs_branch
83{
84 bool has_a_branchpoint;
85 bool has_a_commit;
86 time_t last_branchpoint;
87 time_t first_commit;
88
89 map<cvs_path, cvs_version> live_at_beginning;
90 vector<cvs_commit> lineage;
91
92 cvs_branch()
93 : has_a_branchpoint(false),
94 has_a_commit(false),
95 last_branchpoint(0),
96 first_commit(0)
97 {
98 }
99
100 void note_commit(time_t now)
101 {
102 if (!has_a_commit)
103 {
104 first_commit = now;
105 }
106 else
107 {
108 if (now < first_commit)
109 first_commit = now;
110 }
111 has_a_commit = true;
112 }
113
114 void note_branchpoint(time_t now)
115 {
116 has_a_branchpoint = true;
117 if (now > last_branchpoint)
118 last_branchpoint = now;
119 }
120
121 time_t beginning() const
122 {
123 I(has_a_branchpoint || has_a_commit);
124 if (has_a_commit)
125 {
126 I(first_commit != 0);
127 return first_commit;
128 }
129 else
130 {
131 I(last_branchpoint != 0);
132 return last_branchpoint;
133 }
134 }
135
136 void append_commit(cvs_commit const & c)
137 {
138 I(c.time != 0);
139 note_commit(c.time);
140 lineage.push_back(c);
141 }
142};
143
144struct
145cvs_history
146{
147
148 interner<unsigned long> branch_interner;
149 interner<unsigned long> author_interner;
150 interner<unsigned long> changelog_interner;
151 interner<unsigned long> file_version_interner;
152 interner<unsigned long> path_interner;
153 interner<unsigned long> tag_interner;
154 interner<unsigned long> manifest_version_interner;
155
156 cycle_detector<unsigned long> manifest_cycle_detector;
157
158 // assume admin has foo:X.Y.0.N in it, then
159 // this multimap contains entries of the form
160 // X.Y -> foo
161 multimap<string, string> branchpoints;
162
163 // and this map contains entries of the form
164 // X.Y.N.1 -> foo
165 map<string, string> branch_first_entries;
166
167 // branch name -> branch
168 map<string, shared_ptr<cvs_branch> > branches;
169 shared_ptr<cvs_branch> trunk;
170
171 // stack of branches we're injecting states into
172 stack< shared_ptr<cvs_branch> > stk;
173 stack< cvs_branchname > bstk;
174
175 // tag -> time, revision
176 //
177 // used to resolve the *last* revision which has a given tag
178 // applied; this is the revision which wins the tag.
179 map<unsigned long, pair<time_t, revision_id> > resolved_tags;
180
181 file_path curr_file;
182 cvs_path curr_file_interned;
183
184 string base_branch;
185
186 ticker n_versions;
187 ticker n_tree_branches;
188
189 cvs_history();
190 void set_filename(string const & file,
191 file_id const & ident);
192
193 void index_branchpoint_symbols(rcs_file const & r);
194
195 void push_branch(string const & branch_name, bool private_branch);
196 void pop_branch();
197};
198
199
200static bool
201is_sbr(shared_ptr<rcs_delta> dl,
202 shared_ptr<rcs_deltatext> dt)
203{
204
205 // CVS abuses the RCS format a bit (ha!) when storing a file which
206 // was only added on a branch: on the root of the branch there'll be
207 // a commit with dead state, empty text, and a log message
208 // containing the string "file foo was initially added on branch
209 // bar". We recognize and ignore these cases, as they do not
210 // "really" represent commits to be clustered together.
211
212 if (dl->state != "dead")
213 return false;
214
215 if (!dt->text.empty())
216 return false;
217
218 string log_bit = "was initially added on branch";
219 string::const_iterator i = search(dt->log.begin(),
220 dt->log.end(),
221 log_bit.begin(),
222 log_bit.end());
223
224 return i != dt->log.end();
225}
226
227
228cvs_commit::cvs_commit(rcs_file const & r,
229 string const & rcs_version,
230 file_id const & ident,
231 cvs_history & cvs)
232{
233 map<string, shared_ptr<rcs_delta> >::const_iterator delta =
234 r.deltas.find(rcs_version);
235 I(delta != r.deltas.end());
236
237 map<string, shared_ptr<rcs_deltatext> >::const_iterator deltatext =
238 r.deltatexts.find(rcs_version);
239 I(deltatext != r.deltatexts.end());
240
241 struct tm t;
242 // We need to initialize t to all zeros, because strptime has a habit of
243 // leaving bits of the data structure alone, letting garbage sneak into
244 // our output.
245 memset(&t, 0, sizeof(t));
246 char const * dp = delta->second->date.c_str();
247 L(F("Calculating time of %s\n") % dp);
248#ifdef WIN32
249 I(sscanf(dp, "%d.%d.%d.%d.%d.%d", &(t.tm_year), &(t.tm_mon),
250 &(t.tm_mday), &(t.tm_hour), &(t.tm_min), &(t.tm_sec))==6);
251 t.tm_mon--;
252 // Apparently some RCS files have 2-digit years, others four; tm_year
253 // always wants years since 1900, so four-digit years are adjusted below.
254 if (t.tm_year > 1900)
255 t.tm_year-=1900;
256#else
257 if (strptime(dp, "%y.%m.%d.%H.%M.%S", &t) == NULL)
258 I(strptime(dp, "%Y.%m.%d.%H.%M.%S", &t) != NULL);
259#endif
260 time = mktime(&t);
261 L(boost::format("= %i\n") % time);
262
263 is_synthetic_branch_root = is_sbr(delta->second,
264 deltatext->second);
265
266 alive = delta->second->state != "dead";
267 if (is_synthetic_branch_root)
268 changelog = cvs.changelog_interner.intern("synthetic branch root changelog");
269 else
270 changelog = cvs.changelog_interner.intern(deltatext->second->log);
271 author = cvs.author_interner.intern(delta->second->author);
272 path = cvs.curr_file_interned;
273 version = cvs.file_version_interner.intern(ident.inner()());
274
275 typedef multimap<string,string>::const_iterator ity;
276 pair<ity,ity> range = r.admin.symbols.equal_range(rcs_version);
277 for (ity i = range.first; i != range.second; ++i)
278 {
279 if (i->first == rcs_version)
280 {
281 L(F("version %s -> tag %s\n") % rcs_version % i->second);
282 tags.push_back(cvs.tag_interner.intern(i->second));
283 }
284 }
285
286}
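
// As a standalone illustration of the date handling in the constructor
// above (assuming a POSIX strptime, i.e. the non-WIN32 path), the sketch
// below parses an RCS date such as "97.03.15.12.34.56" or
// "2004.03.15.12.34.56" into a time_t. It is illustrative only and is not
// called by the importer; the function name is made up for this sketch.
static time_t
parse_rcs_date_sketch(string const & date)
{
  struct tm t;
  // zero the struct first: strptime leaves unmatched fields untouched
  memset(&t, 0, sizeof(t));
  char const * dp = date.c_str();
  // try the two-digit-year form first, then the four-digit form,
  // mirroring the order used in the constructor above
  if (strptime(dp, "%y.%m.%d.%H.%M.%S", &t) == NULL
      && strptime(dp, "%Y.%m.%d.%H.%M.%S", &t) == NULL)
    return static_cast<time_t>(-1);
  return mktime(&t);
}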
287
288
289// piece table stuff
290
291struct piece;
292
293struct
294piece_store
295{
296 vector< boost::shared_ptr<rcs_deltatext> > texts;
297 void index_deltatext(boost::shared_ptr<rcs_deltatext> const & dt,
298 vector<piece> & pieces);
299 void build_string(vector<piece> const & pieces,
300 string & out);
301 void reset() { texts.clear(); }
302};
303
304// FIXME: kludge, I was lazy and did not make this
305// a properly scoped variable.
306
307static piece_store global_pieces;
308
309
310struct
311piece
312{
313 piece(string::size_type p, string::size_type l, unsigned long id) :
314 pos(p), len(l), string_id(id) {}
315 string::size_type pos;
316 string::size_type len;
317 unsigned long string_id;
318 string operator*() const
319 {
320 return string(global_pieces.texts.at(string_id)->text.data() + pos, len);
321 }
322};
323
324
325void
326piece_store::build_string(vector<piece> const & pieces,
327 string & out)
328{
329 out.clear();
330 out.reserve(pieces.size() * 60);
331 for(vector<piece>::const_iterator i = pieces.begin();
332 i != pieces.end(); ++i)
333 out.append(texts.at(i->string_id)->text, i->pos, i->len);
334}
335
336void
337piece_store::index_deltatext(boost::shared_ptr<rcs_deltatext> const & dt,
338 vector<piece> & pieces)
339{
340 pieces.clear();
341 pieces.reserve(dt->text.size() / 30);
342 texts.push_back(dt);
343 unsigned long id = texts.size() - 1;
344 string::size_type begin = 0;
345 string::size_type end = dt->text.find('\n');
346 while(end != string::npos)
347 {
348 // nb: the piece includes the '\n'
349 pieces.push_back(piece(begin, (end - begin) + 1, id));
350 begin = end + 1;
351 end = dt->text.find('\n', begin);
352 }
353 if (begin != dt->text.size())
354 {
355 // the text didn't end with '\n', so neither does the piece
356 end = dt->text.size();
357 pieces.push_back(piece(begin, end - begin, id));
358 }
359}
360
361
362static void
363process_one_hunk(vector< piece > const & source,
364 vector< piece > & dest,
365 vector< piece >::const_iterator & i,
366 int & cursor)
367{
368 string directive = **i;
369 assert(directive.size() > 1);
370 ++i;
371
372 try
373 {
374 char code;
375 int pos, len;
376 if (sscanf(directive.c_str(), " %c %d %d", &code, &pos, &len) != 3)
377 throw oops("illformed directive '" + directive + "'");
378
379 if (code == 'a')
380 {
381 // 'ax y' means "copy from source to dest until cursor == x, then
382 // copy y lines from delta, leaving cursor where it is"
383 while (cursor < pos)
384 dest.push_back(source.at(cursor++));
385 I(cursor == pos);
386 while (len--)
387 dest.push_back(*i++);
388 }
389 else if (code == 'd')
390 {
391 // 'dx y' means "copy from source to dest until cursor == x-1,
392 // then increment cursor by y, ignoring those y lines"
393 while (cursor < (pos - 1))
394 dest.push_back(source.at(cursor++));
395 I(cursor == pos - 1);
396 cursor += len;
397 }
398 else
399 throw oops("unknown directive '" + directive + "'");
400 }
401 catch (std::out_of_range & oor)
402 {
403 throw oops("std::out_of_range while processing " + directive
404 + " with source.size() == "
405 + boost::lexical_cast<string>(source.size())
406 + " and cursor == "
407 + boost::lexical_cast<string>(cursor));
408 }
409}
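
// The 'a'/'d' directive grammar handled above can be exercised in
// isolation. The sketch below (illustrative only, not called by the
// importer) applies a whole RCS deltatext, given as a flat list of lines,
// to a source version, using the same commands and the same cursor
// convention as process_one_hunk; trailing source lines are copied through
// at the end, as construct_version does below. For example, applying
// {"d2 1", "a3 1", "a new line"} to {"one", "two", "three"} yields
// {"one", "three", "a new line"}.
static vector<string>
apply_rcs_delta_sketch(vector<string> const & source,
                       vector<string> const & delta)
{
  vector<string> dest;
  int cursor = 0;
  vector<string>::const_iterator i = delta.begin();
  while (i != delta.end())
    {
      string directive = *i++;
      char code;
      int pos, len;
      if (sscanf(directive.c_str(), " %c %d %d", &code, &pos, &len) != 3)
        throw oops("illformed directive '" + directive + "'");
      if (code == 'a')
        {
          // copy source lines until the cursor reaches 'pos', then take
          // the next 'len' lines verbatim from the delta
          while (cursor < pos)
            dest.push_back(source.at(cursor++));
          while (len--)
            dest.push_back(*i++);
        }
      else if (code == 'd')
        {
          // copy source lines until the cursor reaches 'pos - 1', then
          // skip 'len' source lines
          while (cursor < pos - 1)
            dest.push_back(source.at(cursor++));
          cursor += len;
        }
      else
        throw oops("unknown directive '" + directive + "'");
    }
  // whatever remains of the source is copied through unchanged
  while (cursor < static_cast<int>(source.size()))
    dest.push_back(source[cursor++]);
  return dest;
}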
410
411static void
412construct_version(vector< piece > const & source_lines,
413 string const & dest_version,
414 vector< piece > & dest_lines,
415 rcs_file const & r)
416{
417 dest_lines.clear();
418 dest_lines.reserve(source_lines.size());
419
420 I(r.deltas.find(dest_version) != r.deltas.end());
421 shared_ptr<rcs_delta> delta = r.deltas.find(dest_version)->second;
422
423 I(r.deltatexts.find(dest_version) != r.deltatexts.end());
424 shared_ptr<rcs_deltatext> deltatext = r.deltatexts.find(dest_version)->second;
425
426 vector<piece> deltalines;
427 global_pieces.index_deltatext(deltatext, deltalines);
428
429 int cursor = 0;
430 for (vector<piece>::const_iterator i = deltalines.begin();
431 i != deltalines.end(); )
432 process_one_hunk(source_lines, dest_lines, i, cursor);
433 while (cursor < static_cast<int>(source_lines.size()))
434 dest_lines.push_back(source_lines[cursor++]);
435}
436
437// FIXME: should these be someplace else? using 'friend' to reach into the
438// DB is stupid, but it's also stupid to put raw edge insert methods on the
439// DB itself. or is it? hmm.. encapsulation vs. usage guidance..
440void
441rcs_put_raw_file_edge(hexenc<id> const & old_id,
442 hexenc<id> const & new_id,
443 delta const & del,
444 database & db)
445{
446 if (old_id == new_id)
447 {
448 L(F("skipping identity file edge\n"));
449 return;
450 }
451
452 if (db.file_version_exists(old_id))
453 {
454 // we already have a way to get to this old version,
455 // no need to insert another reconstruction path
456 L(F("existing path to %s found, skipping\n") % old_id);
457 }
458 else
459 {
460 I(db.exists(new_id, "files")
461 || db.delta_exists(new_id, "file_deltas"));
462 db.put_delta(old_id, new_id, del, "file_deltas");
463 }
464}
465
466void
467rcs_put_raw_manifest_edge(hexenc<id> const & old_id,
468 hexenc<id> const & new_id,
469 delta const & del,
470 database & db)
471{
472 if (old_id == new_id)
473 {
474 L(F("skipping identity manifest edge\n"));
475 return;
476 }
477
478 if (db.manifest_version_exists(old_id))
479 {
480 // we already have a way to get to this old version,
481 // no need to insert another reconstruction path
482 L(F("existing path to %s found, skipping\n") % old_id);
483 }
484 else
485 {
486 db.put_delta(old_id, new_id, del, "manifest_deltas");
487 }
488}
489
490
491static void
492insert_into_db(data const & curr_data,
493 hexenc<id> const & curr_id,
494 vector< piece > const & next_lines,
495 data & next_data,
496 hexenc<id> & next_id,
497 database & db)
498{
499 // inserting into the DB
500 // note: curr_lines is a "new" (base) version
501 // and next_lines is an "old" (derived) version.
502 // all storage edges go from new -> old.
503 {
504 string tmp;
505 global_pieces.build_string(next_lines, tmp);
506 next_data = tmp;
507 }
508 delta del;
509 diff(curr_data, next_data, del);
510 calculate_ident(next_data, next_id);
511 rcs_put_raw_file_edge(next_id, curr_id, del, db);
512}
513
514
515
516/*
517
518please read this exhaustingly long comment and understand it
519before mucking with the branch inference logic.
520
521we are processing a file version. a branch might begin here. if
522the current version is X.Y, then there is a branch B starting
523here iff there is a symbol in the admin section called X.Y.0.Z,
524where Z is the branch number (or if there is a private branch
525called X.Y.Z, which is either an import branch or some private
526RCS cruft).
527
528the version X.Y is then considered the branchpoint of B in the
529current file. this does *not* mean that the CVS key -- an
530abstraction representing whole-tree operations -- of X.Y is the
531branchpoint across the CVS archive we're processing.
532
533in fact, CVS does not record the occurrence of a branching
534action (tag -b). we have no idea who executed that command and
535when. what we know instead is the commit X.Y immediately
536 preceding the branch -- CVS considers this the branchpoint --
537in this file's reduced view of history. we also know the first
538commit X.Y.Z.1 inside the branch (which might not exist).
539
540our old strategy was to consider all branches nested in a
541hierarchy, which was a super-tree of all the branch trees in all
542the CVS files in a repository. this involved considering X.Y as
543 the parent version of branch X.Y.Z, and selecting "the"
544branchpoint connecting the two as the least CVS key X.Y.Z.1
545committed inside the branch B.
546
547this was a mistake, for two significant reasons.
548
549first, some files do not *have* any commit inside the branch B,
550only a branchpoint X.Y.0.Z. this branchpoint is actually the
551last commit *before* the user branched, and could be a very old
552commit, long before the branch was formed, so it is useless in
553determining the branch structure.
554
555second, some files do not have a branch B, or worse, have
556branched into B from an "ancestor" branch A, where a different
557file branches into B from a different ancestor branch C. in
558other words, while there *is* a tree structure within the X.Y.Z
559branches of each file, there is *no* shared tree structure
560between the branch names across a repository. in one file A can
561be an ancestor of B, in another file B can be an ancestor of A.
562
563thus, we give up on establishing a hierarchy between branches
564altogether. all branches exist in a flat namespace, and all are
565 direct descendants of the empty revision at the root of
566history. each branchpoint symbol mentioned in the
567administrative section of a file is considered the root of a new
568lineage.
569
570*/
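
// To make the numbering convention described above concrete: a "magic
// branch" symbol such as 1.3.0.2 names a branch whose branchpoint in this
// file is 1.3 and whose first commit, if it exists, is 1.3.2.1; a vendor
// branch symbol such as 1.1.1 has branchpoint 1.1 and first commit
// 1.1.1.1. The sketch below computes both forms from the dot-separated
// components of a symbol; it is illustrative only -- the importer's real
// logic lives in cvs_history::index_branchpoint_symbols further down.
static bool
classify_branch_symbol_sketch(vector<string> const & components,
                              vector<string> & branchpoint,
                              vector<string> & first_entry)
{
  branchpoint.clear();
  first_entry.clear();
  if (components.size() > 2 && components.size() % 2 == 1)
    {
      // vendor branch, e.g. {"1", "1", "1"}
      first_entry = components;
      first_entry.push_back("1");                        // 1.1.1.1
      branchpoint = components;
      branchpoint.pop_back();                            // 1.1
      return true;
    }
  if (components.size() > 2 && components.size() % 2 == 0
      && components[components.size() - 2] == string("0"))
    {
      // normal ("magic") branch, e.g. {"1", "3", "0", "2"}
      first_entry = components;
      first_entry[first_entry.size() - 2] = first_entry[first_entry.size() - 1];
      first_entry[first_entry.size() - 1] = string("1"); // 1.3.2.1
      branchpoint = components;
      branchpoint.pop_back();
      branchpoint.pop_back();                            // 1.3
      return true;
    }
  // plain tags and trunk revisions do not start a branch
  return false;
}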
571
572
573static void
574process_branch(string const & begin_version,
575 vector< piece > const & begin_lines,
576 data const & begin_data,
577 hexenc<id> const & begin_id,
578 rcs_file const & r,
579 database & db,
580 cvs_history & cvs)
581{
582 string curr_version = begin_version;
583 scoped_ptr< vector< piece > > next_lines(new vector<piece>);
584 scoped_ptr< vector< piece > > curr_lines(new vector<piece>
585 (begin_lines.begin(),
586 begin_lines.end()));
587 data curr_data(begin_data), next_data;
588 hexenc<id> curr_id(begin_id), next_id;
589
590 while(! (r.deltas.find(curr_version) == r.deltas.end()))
591 {
592 L(F("version %s has %d lines\n") % curr_version % curr_lines->size());
593
594 cvs_commit curr_commit(r, curr_version, curr_id, cvs);
595 if (!curr_commit.is_synthetic_branch_root)
596 {
597 cvs.stk.top()->append_commit(curr_commit);
598 ++cvs.n_versions;
599 }
600
601 string next_version = r.deltas.find(curr_version)->second->next;
602
603 if (! next_version.empty())
604 {
605 L(F("following RCS edge %s -> %s\n") % curr_version % next_version);
606
607 construct_version(*curr_lines, next_version, *next_lines, r);
608 L(F("constructed RCS version %s, inserting into database\n") %
609 next_version);
610
611 insert_into_db(curr_data, curr_id,
612 *next_lines, next_data, next_id, db);
613 }
614
615 // mark the beginning-of-branch time and state of this file if
616 // we're at a branchpoint
617 typedef multimap<string,string>::const_iterator ity;
618 pair<ity,ity> range = cvs.branchpoints.equal_range(curr_version);
619 if (range.first != cvs.branchpoints.end()
620 && range.first->first == curr_version)
621 {
622 for (ity i = range.first; i != range.second; ++i)
623 {
624 cvs.push_branch(i->second, false);
625 shared_ptr<cvs_branch> b = cvs.stk.top();
626 if (curr_commit.alive)
627 b->live_at_beginning[cvs.curr_file_interned] = curr_commit.version;
628 b->note_branchpoint(curr_commit.time);
629 cvs.pop_branch();
630 }
631 }
632
633
634 // recursively follow any branch commits coming from the branchpoint
635 boost::shared_ptr<rcs_delta> curr_delta = r.deltas.find(curr_version)->second;
636 for(vector<string>::const_iterator i = curr_delta->branches.begin();
637 i != curr_delta->branches.end(); ++i)
638 {
639 string branch;
640 data branch_data;
641 hexenc<id> branch_id;
642 vector< piece > branch_lines;
643 bool priv = false;
644 map<string, string>::const_iterator be = cvs.branch_first_entries.find(*i);
645
646 if (be != cvs.branch_first_entries.end())
647 branch = be->second;
648 else
649 priv = true;
650
651 L(F("following RCS branch %s = '%s'\n") % (*i) % branch);
652
653 construct_version(*curr_lines, *i, branch_lines, r);
654 insert_into_db(curr_data, curr_id,
655 branch_lines, branch_data, branch_id, db);
656
657 cvs.push_branch(branch, priv);
658 process_branch(*i, branch_lines, branch_data, branch_id, r, db, cvs);
659 cvs.pop_branch();
660
661 L(F("finished RCS branch %s = '%s'\n") % (*i) % branch);
662 }
663
664 if (!r.deltas.find(curr_version)->second->next.empty())
665 {
666 // advance
667 curr_data = next_data;
668 curr_id = next_id;
669 curr_version = next_version;
670 swap(next_lines, curr_lines);
671 next_lines->clear();
672 }
673 else break;
674 }
675}
676
677
678static void
679import_rcs_file_with_cvs(string const & filename, database & db, cvs_history & cvs)
680{
681 rcs_file r;
682 L(F("parsing RCS file %s\n") % filename);
683 parse_rcs_file(filename, r);
684 L(F("parsed RCS file %s OK\n") % filename);
685
686 {
687 vector< piece > head_lines;
688 I(r.deltatexts.find(r.admin.head) != r.deltatexts.end());
689 I(r.deltas.find(r.admin.head) != r.deltas.end());
690
691 hexenc<id> id;
692 data dat(r.deltatexts.find(r.admin.head)->second->text);
693 calculate_ident(dat, id);
694 file_id fid = id;
695
696 cvs.set_filename (filename, fid);
697 cvs.index_branchpoint_symbols (r);
698
699 if (! db.file_version_exists (fid))
700 {
701 db.put_file(fid, dat);
702 }
703
704 {
705 // create the head state in case it is a loner
706 // cvs_key k;
707 // shared_ptr<cvs_state> s;
708 // L(F("noting head version %s : %s\n") % cvs.curr_file % r.admin.head);
709 // cvs.find_key_and_state (r, r.admin.head, k, s);
710 }
711
712 global_pieces.reset();
713 global_pieces.index_deltatext(r.deltatexts.find(r.admin.head)->second, head_lines);
714 process_branch(r.admin.head, head_lines, dat, id, r, db, cvs);
715 global_pieces.reset();
716 }
717
718 ui.set_tick_trailer("");
719}
720
721
722void
723test_parse_rcs_file(system_path const & filename, database & db)
724{
725 cvs_history cvs;
726
727 I(! filename.empty());
728 assert_path_is_file(filename);
729
730 P(F("parsing RCS file %s\n") % filename);
731 rcs_file r;
732 parse_rcs_file(filename.as_external(), r);
733 P(F("parsed RCS file %s OK\n") % filename);
734}
735
736
737// CVS importing stuff follows
738
739
740static void
741split_version(string const & v, vector<string> & vs)
742{
743 vs.clear();
744 boost::char_separator<char> sep(".");
745 typedef boost::tokenizer<boost::char_separator<char> > tokenizer;
746 tokenizer tokens(v, sep);
747 copy(tokens.begin(), tokens.end(), back_inserter(vs));
748}
749
750static void
751join_version(vector<string> const & vs, string & v)
752{
753 v.clear();
754 for (vector<string>::const_iterator i = vs.begin();
755 i != vs.end(); ++i)
756 {
757 if (i != vs.begin())
758 v += ".";
759 v += *i;
760 }
761}
762
763cvs_history::cvs_history() :
764 n_versions("versions", "v", 1),
765 n_tree_branches("branches", "b", 1)
766{
767}
768
769void
770cvs_history::set_filename(string const & file,
771 file_id const & ident)
772{
773 L(F("importing file '%s'\n") % file);
774 I(file.size() > 2);
775 I(file.substr(file.size() - 2) == string(",v"));
776 string ss = file;
777 ui.set_tick_trailer(ss);
778 ss.resize(ss.size() - 2);
779 // remove Attic/ if present
780 std::string::size_type last_slash=ss.rfind('/');
781 if (last_slash!=std::string::npos && last_slash>=5
782 && ss.substr(last_slash-5,6)=="Attic/")
783 ss.erase(last_slash-5,6);
784 curr_file = file_path_internal(ss);
785 curr_file_interned = path_interner.intern(ss);
786}
787
788void cvs_history::index_branchpoint_symbols(rcs_file const & r)
789{
790 branchpoints.clear();
791 branch_first_entries.clear();
792
793 for (std::multimap<std::string, std::string>::const_iterator i =
794 r.admin.symbols.begin(); i != r.admin.symbols.end(); ++i)
795 {
796 std::string const & num = i->first;
797 std::string const & sym = i->second;
798
799 vector<string> components;
800 split_version(num, components);
801
802 vector<string> first_entry_components;
803 vector<string> branchpoint_components;
804
805 if (components.size() > 2 &&
806 (components.size() % 2 == 1))
807 {
808 // this is a "vendor" branch
809 //
810 // such as "1.1.1", where "1.1" is the branchpoint and
811 // "1.1.1.1" will be the first commit on it.
812
813 first_entry_components = components;
814 first_entry_components.push_back("1");
815
816 branchpoint_components = components;
817 branchpoint_components.erase(branchpoint_components.end() - 1,
818 branchpoint_components.end());
819
820 }
821
822 else if (components.size() > 2 &&
823 (components.size() % 2 == 0) &&
824 components[components.size() - 2] == string("0"))
825 {
826 // this is a "normal" branch
827 //
828 // such as "1.3.0.2", where "1.3" is the branchpoint and
829 // "1.3.2.1"
830
831 first_entry_components = components;
832 first_entry_components[first_entry_components.size() - 2]
833 = first_entry_components[first_entry_components.size() - 1];
834 first_entry_components[first_entry_components.size() - 1]
835 = string("1");
836
837 branchpoint_components = components;
838 branchpoint_components.erase(branchpoint_components.end() - 2,
839 branchpoint_components.end());
840 }
841
842 string first_entry_version;
843 join_version(first_entry_components, first_entry_version);
844
845 L(F("first version in branch %s would be %s\n")
846 % sym % first_entry_version);
847 branch_first_entries.insert(make_pair(first_entry_version, sym));
848
849 string branchpoint_version;
850 join_version(branchpoint_components, branchpoint_version);
851
852 L(F("file branchpoint for %s at %s\n") % sym % branchpoint_version);
853 branchpoints.insert(make_pair(branchpoint_version, sym));
854 }
855}
856
857
858
859void
860cvs_history::push_branch(string const & branch_name, bool private_branch)
861{
862 shared_ptr<cvs_branch> branch;
863
864 string bname = base_branch + "." + branch_name;
865 I(stk.size() > 0);
866
867 if (private_branch)
868 {
869 branch = shared_ptr<cvs_branch>(new cvs_branch());
870 stk.push(branch);
871 bstk.push(branch_interner.intern(""));
872 return;
873 }
874 else
875 {
876 map<string, shared_ptr<cvs_branch> >::const_iterator b = branches.find(bname);
877 if (b == branches.end())
878 {
879 branch = shared_ptr<cvs_branch>(new cvs_branch());
880 branches.insert(make_pair(bname, branch));
881 ++n_tree_branches;
882 }
883 else
884 branch = b->second;
885
886 stk.push(branch);
887 bstk.push(branch_interner.intern(bname));
888 }
889}
890
891void
892cvs_history::pop_branch()
893{
894 I(stk.size() > 1);
895 stk.pop();
896 bstk.pop();
897}
898
899
900class
901cvs_tree_walker
902 : public tree_walker
903{
904 cvs_history & cvs;
905 database & db;
906public:
907 cvs_tree_walker(cvs_history & c, database & d) :
908 cvs(c), db(d)
909 {
910 }
911 virtual void visit_file(file_path const & path)
912 {
913 string file = path.as_external();
914 if (file.substr(file.size() - 2) == string(",v"))
915 {
916 try
917 {
918 import_rcs_file_with_cvs(file, db, cvs);
919 }
920 catch (oops const & o)
921 {
922 W(F("error reading RCS file %s: %s\n") % file % o.what());
923 }
924 }
925 else
926 L(F("skipping non-RCS file %s\n") % file);
927 }
928 virtual ~cvs_tree_walker() {}
929};
930
931
932
933
934//
935// our task here is to produce a sequence of revision descriptions
936// from the per-file commit records we have. we do this by rolling
937// forwards through the temporally sorted file-commit list
938// accumulating file-commits into revisions and flushing the
939// revisions when we feel they are "complete".
940//
941// revisions have to have a time associated with them. this time
942// will be the first time of any commit associated with the
943// revision. they have an author and a changelog, which is shared
944// by all the file-commits in the revision.
945//
946// there might be multiple revisions overlapping in time. this is
947// legal wrt. CVS. we keep a set, and search all members of the set
948// for the best match.
949//
950// consider this situation of overlapping revisions:
951//
952// +---------------+ +---------------+ +---------------+
953// | rev #1 @ 0011 | | rev #2 @ 0012 | | rev #3 @ 0013 |
954// |~~~~~~~~~~~~~~~| |~~~~~~~~~~~~~~~| |~~~~~~~~~~~~~~~|
955// | patch foo.txt | | patch bar.txt | | patch baz.txt |
956// +---------------+ +---------------+ +---------------+
957//
958// suppose you have this situation and you run across a "patch
959// bar.txt" commit at timestamp 0014. what do you do?
960//
961// - you know that rev #2 cannot accept this commit, simply because
962 // two commits on the same file make *two* revisions, not one.
963//
964// - perhaps rev #3 could accept it; after all, it could be that the
965// commit associated with rev #2 released its commit lock, and the
966// commit associated with rev #3 quickly updated and committed at
967// 0013, finishing off at 0014.
968//
969 // - can rev #1 accept it? no. because CVS calculated the version it
970// expected to see in bar.txt before calling up the server, when
971// committing rev #1. the version it expected to see was the version
972 // in bar.txt *before* time 0012; that is, before rev #2 had any effect
973// on bar.txt. when it contacted the server, the commit associated
974// with rev #1 would have aborted if it had seen any other number.
975// so rev #1 could not start before an edit to bar.txt and then
976// include its own edit to bar.txt.
977//
978// so we have only one case where bar.txt can be accepted. if the
979// commit is not accepted into a legal rev (outside the window,
980// wrong changelog/author) it starts a new revision.
981//
982// as we scan forwards, if we hit timestamps which lie beyond rev #n's
983// window, we flush rev #n.
984//
985// if there are multiple coincident and legal revs to direct a
986// commit to (all with the same author/changelog), we direct the
987// commit to the rev with the closest initial timestamp. that is,
988// the *latest* beginning time.
989
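// A deliberately simplified, standalone sketch of the clustering rule
// described above (illustrative only; the real algorithm in import_branch
// below keeps the open clusters in a set ordered by first_time and also
// enforces the "last cluster touching this file" constraint, which this
// sketch omits). Commits are assumed to arrive sorted by time; a commit
// may join an open cluster only if the cluster has the same author and
// changelog and has not yet touched the commit's file, and a cluster is
// closed once its first commit falls outside the window.
struct sketch_commit
{
  time_t time;
  cvs_author author;
  cvs_changelog changelog;
  cvs_path path;
};

struct sketch_cluster
{
  time_t first_time;
  cvs_author author;
  cvs_changelog changelog;
  set<cvs_path> paths;
};

static vector<sketch_cluster>
cluster_commits_sketch(vector<sketch_commit> const & commits, time_t window)
{
  vector<sketch_cluster> closed;
  list<sketch_cluster> open;
  for (vector<sketch_commit>::const_iterator i = commits.begin();
       i != commits.end(); ++i)
    {
      // close clusters whose window has passed
      for (list<sketch_cluster>::iterator j = open.begin(); j != open.end(); )
        {
          if (j->first_time + window < i->time)
            {
              closed.push_back(*j);
              j = open.erase(j);
            }
          else
            ++j;
        }

      // look for a compatible open cluster; keep scanning so that the
      // compatible cluster with the latest first_time wins, as above
      list<sketch_cluster>::iterator target = open.end();
      for (list<sketch_cluster>::iterator j = open.begin(); j != open.end(); ++j)
        if (j->author == i->author
            && j->changelog == i->changelog
            && j->paths.find(i->path) == j->paths.end())
          target = j;

      // otherwise this commit starts a new cluster
      if (target == open.end())
        {
          sketch_cluster c;
          c.first_time = i->time;
          c.author = i->author;
          c.changelog = i->changelog;
          open.push_back(c);
          target = --open.end();
        }
      target->paths.insert(i->path);
    }

  // flush whatever is still open
  copy(open.begin(), open.end(), back_inserter(closed));
  return closed;
}
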
990struct
991cvs_cluster
992{
993 time_t first_time;
994 cvs_author author;
995 cvs_changelog changelog;
996 set<cvs_tag> tags;
997
998 cvs_cluster(time_t t,
999 cvs_author a,
1000 cvs_changelog c)
1001 : first_time(t),
1002 author(a),
1003 changelog(c)
1004 {}
1005
1006 struct entry
1007 {
1008 bool live;
1009 cvs_version version;
1010 time_t time;
1011 entry(bool l, cvs_version v, time_t t)
1012 : live(l),
1013 version(v),
1014 time(t)
1015 {}
1016 };
1017
1018 typedef map<cvs_path, entry> entry_map;
1019 entry_map entries;
1020};
1021
1022
1023struct
1024cluster_consumer
1025{
1026 cvs_history & cvs;
1027 app_state & app;
1028 string const & branchname;
1029 cvs_branch const & branch;
1030 map<cvs_path, cvs_version> live_files;
1031 ticker & n_manifests;
1032 ticker & n_revisions;
1033
1034 struct prepared_revision
1035 {
1036 prepared_revision(revision_id i,
1037 shared_ptr<revision_set> r,
1038 cvs_cluster const & c);
1039 revision_id rid;
1040 shared_ptr<revision_set> rev;
1041 time_t time;
1042 cvs_author author;
1043 cvs_changelog changelog;
1044 vector<cvs_tag> tags;
1045 };
1046
1047 vector<prepared_revision> preps;
1048
1049 manifest_map parent_map, child_map;
1050 manifest_id parent_mid, child_mid;
1051 revision_id parent_rid, child_rid;
1052
1053 cluster_consumer(cvs_history & cvs,
1054 app_state & app,
1055 string const & branchname,
1056 cvs_branch const & branch,
1057 ticker & n_manifests,
1058 ticker & n_revs);
1059
1060 void consume_cluster(cvs_cluster const & c,
1061 bool head_p);
1062 void build_change_set(cvs_cluster const & c,
1063 change_set & cs);
1064 void store_manifest_edge(bool head_p);
1065 void store_auxiliary_certs(prepared_revision const & p);
1066 void store_revisions();
1067};
1068
1069typedef shared_ptr<cvs_cluster>
1070cluster_ptr;
1071
1072struct
1073cluster_ptr_lt
1074{
1075 bool operator()(cluster_ptr const & a,
1076 cluster_ptr const & b) const
1077 {
1078 return a->first_time < b->first_time;
1079 }
1080};
1081
1082typedef set<cluster_ptr, cluster_ptr_lt>
1083cluster_set;
1084
1085void
1086import_branch(cvs_history & cvs,
1087 app_state & app,
1088 string const & branchname,
1089 shared_ptr<cvs_branch> const & branch,
1090 ticker & n_manifests,
1091 ticker & n_revs)
1092{
1093 cluster_set clusters;
1094 cluster_consumer cons(cvs, app, branchname, *branch, n_manifests, n_revs);
1095 unsigned long commits_remaining = branch->lineage.size();
1096
1097 // step 1: sort the lineage
1098 stable_sort(branch->lineage.begin(), branch->lineage.end());
1099
1100 for (vector<cvs_commit>::const_iterator i = branch->lineage.begin();
1101 i != branch->lineage.end(); ++i)
1102 {
1103 commits_remaining--;
1104
1105 L(F("examining next commit [t:%d] [p:%s] [a:%s] [c:%s]\n")
1106 % i->time
1107 % cvs.path_interner.lookup(i->path)
1108 % cvs.author_interner.lookup(i->author)
1109 % cvs.changelog_interner.lookup(i->changelog));
1110
1111 // step 2: expire all clusters from the beginning of the set which
1112 // have passed the window size
1113 while (!clusters.empty())
1114 {
1115 cluster_set::const_iterator j = clusters.begin();
1116 if ((*j)->first_time + constants::cvs_window < i->time)
1117 {
1118 L(F("expiring cluster\n"));
1119 cons.consume_cluster(**j, false);
1120 clusters.erase(j);
1121 }
1122 else
1123 break;
1124 }
1125
1126 // step 3: find the last still-live cluster to have touched this
1127 // file
1128 time_t time_of_last_cluster_touching_this_file = 0;
1129
1130 unsigned clu = 0;
1131 for (cluster_set::const_iterator j = clusters.begin();
1132 j != clusters.end(); ++j)
1133 {
1134 L(F("examining cluster %d to see if it touched %d\n")
1135 % clu++
1136 % i->path);
1137
1138 cvs_cluster::entry_map::const_iterator k = (*j)->entries.find(i->path);
1139 if ((k != (*j)->entries.end())
1140 && (k->second.time > time_of_last_cluster_touching_this_file))
1141 {
1142 L(F("found cluster touching %d: [t:%d] [a:%d] [c:%d]\n")
1143 % i->path
1144 % (*j)->first_time
1145 % (*j)->author
1146 % (*j)->changelog);
1147 time_of_last_cluster_touching_this_file = (*j)->first_time;
1148 }
1149 }
1150 L(F("last modification time is %d\n")
1151 % time_of_last_cluster_touching_this_file);
1152
1153 // step 4: find a cluster which starts on or after the
1154 // last_modify_time, which doesn't modify the file in question,
1155 // and which contains the same author and changelog as our
1156 // commit
1157 cluster_ptr target;
1158 for (cluster_set::const_iterator j = clusters.begin();
1159 j != clusters.end(); ++j)
1160 {
1161 if (((*j)->first_time >= time_of_last_cluster_touching_this_file)
1162 && ((*j)->author == i->author)
1163 && ((*j)->changelog == i->changelog)
1164 && ((*j)->entries.find(i->path) == (*j)->entries.end()))
1165 {
1166 L(F("picked existing cluster [t:%d] [a:%d] [c:%d]\n")
1167 % (*j)->first_time
1168 % (*j)->author
1169 % (*j)->changelog);
1170
1171 target = (*j);
1172 }
1173 }
1174
1175 // if we're still not finding an active cluster,
1176 // this is probably the first commit in it. make
1177 // a new one.
1178 if (!target)
1179 {
1180 L(F("building new cluster [t:%d] [a:%d] [c:%d]\n")
1181 % i->time
1182 % i->author
1183 % i->changelog);
1184
1185 target = cluster_ptr(new cvs_cluster(i->time,
1186 i->author,
1187 i->changelog));
1188 clusters.insert(target);
1189 }
1190
1191 I(target);
1192 target->entries.insert(make_pair(i->path,
1193 cvs_cluster::entry(i->alive,
1194 i->version,
1195 i->time)));
1196 for (vector<cvs_tag>::const_iterator j = i->tags.begin();
1197 j != i->tags.end(); ++j)
1198 {
1199 target->tags.insert(*j);
1200 }
1201 }
1202
1203
1204 // now we are done with this lineage; flush all remaining clusters
1205 L(F("finished branch commits, writing all pending clusters\n"));
1206 while (!clusters.empty())
1207 {
1208 cons.consume_cluster(**clusters.begin(), clusters.size() == 1);
1209 clusters.erase(clusters.begin());
1210 }
1211 L(F("finished writing pending clusters\n"));
1212
1213 cons.store_revisions();
1214
1215}
1216
1217
1218void
1219import_cvs_repo(system_path const & cvsroot,
1220 app_state & app)
1221{
1222 N(!directory_exists(cvsroot / "CVSROOT"),
1223 F("%s appears to be a CVS repository root directory\n"
1224 "try importing a module instead, with 'cvs_import %s/<module_name>")
1225 % cvsroot % cvsroot);
1226
1227 {
1228 // early short-circuit to avoid failure after lots of work
1229 rsa_keypair_id key;
1230 N(guess_default_key(key,app),
1231 F("no unique private key for cert construction"));
1232 require_password(key, app);
1233 }
1234
1235 cvs_history cvs;
1236 N(app.branch_name() != "", F("need base --branch argument for importing"));
1237 cvs.base_branch = app.branch_name();
1238
1239 // push the trunk
1240 cvs.trunk = shared_ptr<cvs_branch>(new cvs_branch());
1241 cvs.stk.push(cvs.trunk);
1242 cvs.bstk.push(cvs.branch_interner.intern(cvs.base_branch));
1243
1244 {
1245 transaction_guard guard(app.db);
1246 cvs_tree_walker walker(cvs, app.db);
1247 require_path_is_directory(cvsroot,
1248 F("path %s does not exist") % cvsroot,
1249 F("'%s' is not a directory") % cvsroot);
1250 app.db.ensure_open();
1251 change_current_working_dir(cvsroot);
1252 walk_tree(file_path(), walker);
1253 guard.commit();
1254 }
1255
1256 I(cvs.stk.size() == 1);
1257
1258 ticker n_revs(_("revisions"), "r", 1);
1259 ticker n_manifests(_("manifests"), "m", 1);
1260
1261 while (cvs.branches.size() > 0)
1262 {
1263 transaction_guard guard(app.db);
1264 map<string, shared_ptr<cvs_branch> >::const_iterator i = cvs.branches.begin();
1265 string branchname = i->first;
1266 shared_ptr<cvs_branch> branch = i->second;
1267 L(F("branch %s has %d entries\n") % branchname % branch->lineage.size());
1268 import_branch(cvs, app, branchname, branch, n_manifests, n_revs);
1269
1270 // free up some memory
1271 cvs.branches.erase(branchname);
1272 guard.commit();
1273 }
1274
1275 {
1276 transaction_guard guard(app.db);
1277 L(F("trunk has %d entries\n") % cvs.trunk->lineage.size());
1278 import_branch(cvs, app, cvs.base_branch, cvs.trunk, n_manifests, n_revs);
1279 guard.commit();
1280 }
1281
1282 // now we have a "last" rev for each tag
1283 {
1284 ticker n_tags(_("tags"), "t", 1);
1285 packet_db_writer dbw(app);
1286 transaction_guard guard(app.db);
1287 for (map<unsigned long, pair<time_t, revision_id> >::const_iterator i = cvs.resolved_tags.begin();
1288 i != cvs.resolved_tags.end(); ++i)
1289 {
1290 string tag = cvs.tag_interner.lookup(i->first);
1291 ui.set_tick_trailer("marking tag " + tag);
1292 cert_revision_tag(i->second.second, tag, app, dbw);
1293 ++n_tags;
1294 }
1295 guard.commit();
1296 }
1297
1298
1299 return;
1300
1301}
1302
1303cluster_consumer::cluster_consumer(cvs_history & cvs,
1304 app_state & app,
1305 string const & branchname,
1306 cvs_branch const & branch,
1307 ticker & n_mans,
1308 ticker & n_revs)
1309 : cvs(cvs),
1310 app(app),
1311 branchname(branchname),
1312 branch(branch),
1313 n_manifests(n_mans),
1314 n_revisions(n_revs)
1315{
1316 if (!branch.live_at_beginning.empty())
1317 {
1318 cvs_author synthetic_author =
1319 cvs.author_interner.intern("cvs_import");
1320
1321 cvs_changelog synthetic_cl =
1322 cvs.changelog_interner.intern("beginning of branch "
1323 + branchname);
1324
1325 time_t synthetic_time = branch.beginning();
1326 cvs_cluster initial_cluster(synthetic_time,
1327 synthetic_author,
1328 synthetic_cl);
1329
1330 L(F("initial cluster on branch %s has %d live entries\n") %
1331 branchname % branch.live_at_beginning.size());
1332
1333 for (map<cvs_path, cvs_version>::const_iterator i = branch.live_at_beginning.begin();
1334 i != branch.live_at_beginning.end(); ++i)
1335 {
1336 cvs_cluster::entry e(true, i->second, synthetic_time);
1337 L(F("initial cluster contains %s at %s\n") %
1338 cvs.path_interner.lookup(i->first) %
1339 cvs.file_version_interner.lookup(i->second));
1340 initial_cluster.entries.insert(make_pair(i->first, e));
1341 }
1342 consume_cluster(initial_cluster, branch.lineage.empty());
1343 }
1344}
1345
1346cluster_consumer::prepared_revision::prepared_revision(revision_id i,
1347 shared_ptr<revision_set> r,
1348 cvs_cluster const & c)
1349 : rid(i),
1350 rev(r),
1351 time(c.first_time),
1352 author(c.author),
1353 changelog(c.changelog)
1354{
1355 for (set<cvs_tag>::const_iterator i = c.tags.begin();
1356 i != c.tags.end(); ++i)
1357 {
1358 tags.push_back(*i);
1359 }
1360}
1361
1362
1363void
1364cluster_consumer::store_revisions()
1365{
1366 for (vector<prepared_revision>::const_iterator i = preps.begin();
1367 i != preps.end(); ++i)
1368 {
1369 if (! app.db.revision_exists(i->rid))
1370 {
1371 data tmp;
1372 write_revision_set(*(i->rev), tmp);
1373 app.db.put_revision(i->rid, *(i->rev));
1374 store_auxiliary_certs(*i);
1375 ++n_revisions;
1376 }
1377 }
1378}
1379
1380void
1381cluster_consumer::store_manifest_edge(bool head_p)
1382{
1383 L(F("storing manifest '%s' (base %s)\n") % parent_mid % child_mid);
1384 ++n_manifests;
1385
1386 if (head_p)
1387 {
1388 L(F("storing head %s\n") % child_mid);
1389 // a branch has one very important manifest: the head. this is
1390 // the "newest" of all manifests within the branch (including
1391 // the trunk), and we store it in its entirety, before the
1392 // cluster consumer is destroyed.
1393 if (! app.db.manifest_version_exists(child_mid))
1394 {
1395 manifest_data mdat;
1396 write_manifest_map(child_map, mdat);
1397 app.db.put_manifest(child_mid, mdat);
1398 }
1399 }
1400
1401 if (null_id(parent_mid))
1402 {
1403 L(F("skipping delta to null manifest\n"));
1404 return;
1405 }
1406
1407 unsigned long older, newer;
1408
1409 older = cvs.manifest_version_interner.intern(parent_mid.inner()());
1410 newer = cvs.manifest_version_interner.intern(child_mid.inner()());
1411
1412 if (cvs.manifest_cycle_detector.edge_makes_cycle(older,newer))
1413 {
1414
1415 L(F("skipping cyclical manifest delta %s -> %s\n")
1416 % parent_mid % child_mid);
1417 // we are potentially breaking the chain one would use to get to
1418 // p. we need to make sure p exists.
1419 if (!app.db.manifest_version_exists(parent_mid))
1420 {
1421 L(F("writing full manifest %s\n") % parent_mid);
1422 manifest_data mdat;
1423 write_manifest_map(parent_map, mdat);
1424 app.db.put_manifest(parent_mid, mdat);
1425 }
1426 return;
1427 }
1428
1429 cvs.manifest_cycle_detector.put_edge(older,newer);
1430
1431 L(F("storing manifest delta %s -> %s\n")
1432 % child_mid % parent_mid);
1433
1434 // the ancestry-based 'child' is a 'new' version as far as the
1435 // storage system is concerned; that is to say that the
1436 // ancestry-based 'parent' is a temporally older tree version, which
1437 // can be constructed from the 'newer' child. so the delta should
1438 // run from child (new) -> parent (old).
1439
1440 delta del;
1441 diff(child_map, parent_map, del);
1442 rcs_put_raw_manifest_edge(parent_mid.inner(),
1443 child_mid.inner(),
1444 del, app.db);
1445}
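
// The cycle check above can be pictured with a tiny standalone detector
// over interned manifest ids: an edge older -> newer "makes a cycle"
// exactly when 'newer' can already reach 'older' through the edges
// recorded so far. The DFS sketch below is illustrative only; the real
// cycle_detector<T> in cycle_detector.hh may be implemented quite
// differently.
static bool
edge_makes_cycle_sketch(map< unsigned long, set<unsigned long> > const & edges,
                        unsigned long older, unsigned long newer)
{
  // walk forward from 'newer'; if we can reach 'older', adding the edge
  // older -> newer would close a loop
  set<unsigned long> seen;
  stack<unsigned long> todo;
  todo.push(newer);
  while (!todo.empty())
    {
      unsigned long n = todo.top();
      todo.pop();
      if (n == older)
        return true;
      if (!seen.insert(n).second)
        continue;
      map< unsigned long, set<unsigned long> >::const_iterator i = edges.find(n);
      if (i != edges.end())
        for (set<unsigned long>::const_iterator j = i->second.begin();
             j != i->second.end(); ++j)
          todo.push(*j);
    }
  return false;
}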
1446
1447void
1448cluster_consumer::store_auxiliary_certs(prepared_revision const & p)
1449{
1450 packet_db_writer dbw(app);
1451
1452 for (vector<cvs_tag>::const_iterator i = p.tags.begin();
1453 i != p.tags.end(); ++i)
1454 {
1455 map<unsigned long, pair<time_t, revision_id> >::const_iterator j
1456 = cvs.resolved_tags.find(*i);
1457
1458 if (j != cvs.resolved_tags.end())
1459 {
1460 if (j->second.first < p.time)
1461 {
1462 // move the tag forwards
1463 cvs.resolved_tags.erase(*i);
1464 cvs.resolved_tags.insert(make_pair(*i, make_pair(p.time, p.rid)));
1465 }
1466 }
1467 else
1468 {
1469 cvs.resolved_tags.insert(make_pair(*i, make_pair(p.time, p.rid)));
1470 }
1471 }
1472
1473 cert_revision_in_branch(p.rid, cert_value(branchname), app, dbw);
1474 cert_revision_author(p.rid, cvs.author_interner.lookup(p.author), app, dbw);
1475 cert_revision_changelog(p.rid, cvs.changelog_interner.lookup(p.changelog), app, dbw);
1476 cert_revision_date_time(p.rid, p.time, app, dbw);
1477}
1478
1479void
1480cluster_consumer::build_change_set(cvs_cluster const & c,
1481 change_set & cs)
1482{
1483 for (cvs_cluster::entry_map::const_iterator i = c.entries.begin();
1484 i != c.entries.end(); ++i)
1485 {
1486 file_path pth = file_path_internal(cvs.path_interner.lookup(i->first));
1487 file_id fid(cvs.file_version_interner.lookup(i->second.version));
1488 if (i->second.live)
1489 {
1490 map<cvs_path, cvs_version>::const_iterator e = live_files.find(i->first);
1491 if (e == live_files.end())
1492 {
1493 L(F("adding entry state '%s' on '%s'\n") % fid % pth);
1494 cs.add_file(pth, fid);
1495 live_files[i->first] = i->second.version;
1496 }
1497 else if (e->second != i->second.version)
1498 {
1499 file_id old_fid(cvs.file_version_interner.lookup(e->second));
1500 L(F("applying state delta on '%s' : '%s' -> '%s'\n")
1501 % pth % old_fid % fid);
1502 cs.apply_delta(pth, old_fid, fid);
1503 live_files[i->first] = i->second.version;
1504 }
1505 }
1506 else
1507 {
1508 map<cvs_path, cvs_version>::const_iterator e = live_files.find(i->first);
1509 if (e != live_files.end())
1510 {
1511 L(F("deleting entry state '%s' on '%s'\n") % fid % pth);
1512 cs.delete_file(pth);
1513 live_files.erase(i->first);
1514 }
1515 }
1516 }
1517}
1518
1519void
1520cluster_consumer::consume_cluster(cvs_cluster const & c,
1521 bool head_p)
1522{
1523 // we should never have an empty cluster; it's *possible* to have
1524 // an empty changeset (say on a vendor import) but every cluster
1525 // should have been created by at least one file commit, even
1526 // if the commit made no changes. it's a logical inconsistency if
1527 // you have an empty cluster.
1528 I(!c.entries.empty());
1529
1530 L(F("BEGIN consume_cluster()\n"));
1531 shared_ptr<revision_set> rev(new revision_set());
1532 boost::shared_ptr<change_set> cs(new change_set());
1533 build_change_set(c, *cs);
1534
1535 apply_change_set(*cs, child_map);
1536 calculate_ident(child_map, child_mid);
1537
1538 rev->new_manifest = child_mid;
1539 rev->edges.insert(make_pair(parent_rid, make_pair(parent_mid, cs)));
1540 calculate_ident(*rev, child_rid);
1541
1542 store_manifest_edge(head_p);
1543
1544 preps.push_back(prepared_revision(child_rid, rev, c));
1545
1546 // now apply same change set to parent_map, making parent_map == child_map
1547 apply_change_set(*cs, parent_map);
1548 parent_mid = child_mid;
1549 parent_rid = child_rid;
1550 L(F("END consume_cluster('%s') (parent '%s')\n") % child_rid % rev->edges.begin()->first);
1551}
