monotone

monotone Mtn Source Tree

Root/src/migrate_schema.cc

1// Copyright (C) 2002 Graydon Hoare <graydon@pobox.com>
2//
3// This program is made available under the GNU GPL version 2.0 or
4// greater. See the accompanying file COPYING for details.
5//
6// This program is distributed WITHOUT ANY WARRANTY; without even the
7// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
8// PURPOSE.
9
10#include "base.hh"
11#include <boost/tokenizer.hpp>
12#include "lexical_cast.hh"
13#include <sqlite3.h>
14#include <cstring>
15
16// ...ow.
17#include <set>
18#include <map>
19
20#include "sanity.hh"
21#include "migration.hh"
22#include "key_store.hh"
23#include "transforms.hh"
24#include "constants.hh"
25
26using std::string;
27
28// this file knows how to migrate schema databases. the general strategy is
29// to hash each schema we ever use, and make a list of the SQL commands
30// required to get from one hash value to the next. when you do a
31// migration, the migrator locates your current db's state on the list and
32// then runs all the migration functions between that point and the target
33// of the migration.
34
35// you will notice a little bit of duplicated code between here and
36// database.cc; this was originally to facilitate inclusion of migration
37// capability into the depot code, but is now preserved because the code
38// in this file is easier to write and understand if it speaks directly
39// to sqlite.
40
41// Wrappers around the bare sqlite3 API. We do not use sqlite3_exec because
42// we want the better error handling that sqlite3_prepare_v2 gives us.
43
44void
45assert_sqlite3_ok(sqlite3 * db)
46{
47 int errcode = sqlite3_errcode(db);
48
49 if (errcode == SQLITE_OK)
50 return;
51
52 char const * errmsg = sqlite3_errmsg(db);
53
54 // first log the code so we can find _out_ what the confusing code
55 // was... note that code does not uniquely identify the errmsg, unlike
56 // errno's.
57 L(FL("sqlite error: %d: %s") % errcode % errmsg);
58
59 // Check the string to see if it looks like a recoverable_failure
60 // thrown from within an SQL extension function, caught, and turned
61 // into a call to sqlite3_result_error. (Extension functions have to
62 // do this to avoid corrupting sqlite's internal state.) If it is,
63 // rethrow it rather than feeding it to E(), lest we get "error:
64 // sqlite error: error: " ugliness.
65 char const *pfx = _("error: ");
66 if (!std::strncmp(errmsg, pfx, strlen(pfx)))
67 throw recoverable_failure(origin::database, errmsg);
68
69 // sometimes sqlite is not very helpful
70 // so we keep a table of errors people have gotten and more helpful versions
71 char const * auxiliary_message = "";
72 switch (errcode)
73 {
74 // All memory-exhaustion conditions should give the same diagnostic.
75 case SQLITE_NOMEM:
76 throw std::bad_alloc();
77
78 // These diagnostics generally indicate an operating-system-level
79 // failure. It would be nice to throw strerror(errno) in there but
80 // we cannot assume errno is still valid by the time we get here.
81 case SQLITE_IOERR:
82 case SQLITE_CANTOPEN:
83 case SQLITE_PROTOCOL:
84 auxiliary_message
85 = _("make sure database and containing directory are writeable\n"
86 "and you have not run out of disk space");
87 break;
88
89 // These error codes may indicate someone is trying to load a database
90 // so old that it's in sqlite 2's disk format (monotone 0.16 or
91 // older).
92 case SQLITE_CORRUPT:
93 case SQLITE_NOTADB:
94 auxiliary_message
95 = _("(if this is a database last used by monotone 0.16 or older,\n"
96 "you must follow a special procedure to make it usable again.\n"
97 "see the file UPGRADE, in the distribution, for instructions.)");
98
99 default:
100 break;
101 }
102
103 // if the auxiliary message is empty, the \n will be stripped off too
104 E(false, origin::system,
105 F("sqlite error: %s\n%s") % errmsg % auxiliary_message);
106}
107
108
namespace
{
  // Thin RAII wrapper around a single prepared sqlite3 statement, plus
  // a few static conveniences.  All sqlite errors funnel through
  // assert_sqlite3_ok (which throws); the destructor releases the
  // statement handle.
  struct sql
  {
    // Prepare the first complete statement in CMD against DB and check
    // that it will produce exactly COLS result columns.  If AFTERP is
    // non-null, *afterp receives a pointer to the unconsumed tail of
    // CMD (for multi-statement strings); otherwise CMD must contain
    // exactly one statement.
    sql(sqlite3 * db, int cols, char const *cmd, char const **afterp = 0)
      : stmt(0), ncols(cols)
    {
      sqlite3_stmt * s;

      char const * after;
      L(FL("executing SQL '%s'") % cmd);

      sqlite3_prepare_v2(db, cmd, strlen(cmd), &s, &after);
      MM(cmd);
      MM(after);
      assert_sqlite3_ok(db);

      I(s);
      if (afterp)
        *afterp = after;
      else
        I(*after == 0);
      I(sqlite3_column_count(s) == ncols);
      stmt = s;
    }
    ~sql()
    {
      if (stmt)
        sqlite3_finalize(stmt);
    }

    // Advance the statement.  Returns true when a row is available,
    // false when the statement has run to completion.  Any other
    // result is an error and we throw (so control never actually
    // falls off the end of this function despite the missing return).
    bool step()
    {
      int res = sqlite3_step(stmt);
      if (res == SQLITE_ROW)
        return true;
      if (res == SQLITE_DONE)
        {
          L(FL("success"));
          return false;
        }
      // Diagnostics from sqlite3_result_error show up in sqlite3_errmsg
      // only after sqlite3_finalize or sqlite3_reset are called on the
      // stmt object. See SQLite ticket #1640.
      sqlite3 * db = sqlite3_db_handle(stmt);
      sqlite3_finalize(stmt);
      stmt = 0;
      assert_sqlite3_ok(db);
      I(false);
    }
    int column_int(int col)
    {
      I(col >= 0 && col < ncols);
      return sqlite3_column_int(stmt, col);
    }
    // NOTE(review): if the column is NULL, sqlite3_column_text returns
    // a null pointer and the string construction below would be
    // undefined behavior -- callers appear expected to check
    // column_nonnull first.
    string column_string(int col)
    {
      I(col >= 0 && col < ncols);
      return string(reinterpret_cast<char const *>
                    (sqlite3_column_text(stmt, col)));
    }
    string column_blob(int col)
    {
      I(col >= 0 && col < ncols);
      char const * base =
        reinterpret_cast<char const *>(sqlite3_column_blob(stmt, col));
      int len = sqlite3_column_bytes(stmt, col);
      return string(base, base + len);
    }
    bool column_nonnull(int col)
    {
      I(col >= 0 && col < ncols);
      return sqlite3_column_type(stmt, col) != SQLITE_NULL;
    }

    // convenience for executing a sequence of sql statements,
    // none of which returns any rows.
    static void exec(sqlite3 * db, char const * cmd)
    {
      do
        {
          sql stmt(db, 0, cmd, &cmd);
          I(stmt.step() == false);
        }
      while (*cmd != '\0');
    }

    // convenience for evaluating an expression that returns a single number.
    static int value(sqlite3 * db, char const * cmd)
    {
      sql stmt(db, 1, cmd);

      I(stmt.step() == true);
      int res = stmt.column_int(0);
      I(stmt.step() == false);

      return res;
    }

    // convenience for making functions
    static void create_function(sqlite3 * db, char const * name,
                                void (*fn)(sqlite3_context *,
                                           int, sqlite3_value **))
    {
      sqlite3_create_function(db, name, -1, SQLITE_UTF8, 0, fn, 0, 0);
      assert_sqlite3_ok(db);
    }

  private:
    sqlite3_stmt * stmt;
    int ncols;
  };

  // Scope guard for an exclusive sqlite transaction: BEGIN on
  // construction, COMMIT at scope exit if commit() was called,
  // otherwise ROLLBACK.  Note that commit() only flips a flag; the
  // actual COMMIT runs in the destructor, so a failure there throws
  // from a destructor.
  struct transaction
  {
    transaction(sqlite3 * s) : db(s), committed(false)
    {
      sql::exec(db, "BEGIN EXCLUSIVE");
    }
    void commit()
    {
      I(committed == false);
      committed = true;
    }
    ~transaction()
    {
      if (committed)
        sql::exec(db, "COMMIT");
      else
        sql::exec(db, "ROLLBACK");
    }
  private:
    sqlite3 * db;
    bool committed;
  };
}
245
246// SQL extension functions.
247
248// sqlite3_value_text returns unsigned char const *, which is inconvenient
249inline char const *
250sqlite3_value_cstr(sqlite3_value * arg)
251{
252 return reinterpret_cast<char const *>(sqlite3_value_text(arg));
253}
254
// True for the four ASCII whitespace characters that sha1_nows()
// strips before hashing.
inline bool is_ws(char c)
{
  switch (c)
    {
    case '\r':
    case '\n':
    case '\t':
    case ' ':
      return true;
    default:
      return false;
    }
}
259
// Shared implementation of the sha1() and sha1_nows() SQL functions:
// join args[1..nargs-1] using args[0] as separator, optionally strip
// ASCII whitespace, and return the SHA1 of the result as a blob.
//
// NOTE(review): two oddities, both deliberately left alone because
// these hashes were written into historical databases and changing the
// computation would break migration compatibility:
//  1. the nargs <= 1 guard makes the nargs == 1 branch below dead code;
//  2. 'end' points at the *last* byte of each value, so the
//     remove_copy_if calls (half-open range [s, end)) exclude that
//     byte from the hash, while the non-stripping append(s, end+1)
//     includes it.
static void
sqlite3_sha1_fn_body(sqlite3_context *f, int nargs, sqlite3_value ** args,
                     bool strip_whitespace)
{
  if (nargs <= 1)
    {
      sqlite3_result_error(f, "need at least 1 arg to sha1()", -1);
      return;
    }

  string tmp;
  if (nargs == 1)  // unreachable -- see NOTE above
    {
      char const * s = sqlite3_value_cstr(args[0]);
      char const * end = s + sqlite3_value_bytes(args[0]) - 1;
      remove_copy_if(s, end, back_inserter(tmp), is_ws);
    }
  else
    {
      char const * sep = sqlite3_value_cstr(args[0]);

      for (int i = 1; i < nargs; ++i)
        {
          if (i > 1)
            tmp += sep;
          char const * s = sqlite3_value_cstr(args[i]);
          char const * end = s + sqlite3_value_bytes(args[i]) - 1;
          if (strip_whitespace)
            {
              remove_copy_if(s, end, back_inserter(tmp), is_ws);
            }
          else
            {
              tmp.append(s, end+1);
            }
        }
    }

  id hash;
  calculate_ident(data(tmp, origin::database), hash);
  sqlite3_result_blob(f, hash().c_str(), hash().size(), SQLITE_TRANSIENT);
}
302
303static void
304sqlite3_sha1_nows_fn(sqlite3_context *f, int nargs, sqlite3_value ** args)
305{
306 sqlite3_sha1_fn_body(f, nargs, args, true);
307}
308
309static void
310sqlite3_sha1_fn(sqlite3_context *f, int nargs, sqlite3_value ** args)
311{
312 sqlite3_sha1_fn_body(f, nargs, args, false);
313}
314
315static void
316sqlite3_unbase64_fn(sqlite3_context *f, int nargs, sqlite3_value ** args)
317{
318 if (nargs != 1)
319 {
320 sqlite3_result_error(f, "need exactly 1 arg to unbase64()", -1);
321 return;
322 }
323 string decoded;
324
325 // This operation may throw informative_failure. We must intercept that
326 // and turn it into a call to sqlite3_result_error, or rollback will fail.
327 try
328 {
329 decoded = decode_base64_as<string>(sqlite3_value_cstr(args[0]),
330 origin::no_fault);
331 }
332 catch (recoverable_failure & e)
333 {
334 sqlite3_result_error(f, e.what(), -1);
335 return;
336 }
337 sqlite3_result_blob(f, decoded.c_str(), decoded.size(), SQLITE_TRANSIENT);
338}
339
340static void
341sqlite3_unhex_fn(sqlite3_context *f, int nargs, sqlite3_value **args)
342{
343 if (nargs != 1)
344 {
345 sqlite3_result_error(f, "need exactly 1 arg to unhex()", -1);
346 return;
347 }
348 string decoded;
349
350 // This operation may throw informative_failure. We must intercept that
351 // and turn it into a call to sqlite3_result_error, or rollback will fail.
352 try
353 {
354 decoded = decode_hexenc(sqlite3_value_cstr(args[0]), origin::no_fault);
355 }
356 catch (recoverable_failure & e)
357 {
358 sqlite3_result_error(f, e.what(), -1);
359 return;
360 }
361 // This is only ever used with 20-byte SHA1 hashes or empty strings, so
362 // make sure that's what we've got.
363 if (decoded.size() != constants::idlen_bytes && !decoded.empty())
364 {
365 sqlite3_result_error(f, "unhex() result is the wrong length", -1);
366 return;
367 }
368
369 sqlite3_result_blob(f, decoded.data(), decoded.size(), SQLITE_TRANSIENT);
370}
371
372// Here are all of the migration steps. Almost all of them can be expressed
373// entirely as a series of SQL statements; those statements are packaged
374// into a long, continued string constant for the step. A few require a
375// function instead.
376//
377// Please keep this list in the same order as the migration_events table
378// below.
379
// Merge the separate "url" and "groupname" columns of the old netnews
// transport tables into a single "url" column of the form
// "url/groupname".  Each table is rebuilt via the rename-to-tmp /
// recreate / INSERT..SELECT / drop-tmp pattern used throughout this
// file (sqlite's ALTER TABLE cannot reshape columns directly).
char const migrate_merge_url_and_group[] =
  // migrate the posting_queue table
  "ALTER TABLE posting_queue RENAME TO tmp;"
  "CREATE TABLE posting_queue"
  " ( url not null, -- URL we are going to send this to\n"
  " content not null -- the packets we're going to send\n"
  " );"
  "INSERT INTO posting_queue"
  " SELECT (url || '/' || groupname), content FROM tmp;"
  "DROP TABLE tmp;"

  // migrate the incoming_queue table
  "ALTER TABLE incoming_queue RENAME TO tmp;"
  "CREATE TABLE incoming_queue "
  " ( url not null, -- URL we got this bundle from\n"
  " content not null -- the packets we're going to read\n"
  " );"
  "INSERT INTO incoming_queue"
  " SELECT (url || '/' || groupname), content FROM tmp;"
  "DROP TABLE tmp;"

  // migrate the sequence_numbers table
  "ALTER TABLE sequence_numbers RENAME TO tmp;"
  "CREATE TABLE sequence_numbers "
  " ( url primary key, -- URL to read from\n"
  " major not null, -- 0 in news servers, may be higher in depots\n"
  " minor not null -- last article / packet sequence number we got\n"
  " );"
  "INSERT INTO sequence_numbers"
  " SELECT (url || '/' || groupname), major, minor FROM tmp;"
  "DROP TABLE tmp;"

  // migrate the netserver_manifests table
  "ALTER TABLE netserver_manifests RENAME TO tmp;"
  "CREATE TABLE netserver_manifests"
  " ( url not null, -- url of some server\n"
  " manifest not null, -- manifest which exists on url\n"
  " unique(url, manifest)"
  " );"
  "INSERT INTO netserver_manifests"
  " SELECT (url || '/' || groupname), manifest FROM tmp;"

  "DROP TABLE tmp;"
  ;
424
// Add a leading "hash" column to the cert and key tables (computed
// with the sha1_nows() SQL function defined above) and create the
// merkle_nodes table used by netsync.
char const migrate_add_hashes_and_merkle_trees[] =
  // add the column to manifest_certs
  "ALTER TABLE manifest_certs RENAME TO tmp;"
  "CREATE TABLE manifest_certs"
  " ( hash not null unique, -- hash of remaining fields separated by \":\"\n"
  " id not null, -- joins with manifests.id or manifest_deltas.id\n"
  " name not null, -- opaque string chosen by user\n"
  " value not null, -- opaque blob\n"
  " keypair not null, -- joins with public_keys.id\n"
  " signature not null, -- RSA/SHA1 signature of \"[name@id:val]\"\n"
  " unique(name, id, value, keypair, signature)"
  " );"
  "INSERT INTO manifest_certs"
  " SELECT sha1_nows(':', id, name, value, keypair, signature),"
  " id, name, value, keypair, signature"
  " FROM tmp;"
  "DROP TABLE tmp;"

  // add the column to file_certs
  "ALTER TABLE file_certs RENAME TO tmp;"
  "CREATE TABLE file_certs"
  " ( hash not null unique, -- hash of remaining fields separated by \":\"\n"
  " id not null, -- joins with files.id or file_deltas.id\n"
  " name not null, -- opaque string chosen by user\n"
  " value not null, -- opaque blob\n"
  " keypair not null, -- joins with public_keys.id\n"
  " signature not null, -- RSA/SHA1 signature of \"[name@id:val]\"\n"
  " unique(name, id, value, keypair, signature)"
  " );"
  "INSERT INTO file_certs"
  " SELECT sha1_nows(':', id, name, value, keypair, signature),"
  " id, name, value, keypair, signature"
  " FROM tmp;"
  "DROP TABLE tmp;"

  // add the column to public_keys
  "ALTER TABLE public_keys RENAME TO tmp;"
  "CREATE TABLE public_keys"
  " ( hash not null unique, -- hash of remaining fields separated by \":\"\n"
  " id primary key, -- key identifier chosen by user\n"
  " keydata not null -- RSA public params\n"
  " );"
  "INSERT INTO public_keys SELECT sha1_nows(':',id,keydata), id, keydata FROM tmp;"
  "DROP TABLE tmp;"

  // add the column to private_keys
  "ALTER TABLE private_keys RENAME TO tmp;"
  "CREATE TABLE private_keys"
  " ( hash not null unique, -- hash of remaining fields separated by \":\"\n"
  " id primary key, -- as in public_keys (same identifiers, in fact)\n"
  " keydata not null -- encrypted RSA private params\n"
  " );"
  "INSERT INTO private_keys SELECT sha1_nows(':',id,keydata), id, keydata FROM tmp;"
  "DROP TABLE tmp;"

  // add the merkle tree stuff
  "CREATE TABLE merkle_nodes"
  " ( type not null, -- \"key\", \"mcert\", \"fcert\", \"manifest\"\n"
  " collection not null, -- name chosen by user\n"
  " level not null, -- tree level this prefix encodes\n"
  " prefix not null, -- label identifying node in tree\n"
  " body not null, -- binary, base64'ed node contents\n"
  " unique(type, collection, level, prefix)"
  ");"
  ;
490
// Switch to the revision-based history model: drop the netnews
// transport tables (and file_certs, schema_version), rebuild
// merkle_nodes (its comment changes "manifest" to "rcert"), and
// create the revisions / revision_ancestry / revision_certs tables.
char const migrate_to_revisions[] =
  "DROP TABLE schema_version;"
  "DROP TABLE posting_queue;"
  "DROP TABLE incoming_queue;"
  "DROP TABLE sequence_numbers;"
  "DROP TABLE file_certs;"
  "DROP TABLE netserver_manifests;"
  "DROP TABLE merkle_nodes;"

  "CREATE TABLE merkle_nodes"
  " ( type not null, -- \"key\", \"mcert\", \"fcert\", \"rcert\"\n"
  " collection not null, -- name chosen by user\n"
  " level not null, -- tree level this prefix encodes\n"
  " prefix not null, -- label identifying node in tree\n"
  " body not null, -- binary, base64'ed node contents\n"
  " unique(type, collection, level, prefix)"
  " );"

  "CREATE TABLE revision_certs"
  " ( hash not null unique, -- hash of remaining fields separated by \":\"\n"
  " id not null, -- joins with revisions.id\n"
  " name not null, -- opaque string chosen by user\n"
  " value not null, -- opaque blob\n"
  " keypair not null, -- joins with public_keys.id\n"
  " signature not null, -- RSA/SHA1 signature of \"[name@id:val]\"\n"
  " unique(name, id, value, keypair, signature)"
  " );"

  "CREATE TABLE revisions"
  " ( id primary key, -- SHA1(text of revision)\n"
  " data not null -- compressed, encoded contents of a revision\n"
  " );"

  "CREATE TABLE revision_ancestry"
  " ( parent not null, -- joins with revisions.id\n"
  " child not null, -- joins with revisions.id\n"
  " unique(parent, child)"
  " );"
  ;
530
// Introduce per-branch epochs (and drop merkle_nodes, which is
// invalidated by the change).
char const migrate_to_epochs[] =
  "DROP TABLE merkle_nodes;"
  "CREATE TABLE branch_epochs\n"
  " ( hash not null unique, -- hash of remaining fields separated by \":\"\n"
  " branch not null unique, -- joins with revision_certs.value\n"
  " epoch not null -- random hex-encoded id\n"
  " );"
  ;
539
// Add the db_vars table (generic per-database key/value settings,
// scoped by domain).
char const migrate_to_vars[] =
  "CREATE TABLE db_vars\n"
  " ( domain not null, -- scope of application of a var\n"
  " name not null, -- var key\n"
  " value not null, -- var value\n"
  " unique(domain, name)"
  " );"
  ;
548
// Add indexes to speed up ancestry traversal and cert lookup.
char const migrate_add_indexes[] =
  "CREATE INDEX revision_ancestry__child ON revision_ancestry (child);"
  "CREATE INDEX revision_certs__id ON revision_certs (id);"
  "CREATE INDEX revision_certs__name_value ON revision_certs (name, value);"
  ;
554
555// There is, perhaps, an argument for turning the logic inside the
556// while-loop into a callback function like unbase64(). However, we'd have
557// to get the key_store in there somehow, and besides I think it's clearer
558// this way.
// Move every private key out of the database's private_keys table and
// into the key_store (which writes it to the user's key directory),
// then drop the now-unused table.  The LEFT OUTER JOIN tolerates a
// private key with no matching public key: pub is then passed along
// empty.
static void
migrate_to_external_privkeys(sqlite3 * db, key_store & keys)
{
  // Inner scope so the SELECT statement is finalized before we DROP
  // the table it reads from.
  {
    sql stmt(db, 3,
             "SELECT private_keys.id, private_keys.keydata, public_keys.keydata"
             " FROM private_keys LEFT OUTER JOIN public_keys"
             " ON private_keys.id = public_keys.id");

    while (stmt.step())
      {
        key_name ident(stmt.column_string(0), origin::database);
        base64<old_arc4_rsa_priv_key> old_priv(stmt.column_string(1),
                                               origin::database);
        base64<rsa_pub_key> pub;

        if (stmt.column_nonnull(2))
          pub = base64<rsa_pub_key>(stmt.column_string(2),
                                    origin::database);

        P(F("moving key '%s' from database to '%s'")
          % ident % keys.get_key_dir());
        keys.migrate_old_key_pair(ident,
                                  decode_base64(old_priv),
                                  decode_base64(pub));
      }
  }

  sql::exec(db, "DROP TABLE private_keys;");
}
589
// Create the roster tables (rosters, roster_deltas, the
// revision->roster mapping, and the node-number counter).
char const migrate_add_rosters[] =
  "CREATE TABLE rosters"
  " ( id primary key, -- strong hash of the roster\n"
  " data not null -- compressed, encoded contents of the roster\n"
  " );"

  "CREATE TABLE roster_deltas"
  " ( id not null, -- strong hash of the roster\n"
  " base not null, -- joins with either rosters.id or roster_deltas.id\n"
  " delta not null, -- rdiff to construct current from base\n"
  " unique(id, base)"
  " );"

  "CREATE TABLE revision_roster"
  " ( rev_id primary key, -- joins with revisions.id\n"
  " roster_id not null -- joins with either rosters.id or roster_deltas.id\n"
  " );"

  "CREATE TABLE next_roster_node_number"
  " ( node primary key -- only one entry in this table, ever\n"
  " );"
  ;
612
613// I wish I had a form of ALTER TABLE COMMENT on sqlite3
// Convert stored values from base64 text to raw blobs, using the
// unbase64() SQL function defined above.  files and file_deltas are
// rebuilt outright; the other tables are converted in place with
// UPDATE statements.
// I wish I had a form of ALTER TABLE COMMENT on sqlite3
char const migrate_files_BLOB[] =
  // change the encoding of file(_delta)s
  "ALTER TABLE files RENAME TO tmp;"
  "CREATE TABLE files"
  " ( id primary key, -- strong hash of file contents\n"
  " data not null -- compressed contents of a file\n"
  " );"
  "INSERT INTO files SELECT id, unbase64(data) FROM tmp;"
  "DROP TABLE tmp;"

  "ALTER TABLE file_deltas RENAME TO tmp;"
  "CREATE TABLE file_deltas"
  " ( id not null, -- strong hash of file contents\n"
  " base not null, -- joins with files.id or file_deltas.id\n"
  " delta not null, -- compressed rdiff to construct current from base\n"
  " unique(id, base)"
  " );"
  "INSERT INTO file_deltas SELECT id, base, unbase64(delta) FROM tmp;"
  "DROP TABLE tmp;"

  // migrate other contents which are accessed by get|put_version
  "UPDATE manifests SET data=unbase64(data);"
  "UPDATE manifest_deltas SET delta=unbase64(delta);"
  "UPDATE rosters SET data=unbase64(data) ;"
  "UPDATE roster_deltas SET delta=unbase64(delta);"
  "UPDATE db_vars SET value=unbase64(value), name=unbase64(name);"
  "UPDATE public_keys SET keydata=unbase64(keydata);"
  "UPDATE revision_certs SET value=unbase64(value),"
  " signature=unbase64(signature);"
  "UPDATE manifest_certs SET value=unbase64(value),"
  " signature=unbase64(signature);"
  "UPDATE revisions SET data=unbase64(data);"
  "UPDATE branch_epochs SET branch=unbase64(branch);"
  ;
648
// Re-key the roster tables by revision id instead of roster hash and
// add checksum columns.  The old contents are simply dropped; the
// caller schedules a cache regeneration (see migration_events) to
// repopulate them.
char const migrate_rosters_no_hash[] =
  "DROP TABLE rosters;"
  "DROP TABLE roster_deltas;"
  "DROP TABLE revision_roster;"

  "CREATE TABLE rosters"
  " ( id primary key, -- a revision id\n"
  " checksum not null, -- checksum of 'data', to protect against"
  " disk corruption\n"
  " data not null -- compressed, encoded contents of the roster\n"
  " );"

  "CREATE TABLE roster_deltas"
  " ( id primary key, -- a revision id\n"
  " checksum not null, -- checksum of 'delta', to protect against"
  " disk corruption\n"
  " base not null, -- joins with either rosters.id or roster_deltas.id\n"
  " delta not null -- rdiff to construct current from base\n"
  " );"
  ;
669
// Add the heights table (cached per-revision height values; contents
// are regenerated after migration, see migration_events).
char const migrate_add_heights[] =
  "CREATE TABLE heights"
  " ( revision not null, -- joins with revisions.id\n"
  " height not null, -- complex height, array of big endian u32 integers\n"
  " unique(revision, height)"
  " );"
  ;
677
678// this is a function because it has to refer to the numeric constant
679// defined in migration.hh.
680static void
681migrate_add_ccode(sqlite3 * db, key_store &)
682{
683 string cmd = "PRAGMA user_version = ";
684 cmd += boost::lexical_cast<string>(mtn_creator_code);
685 sql::exec(db, cmd.c_str());
686}
687
// Add an index over the cached height values.
char const migrate_add_heights_index[] =
  "CREATE INDEX heights__height ON heights (height);"
  ;
691
// Convert all stored hex-encoded ids to raw binary form using the
// unhex() SQL function defined above.  Most tables are converted in
// place; revision_certs and branch_epochs are rebuilt so their schema
// text (and therefore the schema hash) changes as well.
char const migrate_to_binary_hashes[] =
  "UPDATE files SET id=unhex(id);"
  "UPDATE file_deltas SET id=unhex(id), base=unhex(base);"
  "UPDATE revisions SET id=unhex(id);"
  "UPDATE revision_ancestry SET parent=unhex(parent), child=unhex(child);"
  "UPDATE heights SET revision=unhex(revision);"
  "UPDATE rosters SET id=unhex(id), checksum=unhex(checksum);"
  "UPDATE roster_deltas SET id=unhex(id), base=unhex(base), "
  " checksum=unhex(checksum);"
  "UPDATE public_keys SET hash=unhex(hash);"

  // revision_certs also gets a new index, so we recreate the
  // table completely.
  "ALTER TABLE revision_certs RENAME TO tmp;\n"
  "CREATE TABLE revision_certs"
  " ( hash not null unique, -- hash of remaining fields separated by \":\"\n"
  " id not null, -- joins with revisions.id\n"
  " name not null, -- opaque string chosen by user\n"
  " value not null, -- opaque blob\n"
  " keypair not null, -- joins with public_keys.id\n"
  " signature not null, -- RSA/SHA1 signature of \"[name@id:val]\"\n"
  " unique(name, value, id, keypair, signature)\n"
  " );"
  "INSERT INTO revision_certs SELECT unhex(hash), unhex(id), name, value, keypair, signature FROM tmp;"
  "DROP TABLE tmp;"
  "CREATE INDEX revision_certs__id ON revision_certs (id);"

  // We altered a comment on this table, thus we need to recreate it.
  // Additionally, this is the only schema change, so that we get another
  // schema hash to upgrade to.
  "ALTER TABLE branch_epochs RENAME TO tmp;"
  "CREATE TABLE branch_epochs"
  " ( hash not null unique, -- hash of remaining fields separated by \":\"\n"
  " branch not null unique, -- joins with revision_certs.value\n"
  " epoch not null -- random binary id\n"
  " );"
  "INSERT INTO branch_epochs SELECT unhex(hash), branch, unhex(epoch) FROM tmp;"
  "DROP TABLE tmp;"

  // To be able to migrate from pre-roster era, we also need to convert
  // these deprecated tables
  "UPDATE manifests SET id=unhex(id);"
  "UPDATE manifest_deltas SET id=unhex(id), base=unhex(base);"
  "UPDATE manifest_certs SET id=unhex(id), hash=unhex(hash);"
  ;
737
738
// Re-key public_keys by key hash (the old "hash" column becomes the
// primary key "id", the old "id" becomes "name"), and rewrite
// revision_certs so its keypair_id column joins with the new
// public_keys.id via the JOIN in the final INSERT.
char const migrate_certs_to_key_hash[] =
  "ALTER TABLE public_keys rename to public_keys_tmp;\n"
  "CREATE TABLE public_keys\n"
  " ( id primary key, -- hash of remaining fields separated by \":\"\n"
  " name not null, -- key identifier chosen by user\n"
  " keydata not null -- RSA public params\n"
  " );\n"
  "INSERT INTO public_keys (id, name, keydata)"
  " SELECT hash, id, keydata FROM public_keys_tmp;\n"
  "DROP TABLE public_keys_tmp;\n"

  "ALTER TABLE revision_certs rename to revision_certs_tmp;\n"
  "CREATE TABLE revision_certs\n"
  " ( hash not null unique, -- hash of remaining fields separated by \":\"\n"
  " revision_id not null, -- joins with revisions.id\n"
  " name not null, -- opaque string chosen by user\n"
  " value not null, -- opaque blob\n"
  " keypair_id not null, -- joins with public_keys.id\n"
  " signature not null, -- RSA/SHA1 signature of \"[name@id:val]\"\n"
  " unique(name, value, revision_id, keypair_id, signature)\n"
  " );\n"
  "CREATE INDEX revision_certs__revision_id ON revision_certs (revision_id);\n"

  "INSERT INTO revision_certs(hash, revision_id, name, value, keypair_id, signature)\n"
  "SELECT a.hash, a.id, a.name, a.value, b.id, a.signature\n"
  "FROM revision_certs_tmp a JOIN public_keys b\n"
  "ON a.keypair = b.name;\n"

  "DROP TABLE revision_certs_tmp;"
  ;
769
// Replace the single-column revision_id index on revision_certs with a
// covering index over all of its queried columns.
char const migrate_better_cert_indexing[] =
  "DROP INDEX revision_certs__revision_id;\n"
  "CREATE INDEX revision_certs__revnameval ON revision_certs (revision_id,\n"
  " name, value, keypair_id, signature);";
774
namespace {
  // Per-revision bookkeeping for migrate_add_branch_leaf_cache below.
  struct branch_leaf_finder_info
  {
    std::set<string> parents;    // immediate parent revision ids
    std::set<string> branches;   // branch cert values on this revision
    // branch cert values seen on any descendant, filled in during the sweep
    std::set<string> known_descendant_branches;
    int num_children;            // children not yet swept
    branch_leaf_finder_info() : num_children(0) {};
  };
}
// Populate the new branch_leaves table: for each branch, the
// revisions carrying that branch cert which have no descendant also
// carrying it.  The whole ancestry graph and all branch certs are
// loaded into memory, then swept from childless revisions toward the
// roots, propagating the set of branches seen on descendants.  KEYS is
// unused; the signature must match migrator_cb.
static void
migrate_add_branch_leaf_cache(sqlite3 * db, key_store & keys)
{
  sql::exec(db,
            "CREATE TABLE branch_leaves\n"
            " (\n"
            " branch not null, -- joins with revision_certs.value\n"
            " revision_id not null, -- joins with revisions.id\n"
            " unique(branch, revision_id)\n"
            " )");

  typedef std::map<string, branch_leaf_finder_info> anc_etc_map;
  anc_etc_map ancestry_etc;

  // Load the ancestry graph: each child's parent set, plus a pending
  // child count on each parent.
  {
    sql ancestry(db, 2, "select child, parent from revision_ancestry "
                 "where parent is not null");
    while (ancestry.step())
      {
        string child = ancestry.column_blob(0);
        string parent = ancestry.column_blob(1);
        branch_leaf_finder_info & info = ancestry_etc[child];
        info.parents.insert(parent);
        ancestry_etc[parent].num_children++;
      }
  }
  // Attach each revision's branch cert values.
  {
    sql certs(db, 2,
              "select revision_id, value "
              "from revision_certs where name = 'branch'");
    while (certs.step())
      {
        ancestry_etc[certs.column_blob(0)].branches.insert(certs.column_blob(1));
      }
  }

  // The sweep starts at revisions with no children.
  std::set<string> frontier;
  {
    for (anc_etc_map::iterator i = ancestry_etc.begin();
         i != ancestry_etc.end(); ++i)
      {
        if (i->second.num_children == 0)
          {
            frontier.insert(i->first);
          }
      }
  }

  while (!frontier.empty())
    {
      string rev = *frontier.begin();
      frontier.erase(frontier.begin());
      branch_leaf_finder_info & my_info = ancestry_etc[rev];
      // A branch cert on this revision marks a leaf unless some
      // descendant already carries the same branch.
      for (std::set<string>::iterator b = my_info.branches.begin();
           b != my_info.branches.end(); ++b)
        {
          std::set<string>::iterator desc = my_info.known_descendant_branches.find(*b);
          if (desc != my_info.known_descendant_branches.end())
            continue;
          // ids are binary at this schema version, hence the X'..'
          // hex-blob literals.
          string q = string("insert into branch_leaves(branch, revision_id) "
                            "values(X'")
            + encode_hexenc(*b, origin::internal) + "', X'"
            + encode_hexenc(rev, origin::internal) + "')";
          sql::exec(db, q.c_str());
        }
      // Propagate every branch seen here (own certs plus descendants')
      // to each parent; a parent joins the frontier once all of its
      // children have been processed.
      for (std::set<string>::iterator p = my_info.parents.begin();
           p != my_info.parents.end(); ++p)
        {
          branch_leaf_finder_info & parent_info = ancestry_etc[*p];
          for (std::set<string>::iterator b = my_info.branches.begin();
               b != my_info.branches.end(); ++b)
            {
              parent_info.known_descendant_branches.insert(*b);
            }
          for (std::set<string>::iterator b = my_info.known_descendant_branches.begin();
               b != my_info.known_descendant_branches.end(); ++b)
            {
              parent_info.known_descendant_branches.insert(*b);
            }
          parent_info.num_children--;
          if (parent_info.num_children == 0)
            frontier.insert(*p);
        }
    }
}
870
// Add the file_sizes cache table (contents are regenerated after
// migration, see migration_events).
char const migrate_add_file_sizes[] =
  "CREATE TABLE file_sizes\n"
  " (\n"
  " id primary key, -- joins with files.id or file_deltas.id\n"
  " size not null -- the size of the file in byte\n"
  " );";
877
878
// Post-migration work a migration step may request (stored in
// migration_event::regime).  These must be listed in order so that
// ones listed earlier override ones listed later.
enum upgrade_regime
  {
    upgrade_changesetify,
    upgrade_rosterify,
    upgrade_regen_caches,
    upgrade_none,
  };
888static void
889dump(enum upgrade_regime const & regime, string & out)
890{
891 switch (regime)
892 {
893 case upgrade_changesetify: out = "upgrade_changesetify"; break;
894 case upgrade_rosterify: out = "upgrade_rosterify"; break;
895 case upgrade_regen_caches: out = "upgrade_regen_caches"; break;
896 case upgrade_none: out = "upgrade_none"; break;
897 default: out = (FL("upgrade_regime(%d)") % regime).str(); break;
898 }
899}
900
// Signature of a code-driven migration step.
typedef void (*migrator_cb)(sqlite3 *, key_store &);

// One entry in the migration table: the hash of the schema this step
// starts from, plus either a SQL-string migrator or a code migrator.
// Exactly one of migrator_sql and migrator_func should be non-null in
// all entries in migration_events, except the very last.
struct migration_event
{
  char const * id;             // schema hash this step migrates from
  char const * migrator_sql;   // SQL-only migration, or null
  migrator_cb migrator_func;   // code migration, or null
  upgrade_regime regime;       // post-migration upgrade work required
  regen_cache_type regen_type; // caches to regenerate afterwards
};
913
914// IMPORTANT: whenever you modify this to add a new schema version, you must
915// also add a new migration test for the new schema version. See
916// tests/schema_migration for details.
917
// The migration table itself: each entry maps the schema with the
// given hash to the migrator that advances it to the next entry's
// schema.  Keep in the same order as the definitions above.
const migration_event migration_events[] = {
  { "edb5fa6cef65bcb7d0c612023d267c3aeaa1e57a",
    migrate_merge_url_and_group, 0, upgrade_none, regen_none},

  { "f042f3c4d0a4f98f6658cbaf603d376acf88ff4b",
    migrate_add_hashes_and_merkle_trees, 0, upgrade_none, regen_none },

  { "8929e54f40bf4d3b4aea8b037d2c9263e82abdf4",
    migrate_to_revisions, 0, upgrade_changesetify, regen_none },

  { "c1e86588e11ad07fa53e5d294edc043ce1d4005a",
    migrate_to_epochs, 0, upgrade_none, regen_none },

  { "40369a7bda66463c5785d160819ab6398b9d44f4",
    migrate_to_vars, 0, upgrade_none, regen_none },

  { "e372b508bea9b991816d1c74680f7ae10d2a6d94",
    migrate_add_indexes, 0, upgrade_none, regen_none },

  { "1509fd75019aebef5ac3da3a5edf1312393b70e9",
    0, migrate_to_external_privkeys, upgrade_none, regen_none },

  { "bd86f9a90b5d552f0be1fa9aee847ea0f317778b",
    migrate_add_rosters, 0, upgrade_rosterify, regen_none },

  { "1db80c7cee8fa966913db1a463ed50bf1b0e5b0e",
    migrate_files_BLOB, 0, upgrade_none, regen_none },

  { "9d2b5d7b86df00c30ac34fe87a3c20f1195bb2df",
    migrate_rosters_no_hash, 0, upgrade_regen_caches, regen_rosters },

  { "ae196843d368d042f475e3dadfed11e9d7f9f01e",
    migrate_add_heights, 0, upgrade_regen_caches, regen_heights },

  { "48fd5d84f1e5a949ca093e87e5ac558da6e5956d",
    0, migrate_add_ccode, upgrade_none, regen_none },

  { "fe48b0804e0048b87b4cea51b3ab338ba187bdc2",
    migrate_add_heights_index, 0, upgrade_none, regen_none },

  { "7ca81b45279403419581d7fde31ed888a80bd34e",
    migrate_to_binary_hashes, 0, upgrade_none, regen_none },

  { "212dd25a23bfd7bfe030ab910e9d62aa66aa2955",
    migrate_certs_to_key_hash, 0, upgrade_none, regen_none },

  { "9c8d5a9ea8e29c69be6459300982a68321b0ec12",
    0, migrate_add_branch_leaf_cache, upgrade_none, regen_branches },

  { "0c956abae3e52522e4e0b7c5cbe7868f5047153e",
    migrate_add_file_sizes, 0, upgrade_regen_caches, regen_file_sizes },

  { "1f60cec1b0f6c8c095dc6d0ffeff2bd0af971ce1",
    migrate_better_cert_indexing, 0, upgrade_none, regen_none },

  // The last entry in this table should always be the current
  // schema ID, with 0 for the migrators.
  { "c3a13c60edc432f9a7739f8a260565d77112c86e", 0, 0, upgrade_none, regen_none }
};
// Number of entries in the table above (including the terminal entry).
const size_t n_migration_events = (sizeof migration_events
                                   / sizeof migration_events[0]);
979
980// unfortunately, this has to be aware of the migration_events array and its
981// limits, lest we crash trying to print the garbage on either side.
982static void
983dump(struct migration_event const * const & mref, string & out)
984{
985 struct migration_event const * m = mref;
986 ptrdiff_t i = m - migration_events;
987 if (m == 0)
988 out = "invalid migration event (null pointer)";
989 else if (i < 0 || static_cast<size_t>(i) >= n_migration_events)
990 out = (FL("invalid migration event, index %ld/%lu")
991 % i % n_migration_events).str();
992 else
993 {
994 char const * type;
995 if (m->migrator_sql)
996 type = "SQL only";
997 else if (m->migrator_func)
998 type = "codeful";
999 else
1000 type = "none (current)";
1001
1002 string regime;
1003 dump(m->regime, regime);
1004
1005 out = (FL("migration %ld/%lu: %s, %s, from %s")
1006 % i % n_migration_events % type % regime % m->id).str();
1007 }
1008}
1009
1010// The next several functions are concerned with calculating the schema hash
1011// and determining whether a database is usable (with or without migration).
// Compute the schema fingerprint for DB and hex-encode it into IDENT.
// The SQL text of every real table and index is tokenized and re-joined
// with single spaces, so that whitespace/formatting differences between
// otherwise-identical schemas do not change the hash.  A non-zero
// user_version pragma is folded into the hashed text as well.
static void
calculate_schema_id(sqlite3 * db, string & ident)
{
  sql stmt(db, 1,
           "SELECT sql FROM sqlite_master "
           "WHERE (type = 'table' OR type = 'index') "
           // filter out NULL statements, because
           // those are auto-generated indices (for
           // UNIQUE constraints, etc.).
           "AND sql IS NOT NULL "
           "AND name not like 'sqlite_stat%' "
           "ORDER BY name");

  string schema;
  using boost::char_separator;
  typedef boost::tokenizer<char_separator<char> > tokenizer;
  // Whitespace separates tokens and is discarded; each of "(),;" is
  // kept as a one-character token of its own.
  char_separator<char> sep(" \r\n\t", "(),;");

  while (stmt.step())
    {
      string table_schema(stmt.column_string(0));
      tokenizer tokens(table_schema, sep);
      // Join all tokens with single spaces into one normalized string.
      for (tokenizer::iterator i = tokens.begin(); i != tokens.end(); i++)
        {
          if (!schema.empty())
            schema += " ";
          schema += *i;
        }
    }

  // Databases stamped with a creator code (see classify_schema) hash
  // differently from unstamped ones with the same table layout.
  u32 code = sql::value(db, "PRAGMA user_version");
  if (code != 0)
    {
      schema += " PRAGMA user_version = ";
      schema += boost::lexical_cast<string>(code);
    }

  id tid;
  calculate_ident(data(schema, origin::database), tid);
  ident = encode_hexenc(tid(), tid.made_from);
  L(FL("calculated schema id %s") % ident);
}
1054
1055// Look through the migration_events table and return a pointer to the entry
1056// corresponding to database DB, or null if it isn't there (i.e. if the
1057// database schema is not one we know).
1058static migration_event const *
1059find_migration(sqlite3 * db)
1060{
1061 string id;
1062 calculate_schema_id(db, id);
1063
1064 for (migration_event const *m = migration_events + n_migration_events - 1;
1065 m >= migration_events; m--)
1066 if (m->id == id)
1067 return m;
1068
1069 return 0;
1070}
1071
// This enumerates the possible mismatches between the monotone executable
// and its database.
enum schema_mismatch_case
  {
    SCHEMA_MATCHES = 0,        // schema is current; database usable as-is
    SCHEMA_MIGRATION_NEEDED,   // known older schema; migration can fix it
    SCHEMA_TOO_NEW,            // has our creator code, but unknown schema
    SCHEMA_NOT_MONOTONE,       // sqlite database, but not one of ours
    SCHEMA_EMPTY               // no tables at all (e.g. db load < /dev/null)
  };
1082static void dump(schema_mismatch_case const & cat, std::string & out)
1083{
1084 switch (cat)
1085 {
1086 case SCHEMA_MATCHES: out = "SCHEMA_MATCHES"; break;
1087 case SCHEMA_MIGRATION_NEEDED: out = "SCHEMA_MIGRATION_NEEDED"; break;
1088 case SCHEMA_TOO_NEW: out = "SCHEMA_TOO_NEW"; break;
1089 case SCHEMA_NOT_MONOTONE: out = "SCHEMA_NOT_MONOTONE"; break;
1090 case SCHEMA_EMPTY: out = "SCHEMA_EMPTY"; break;
1091 default: out = (FL("schema_mismatch_case(%d)") % cat).str(); break;
1092 }
1093}
1094
1095
1096static schema_mismatch_case
1097classify_schema(sqlite3 * db, migration_event const * m = 0)
1098{
1099 if (!m)
1100 m = find_migration(db);
1101
1102 if (m)
1103 {
1104 if (m->migrator_sql || m->migrator_func)
1105 return SCHEMA_MIGRATION_NEEDED;
1106 else
1107 return SCHEMA_MATCHES;
1108 }
1109 else
1110 {
1111 // Distinguish an utterly empty database, such as is created by
1112 // "mtn db load < /dev/null", or by the sqlite3 command line utility
1113 // if you don't give it anything to do.
1114 if (sql::value(db, "SELECT COUNT(*) FROM sqlite_master") == 0)
1115 return SCHEMA_EMPTY;
1116
1117 // monotone started setting this value in database headers only with
1118 // version 0.33, but all previous versions' databases are recognized
1119 // by their schema hashes.
1120
1121 u32 code = sql::value(db, "PRAGMA user_version");
1122 if (code != mtn_creator_code)
1123 return SCHEMA_NOT_MONOTONE;
1124
1125 return SCHEMA_TOO_NEW;
1126 }
1127}
1128
1129string
1130describe_sql_schema(sqlite3 * db)
1131{
1132 I(db != NULL);
1133 string hash;
1134 calculate_schema_id(db, hash);
1135
1136 switch (classify_schema(db))
1137 {
1138 case SCHEMA_MATCHES:
1139 return (F("%s (usable)") % hash).str();
1140 case SCHEMA_MIGRATION_NEEDED:
1141 return (F("%s (migration needed)") % hash).str();
1142 case SCHEMA_TOO_NEW:
1143 return (F("%s (too new, cannot use)") % hash).str();
1144 case SCHEMA_NOT_MONOTONE:
1145 return (F("%s (not a monotone database)") % hash).str();
1146 case SCHEMA_EMPTY:
1147 return (F("%s (database has no tables!)") % hash).str();
1148 default:
1149 I(false);
1150 }
1151}
1152
// Provide sensible diagnostics for a database schema whose hash we do not
// recognize. (Shared between check_sql_schema and migrate_sql_schema.)
// CAT is a single value, so at most one of these E() checks can fire;
// SCHEMA_MATCHES and SCHEMA_MIGRATION_NEEDED pass through untouched.
static void
diagnose_unrecognized_schema(schema_mismatch_case cat,
                             system_path const & filename)
{
  // Completely empty file: the user probably never ran 'db init'.
  E(cat != SCHEMA_EMPTY, origin::user,
    F("cannot use the empty sqlite database '%s'\n"
      "(monotone databases must be created with '%s db init')")
    % filename % prog_name);

  // Valid sqlite file, but without our creator code.
  E(cat != SCHEMA_NOT_MONOTONE, origin::user,
    F("'%s' does not appear to be a monotone database\n")
    % filename);

  // Our creator code, but a schema hash newer than this binary knows.
  E(cat != SCHEMA_TOO_NEW, origin::user,
    F("'%s' appears to be a monotone database, but this version of\n"
      "monotone does not recognize its schema.\n"
      "You probably need a newer version of monotone.")
    % filename);
}
1174
1175// check_sql_schema is called by database.cc on open, to determine whether
1176// the schema is up to date. If it returns at all, the schema is indeed
1177// up to date (otherwise it throws a diagnostic).
1178void
1179check_sql_schema(sqlite3 * db, system_path const & filename)
1180{
1181 I(db != NULL);
1182
1183 schema_mismatch_case cat = classify_schema(db);
1184
1185 diagnose_unrecognized_schema(cat, filename);
1186
1187 E(cat != SCHEMA_MIGRATION_NEEDED, origin::user,
1188 F("database '%s' is laid out according to an old schema.\n"
1189 "Try '%s db migrate' to upgrade\n"
1190 "(this is irreversible; you may want to make a backup copy first)")
1191 % filename % prog_name);
1192}
1193
1194#ifdef SUPPORT_SQLITE_BEFORE_3003014
1195// import the hex function for old sqlite libraries from database.cc
1196void sqlite3_hex_fn(sqlite3_context *f, int nargs, sqlite3_value **args);
1197#endif
1198
1199
// Bring DB's schema up to date by running every migration step between its
// current state and the end of the migration_events table, inside one
// transaction.  KEYS is handed to codeful migrators that need it.  Returns
// a migration_status describing any follow-up work the caller must do
// ('db changesetify'/'db rosterify', or cache regeneration).
migration_status
migrate_sql_schema(sqlite3 * db, key_store & keys,
                   system_path const & filename)
{
  I(db != NULL);

  // Accumulated over all steps run: the most demanding upgrade regime and
  // the union of cache-regeneration flags.
  upgrade_regime regime = upgrade_none; MM(regime);
  regen_cache_type regen_type = regen_none;

  // Take an exclusive lock on the database before we try to read anything
  // from it. If we don't take this lock until the beginning of the
  // "migrating data" phase, two simultaneous "db migrate" processes could
  // race through the "calculating migration" phase; then one of them would
  // wait for the other to finish all the migration steps, and trip over the
  // invariant check inside the for loop.
  {
    transaction guard(db);

    P(F("calculating migration..."));

    migration_event const *m; MM(m);
    schema_mismatch_case cat; MM(cat);
    m = find_migration(db);
    cat = classify_schema(db, m);

    // Throws for empty / non-monotone / too-new databases.
    diagnose_unrecognized_schema(cat, filename);

    // We really want 'db migrate' on an up-to-date schema to be a no-op
    // (no vacuum or anything, even), so that automated scripts can fire
    // one off optimistically and not have to worry about getting their
    // administrators to do it by hand.
    if (cat == SCHEMA_MATCHES)
      {
        P(F("no migration performed; database schema already up-to-date"));
        return migration_status();
      }

#ifdef SUPPORT_SQLITE_BEFORE_3003014
    // SQLite up to and including 3.3.12 didn't have a hex() function
    if (sqlite3_libversion_number() <= 3003012)
      sql::create_function(db, "hex", sqlite3_hex_fn);
#endif

    // SQL helpers the migrator SQL scripts rely on.
    sql::create_function(db, "sha1", sqlite3_sha1_fn);
    sql::create_function(db, "sha1_nows", sqlite3_sha1_nows_fn);
    sql::create_function(db, "unbase64", sqlite3_unbase64_fn);
    sql::create_function(db, "unhex", sqlite3_unhex_fn);

    P(F("migrating data..."));

    // Walk forward through the table from M, applying one step per
    // iteration, until we hit the final entry (both migrators null).
    for (;;)
      {
        // confirm that we are where we ought to be
        string id; MM(id);
        calculate_schema_id(db, id);

        I(id == m->id);
        I(!m->migrator_sql || !m->migrator_func);

        if (m->migrator_sql)
          sql::exec(db, m->migrator_sql);
        else if (m->migrator_func)
          m->migrator_func(db, keys);
        else
          break;

        // NOTE(review): min() implies the more demanding regimes have the
        // smaller enum values — confirm against the upgrade_regime
        // declaration in migration.hh.
        regime = std::min(regime, m->regime);
        // yes, this is ugly, but I don't want to introduce bitwise-enum
        // or anything fancy else for this single use case
        regen_type = static_cast<regen_cache_type>(regen_type | m->regen_type);

        m++;
        I(m < migration_events + n_migration_events);
        P(F("migrated to schema %s") % m->id);
      }

    P(F("committing changes to database"));
    guard.commit();
  }

  // Reclaim the space freed by dropped/rewritten tables.
  P(F("optimizing database"));
  sql::exec(db, "VACUUM");

  // Translate the accumulated regime into follow-up instructions.
  switch (regime)
    {
    case upgrade_changesetify:
    case upgrade_rosterify:
      {
        string command_str = (regime == upgrade_changesetify
                              ? "changesetify" : "rosterify");
        return migration_status(regen_none, command_str);
      }
      break;
    case upgrade_regen_caches:
      I(regen_type != regen_none);
      return migration_status(regen_type);
      break;
    case upgrade_none:
      break;
    }
  return migration_status();
}
1302
1303// test_migration_step runs the migration step from SCHEMA to its successor,
1304// *without* validating that the database actually conforms to that schema
1305// first. the point of this is to test error recovery from conditions that
1306// are not accessible through normal malformed dumps (because the schema
1307// conformance check will reject them).
1308
1309void
1310test_migration_step(sqlite3 * db, key_store & keys,
1311 system_path const & filename,
1312 string const & schema)
1313{
1314 I(db != NULL);
1315
1316#ifdef SUPPORT_SQLITE_BEFORE_3003014
1317 // SQLite up to and including 3.3.12 didn't have a hex() function
1318 if (sqlite3_libversion_number() <= 3003012)
1319 sql::create_function(db, "hex", sqlite3_hex_fn);
1320#endif
1321
1322 sql::create_function(db, "sha1_nows", sqlite3_sha1_nows_fn);
1323 sql::create_function(db, "sha1", sqlite3_sha1_fn);
1324 sql::create_function(db, "unbase64", sqlite3_unbase64_fn);
1325 sql::create_function(db, "unhex", sqlite3_unhex_fn);
1326
1327 transaction guard(db);
1328
1329 migration_event const *m;
1330 for (m = migration_events + n_migration_events - 1;
1331 m >= migration_events; m--)
1332 if (schema == m->id)
1333 break;
1334
1335 E(m >= migration_events, origin::user,
1336 F("cannot test migration from unknown schema %s") % schema);
1337
1338 E(m->migrator_sql || m->migrator_func, origin::user,
1339 F("schema %s is up to date") % schema);
1340
1341 L(FL("testing migration from %s to %s\n in database '%s'")
1342 % schema % m[1].id % filename);
1343
1344 if (m->migrator_sql)
1345 sql::exec(db, m->migrator_sql);
1346 else
1347 m->migrator_func(db, keys);
1348
1349 // in the unlikely event that we get here ...
1350 P(F("successful migration to schema %s") % m[1].id);
1351 guard.commit();
1352}
1353
1354
1355// Local Variables:
1356// mode: C++
1357// fill-column: 76
1358// c-file-style: "gnu"
1359// indent-tabs-mode: nil
1360// End:
1361// vim: et:sw=2:sts=2:ts=2:cino=>2s,{s,\:s,+s,t0,g0,^-2,e-2,n-2,p2s,(0,=s:

Archive Download this file

Branches

Tags

Quick Links:     www.monotone.ca    -     Downloads    -     Documentation    -     Wiki    -     Code Forge    -     Build Status