#!/usr/bin/env bash
## dmp-mdb-iter.sh
## Dump a MariaDB / MySQL database to two kinds of files: a database/table definition file and chunked table values data files.
## By: Ctrl-S
## Created: 2020-10-04
## Modified: 2020-10-05


## == Config vars ==
## Set these to what is appropriate for you.
db_host="localhost"
db_username="root"
db_password="mypassword" # MariaDB password
db_name="mydb"
output_dir="${HOME}/db_dumps/" # Must have trailing slash, must be a dir, must exist.
logfile="dbdump_log.txt"
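## Optional sanity check (a sketch; drop it if unwanted): normalize the trailing slash so the
## "must have trailing slash" rule above is not load-bearing, and fail early if the dir is missing.
output_dir="${output_dir%/}/"
if [ ! -d "${output_dir}" ]; then
    echo "ERROR: output_dir '${output_dir}' does not exist." | tee -a "${logfile}"
    exit 1
fi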

## == Dynamically generated vars ==
## You probably don't need to touch these.
timestamp="$(date -u +%s)" ## Use a single timestamp for all of the dump tasks.
dump_name="${db_name}.ts${timestamp}" # Some sensible name for the dump.
## dump_name="${db_name}" # Uncomment for static filename.
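## dump_name="${db_name}.$(date -u +%Y%m%dT%H%M%SZ)" # Alternative (a sketch): human-readable UTC timestamp instead of seconds-since-epoch.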
defs_filepath="${output_dir}${dump_name}.defs.sql" # Filepath to store table definitions to.
data_filepath="${output_dir}${dump_name}.data.sql" # Filepath to store actual data to.


echo "This DB dump is named: ${dump_name}" | tee -a "${logfile}"


## == Dump defs ==
## Dump the DB and table definitions:
echo "Now dumping defs..." | tee -a "${logfile}"
defs_args=( # Use an array for better commenting of args.
    -h"${db_host}" -u"${db_username}" -p"${db_password}"
    --tz-utc # Convert to UTC for export so that timestamps are preserved across timezones.
    --quick # Fetch and dump one row at a time sequentially, for large databases.
    --opt
    --single-transaction # Use a transaction for consistent DB state in dump. (Needs InnoDB to do much.)
    --no-data # Do not store any rows.
    --skip-lock-tables # Prevent locking tables during dump process. (To prevent breaking asagi.)
    --result-file="${defs_filepath}" # Direct output to a given file.
    "${db_name}"
)
echo "defs_args=${defs_args[*]}" | tee -a "${logfile}" # Record args in the log.
echo "defs_args=( $(printf '%q ' "${defs_args[@]}") )" | tee -a "${logfile}" # TESTING: Record args with quoting and escaping for an easier time reusing them.
mysqldump "${defs_args[@]}"
## $ mysqldump -h"${db_host}" -u"${db_username}" -p"${db_password}" --tz-utc --quick --opt --single-transaction --no-data --skip-lock-tables --result-file="${defs_filepath}" "${db_name}"
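## Optional (a sketch): abort here if the defs dump exited nonzero, rather than looping over data dumps pointlessly.
if [ "$?" -ne 0 ]; then
    echo "ERROR: defs dump failed; aborting." | tee -a "${logfile}"
    exit 1
fi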
echo "Done dumping defs" | tee -a "${logfile}"
ls -lah "${defs_filepath}" | tee -a "${logfile}"


## == Dump data ==

## ===== ===== ===== =====
## <LOOP>
echo "Now dumping data..." | tee -a "${logfile}"
## Setup vars for the loop:
n_start="1" # Low end of range
n_end="100000" # High end of range
n_increment="100" # Rows at a time
n_tmp_low="" # Low end of current step
n_tmp_high="" # High end of current step
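## Sketch (uses a hypothetical TABLENAME placeholder; assumes the mysql client is on PATH):
## n_end could be derived from the table instead of hard-coded, e.g.:
## n_end="$(mysql -h"${db_host}" -u"${db_username}" -p"${db_password}" -N -B -e "SELECT MAX(doc_id) FROM TABLENAME;" "${db_name}")"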
## NOTE: Brace expansion ({START..END..INCREMENT}) cannot use variables, so seq generates the sequence instead.
## The trailing "${n_end}" makes sure the final partial chunk above the last full step is still dumped.
for n in $(seq "${n_start}" "${n_increment}" "${n_end}") "${n_end}"
do
    echo "n=${n}" | tee -a "${logfile}"
    echo "n='${n}'; n_tmp_low='${n_tmp_low}'; n_tmp_high='${n_tmp_high}';" | tee -a "${logfile}"
    ## Setup current range values:
    n_tmp_high="${n}"
    if [ -z "${n_tmp_low}" ]; then ## If the low end of the range is missing, we are in the first cycle and must prepare the values.
        # We need two n values.
        echo "Init range values"
        n_tmp_low="${n_tmp_high}" # Prep next cycle.
        continue
    fi
    if [ "${n_tmp_low}" -gt "${n_tmp_high}" ]; then
        continue # Range already covered (happens when n_end falls exactly on a step boundary).
    fi

    ## Prep data filepath for this cycle:
    data_filepath="${output_dir}${dump_name}.data.l${n_tmp_low}-h${n_tmp_high}.sql" # Filepath to store actual data to.

    # Prepare the WHERE condition (mysqldump adds the WHERE keyword itself, so pass only the condition):
    # LINK: https://dev.mysql.com/doc/refman/8.0/en/mysqldump.html#option_mysqldump_where
    ## From tokaro dump defs:
    ## PRIMARY KEY (`doc_id`),
    ## UNIQUE KEY `num_subnum_index` (`num`,`subnum`),
    stmt="doc_id >= ${n_tmp_low} AND doc_id <= ${n_tmp_high}"
    echo "stmt=${stmt}" | tee -a "${logfile}"

    ## Dump the table data:
    data_args=( # Use an array for better commenting of args.
        -h"${db_host}" -u"${db_username}" -p"${db_password}"
        --tz-utc # Convert to UTC for export so that timestamps are preserved across timezones.
        --quick # Fetch and dump one row at a time sequentially, for large databases.
        --opt
        --single-transaction # Use a transaction for consistent DB state in dump. (Needs InnoDB to do much.)
        --no-create-db # Suppress the CREATE DATABASE ... IF NOT EXISTS statement.
        --no-create-info # Do not write CREATE TABLE statements which re-create each dumped table.
        --skip-lock-tables # Prevent locking tables during dump process. (To prevent breaking asagi.)
        --where="${stmt}"
        --result-file="${data_filepath}" # Direct output to a given file.
        "${db_name}"
    )
    echo "data_args=( $(printf '%q ' "${data_args[@]}") )" | tee -a "${logfile}" # Record args in the log with quoting and escaping for easier reuse.
    mysqldump "${data_args[@]}"
    ## $ mysqldump -h"${db_host}" -u"${db_username}" -p"${db_password}" --tz-utc --quick --opt --single-transaction --no-create-db --no-create-info --skip-lock-tables --where="${stmt}" --result-file="${data_filepath}" "${db_name}"
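    ## Optional (a sketch): note any chunk whose dump exited nonzero, so it can be re-run later.
    if [ "$?" -ne 0 ]; then
        echo "WARNING: data dump failed for doc_id range ${n_tmp_low}-${n_tmp_high}" | tee -a "${logfile}"
    fi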
    ls -lah "${data_filepath}" | tee -a "${logfile}"

    n_tmp_low="$((n_tmp_high + 1))" # Prep next cycle; start just above the rows already dumped so chunks do not overlap.
done

echo "Done dumping data" | tee -a "${logfile}"

## </LOOP>
## ===== ===== ===== =====



# Show what we did, so the user knows the files are not empty.
echo "Files produced:" | tee -a "${logfile}"
ls -lah "${defs_filepath}" | tee -a "${logfile}"
ls -lah "${output_dir}${dump_name}.data."*".sql" | tee -a "${logfile}" # ALL data files from this job.


echo "Finished dumping DB" | tee -a "${logfile}"


## == Verbose notes ==
## dump_mariadb_split_long.sh
## Dump a MariaDB / MySQL database to two kinds of files: a database/table definition file and chunked table values data files.
## By: Ctrl-S
## Created: 2020-10-03
## Modified: 2020-10-05
##
##
## I believe the MariaDB and MySQL mysqldump commands behave about the same.
## LINK: https://mariadb.com/kb/en/mysqldump/
## LINK: https://dev.mysql.com/doc/refman/8.0/en/mysqldump.html
## A seconds-since-epoch name prevents accidental clobbering, and makes it trivial to tell when the dump was produced.
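## (To turn the tsNNNNNNNNNN part of a filename back into a readable date, GNU date can do: $ date -u -d "@TIMESTAMP" )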
##
##
## To import a dump produced by this script:
## !!! UNTESTED !!!
## ( May be destructive if you have a DB with the same name! Use caution. )
## ( The dump is made without --databases, so it has no CREATE DATABASE statement; "DBNAME" must already exist. )
## 1. $ mysql -u"USERNAME" -p "DBNAME" < dumpname.defs.sql
## 2. $ mysql -u"USERNAME" -p "DBNAME" < dumpname.data.sql
##
## Basically the table definitions must be imported before the table values data can be imported.
##
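## A sketch (also untested) for importing every chunked data file in one go, assuming the filename
## pattern produced above and that the defs file has already been imported:
## $ for f in dumpname.data.l*-h*.sql; do mysql -u"USERNAME" -p "DBNAME" < "${f}"; done
## Chunk import order does not matter, since each chunk covers a distinct doc_id range.
##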
##
##
##
## According to POSIX: "foo//bar" means the same as "foo/bar".
## LINK: https://stackoverflow.com/questions/11226322/how-to-concatenate-two-strings-to-build-a-complete-path#24026057
## LINK: https://en.wikibooks.org/wiki/Bourne_Shell_Scripting/Variable_Expansion
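## e.g. "${HOME}/db_dumps//mydb.defs.sql" and "${HOME}/db_dumps/mydb.defs.sql" name the same file, so a stray extra slash in output_dir is harmless.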
##
##
## LINK: https://dev.mysql.com/doc/refman/8.0/en/mysqldump.html#option_mysqldump_where