· 6 months ago · Mar 20, 2025, 03:00 AM
1// Scripted by iMagic
2// ZippySQL v1.0
3
4// This is the folder backups are saved to.
5// It should not be included with the other paths
6// since the backups folder itself should not be backed up
7//
8// Make sure (npcserver) has rw rights to this!
9// Additionally, add the following to your folder config:
10
11// file backups/*.*
12
13const BACKUP_FOLDER = "backups/";
14
15// This is the SQLite database Zippy connects to.
16// Make sure to add the following to your serverops
17//
18// database=zippy,zippy
19const DATABASE = "zippy";
20
21// Whether or not to send an echo of current backup processes
22const DEBUG_PROGRESS = false;
23
24// DBNPC properties can have a max length (string length, not array size!)
25// of 60,000 characters. I believe data can still be stored beyond that point
// without causing errors, it would just be corrupted with unpredictable behavior.
27//
28// We'll use this value to spread data across different
29// properties dynamically so that this isn't an issue
30
31// DO NOT CHANGE THIS VALUE
32const PROPERTY_OVERLOAD_LENGTH = 60000;
33
34// After a conversation with Merlin we found
35// that the game server has a `scriptwrite` limit for
36// file system operations:
37
38// U32 TServerOptions::scriptwritelimit = 10000000;
39// Going over this limit within an unknown timeframe
40// (assumed to be 1 minute) seems to be what causes
41// flood alerts.
42
43// DO NOT CHANGE THIS VALUE
44const SERVER_WRITE_LIMIT = 10000000;
45
46// Feel free to change these at will
enum FLOOD_CONTROL {
  // How many files to attempt processing at once.
  //#
  // Greater values increase the risk of flood alerts
  // Smaller values make backup processes last longer
  CHUNK_SIZE = 50,
  // Filesystem read/write operations in this script trigger
  // a "tick". The write limit is the max amount of ticks
  // we can trigger before enforcing a pause on the script.
  // Surpassing the SERVER_WRITE_LIMIT value too quickly is
  // what causes a flood alert. Since this is a server-wide
  // value, we'll attempt to use no more than 0.1% of it
  // (the 0.001 multiplier below). If you own a smaller server
  // you should be able to raise the multiplier to 0.01 for 1%.
  //#
  // Larger values increase the risk of flood alerts
  WRITE_LIMIT = SERVER_WRITE_LIMIT * /* 0.1% */ 0.001,
  // Another reason why the script may decide to take
  // a nap is because it's causing too much stress on
  // the server. Adjust the max CPU usage (0..1 fraction,
  // 0.45 = 45%) to match your server's needs.
  //#
  // Larger values reduce the risk of flood alerts
  // Lower values cause the entire backup process
  // to last longer
  CPU_STRESS_THRESHOLD = 0.45,
  // After "ticks" reaches its threshold, this determines
  // how many seconds the script will sleep for.
  // Ideally this value should remain 60 since the server
  // write limit seems to be on a one-minute timer before
  // allowing more write operations.
  //#
  // Smaller values increase the risk of flood alerts
  NAP_TIME = 60
};
84
85// Enums for paths
86// Make sure the (npcserver) has AT LEAST read access to these folders!
enum PATHS {
  // Every entry must keep its trailing slash: the folder check in
  // expandPathsToFiles/isPathCovered relies on paths ending with "/".
  ACCOUNTS = "accounts/",
  DATABASES = "databases/",
  NPCS = "npcs/",
  LOGS = "logs/",
  LEVELS = "levels/",
  TEMP = "temp/",
  SCRIPT_FILES = "scriptfiles/",
  SCRIPTS = "scripts/",
  STORAGE = "storage/",
  WEAPONS = "weapons/"
};
99
enum SAVE_OPTIONS {
  // One bit per backup target. Each entry must map 1:1 to a PATHS
  // entry (see getPathForOption) and WEAPONS must stay the highest
  // bit, since SAVE_OPTIONS_ALL is derived from it.
  ACCOUNTS = 1 << 0,
  DATABASES = 1 << 1,
  NPCS = 1 << 2,
  LOGS = 1 << 3,
  LEVELS = 1 << 4,
  TEMP = 1 << 5,
  SCRIPT_FILES = 1 << 6,
  SCRIPTS = 1 << 7,
  STORAGE = 1 << 8,
  WEAPONS = 1 << 9
};
112
113// Compute it dynamically based on the highest bit in the enum
114// Update this if the highest bit changes!!
115const SAVE_OPTIONS_ALL = (SAVE_OPTIONS.WEAPONS << 1) - 1;
116
117// Handy shorthands for setting up
118// backup lifespans
enum TIME {
  // Handy shorthands (in seconds) for backup lifespans
  MINUTE = 60,
  HOUR = 3600,
  DAY = 86400,
  WEEK = 604800,
  AVG_MONTH = 2630016,
  // We use NEVER to more explicitly say that we don't
  // want a backup to be deleted. Moreover, we specifically
  // add NEVER at this position so that it inherits the value
  // of AVG_MONTH + 1, which helps differentiate it from NULL
  // in logic checks
  NEVER
};
132
133// This is the default lifetime for every backup.
134// For backups to persist forever or until manually deleted
135// from the database (WHICH IS NOT RECOMMENDED) TIME.NEVER must be passed to
136// onStartBackup
137const DEFAULT_BACKUP_LIFESPAN = TIME.AVG_MONTH;
138
function onInitialized() {
  // Defer startup so the NPC-server finishes loading before onCreated runs.
  // NOTE(review): every other call site in this file uses the
  // this.scheduleEvent(delay, eventName, ...) argument order (see onZipPaths
  // scheduling); here the first argument is `this` instead — confirm this
  // (object, eventName, delay) overload is intended.
  this.scheduleEvent(this, "onCreated", 5);
}
142
function onCreated() {
  // Abort any backup iterations that survived a script restart
  this.cancelevents("onZipPaths");

  // Some servers permit raising the script loop limit;
  // where unsupported this assignment is simply ignored
  this.looplimit = 0xffffffff;

  // Uncomment and run once on a fresh database to create the SQL tables
  //onInitialSetup();
}
153
154/************************************************************************
155*************************** EXAMPLES ******************************
156************************************************************************/
157
158// The example functions all have a function modifier of 'public'.
159// This is just to show that these functions can be called
160// externally by other scripts like, say, a cron scheduler.
161
// BUGFIX: declared `public` to match the section comment above ("The
// example functions all have a function modifier of 'public'") and the
// other two example functions.
public function onFullBackupExample() {
  // In this THEORETICAL scenario, we set a configuration
  // bit that toggles most of the folders, leaving out the
  // levels folder because of how big it can be (not that
  // all of the others combined aren't!).
  //
  // Although this tool can theoretically
  // backup all of the files on the server,
  // it should never be used for that purpose.

  temp.config =
    SAVE_OPTIONS.NPCS |
    SAVE_OPTIONS.DATABASES |
    SAVE_OPTIONS.WEAPONS |
    SAVE_OPTIONS.LOGS |
    SAVE_OPTIONS.STORAGE |
    SAVE_OPTIONS.SCRIPT_FILES |
    SAVE_OPTIONS.SCRIPTS;

  // Which can also be written as "everything except levels,
  // accounts and temp":
  //temp.config = SAVE_OPTIONS_ALL & ~(SAVE_OPTIONS.LEVELS | SAVE_OPTIONS.ACCOUNTS | SAVE_OPTIONS.TEMP);
  // (The old comment showed SAVE_OPTIONS.LEVELS & ~SAVE_OPTIONS_ALL,
  // which evaluates to 0 — the mask and the complement were swapped.)

  // This initiates the backup process:
  // no custom identifier, no zip written to disk, expires in a week
  onStartBackup(null, temp.config, false, TIME.WEEK);
}
187
public function onLogsBackupExample() {
  /**
  In this example we create a backup of JUST the logs folder that expires
  after a week (TIME.WEEK is what is actually passed below — the original
  text said "every month"). It also immediately outputs the backup into
  the backups folder as a zip file.

  - The first argument is an identifier of sorts that is added
    to the zip file's filename so that you can tell them apart
    easier.

  - The second argument is the bit for the LOGS folder.

  - The third argument is a boolean that determines whether or not
    the backup is written as a zip file to the backups folder.

  - The fourth argument is the lifespan in seconds.

  With this configuration we can expect a similar filename
  to be created for our backup file

  backups/2025_03_14_logs_backup_69d8e420.zip
  */

  // This initiates the backup process
  onStartBackup("logs_backup", SAVE_OPTIONS.LOGS, true, TIME.WEEK);
}
212
public function onCustomBackupExample() {
  // This example builds a more elaborate backup that only lives for
  // 3 days. The `additional_paths` argument of onStartBackup accepts
  // arbitrary paths, which makes it very flexible about what gets
  // backed up.

  // Note: the paths below are made up purely for demonstration.
  temp.extra_paths = {
    // A whole folder: everything inside the favorite books folder
    "books/favorites/",
    // A single file
    "books/scifi/snow_crash.pdf", // Neal Stephenson's cyberpunk novel that coined the term "Metaverse"
    // A wildcard pattern: only the '.png' covers matching it.
    // '*' matches anything in that position of the filename.
    "books/covers/cover_*.png"
  };

  // A filename along these lines will be produced for the backup:
  // backups/2025_03_09_books-images_69d8e420.zip

  // No options bitfield here (null) — only the custom paths are used.
  onStartBackup("books-images", null, true, TIME.DAY * 3, temp.extra_paths);
}
240
241/************************************************************************
242************************ CORE FUNCTIONS ***************************
243************************************************************************/
244
// One-time setup: creates the SQLite tables ZippySQL needs.
// Intended to be called manually (see the commented call in onCreated)
// when the database is brand new.
function onInitialSetup() {
  onBackupEcho(format("Setting up ZippySQL..."));

  // Enable auto vacuuming after row deletion (mode 2 = incremental).
  // This helps reduce the filesize of the SQLite database when
  // large rows are deleted. It does come with some overhead costs
  // that are worth it. This must be done while the database is new
  // and there are not yet any tables.
  onExecuteSQL("PRAGMA auto_vacuum = 2");

  // Create the backups table: the finished zip blobs live here.
  // remove_at is the expiry timestamp (0 = never, see onFinishZip).
  temp.sql = onExecuteSQL(
    "CREATE TABLE IF NOT EXISTS backups (" @
    "id TEXT NOT NULL UNIQUE," @
    "created_at INTEGER NOT NULL," @
    "remove_at INTEGER," @
    "zip BLOB NOT NULL" @
    ")", true
  );

  if (temp.sql.error) {
    onBackupEcho("Failed to create backups table:" SPC temp.sql.error);
    return;
  }

  // Index for fast id lookups on the backups table
  temp.sql = onExecuteSQL(
    "CREATE UNIQUE INDEX IF NOT EXISTS idx_backups_id_unique ON backups(id)"
  );

  if (temp.sql.error) {
    onBackupEcho("Failed to create index for backups table:" SPC temp.sql.error);
    return;
  }

  // Create the process table: one row per in-flight backup with its
  // progress counters and the comma-separated list of pending paths.
  // NOTE(review): the error message below says "paths table" although
  // the table is named `process` — kept as-is since it's a runtime string.
  temp.sql = onExecuteSQL(
    "CREATE TABLE IF NOT EXISTS process (" @
    "id TEXT NOT NULL UNIQUE," @
    "total_files INTEGER NOT NULL DEFAULT 0," @
    "processed_files INTEGER NOT NULL DEFAULT 0," @
    "processed_big_files INTEGER NOT NULL DEFAULT 0," @
    "last_logged_percent REAL NOT NULL DEFAULT 0," @
    "read_errors INTEGER NOT NULL DEFAULT 0," @
    "start INTEGER NOT NULL," @
    "paths TEXT" @
    ")", true
  );

  if (temp.sql.error) {
    onBackupEcho("Failed to create paths table:" SPC temp.sql.error);
    return;
  }

  // Create the chunks table: one row per backed-up file
  // (hex-encoded content) until it is folded into the final zip.
  temp.sql = onExecuteSQL(
    "CREATE TABLE IF NOT EXISTS chunks (" @
    "id TEXT NOT NULL," @
    "filename TEXT NOT NULL," @
    "content BLOB NOT NULL" @
    ")", true
  );

  if (temp.sql.error) {
    onBackupEcho("Failed to create chunks table:" SPC temp.sql.error);
    return;
  }

  onBackupEcho("Setup complete!");
}
316
/**
 * Starts a backup process with the given parameters.

 * @param {string} temp.custom_identifier
    A custom identifier to include in the backup name.

 * @param {number} temp.options
    A bitfield representing the backup options. Each bit corresponds
    to a specific path to include in the backup (see SAVE_OPTIONS).

 * @param {boolean} temp.output_file
    Whether to output the backup as a file in the backups folder.
    If `false`, the backup remains as a record in the SQLite database
    until its lifetime expires.

 * @param {number | null} temp.lifespan
    An amount of seconds to keep the backup on the database.
    Values <= 0 fall back to DEFAULT_BACKUP_LIFESPAN and TIME.NEVER
    makes the backup permanent (see onFinishZip).

 * @param {array} temp.additional_paths
    Additional paths to include in the backup.
*/
function onStartBackup(
  temp.custom_identifier = "",
  temp.options = 0,
  temp.output_file = false,
  temp.lifespan = 0,
  temp.additional_paths = {}
) {
  // Prune expired backups before adding a new one
  onPruneExpiredBackups();

  // Create a backup identifier name.
  // Format is: YYYY_MM_DD_identifier?_randomhex
  // (date only — no hour/minute component is included)
  temp.id = format(
    "%s_%s%s",
    formattimestring("%Y_%m_%d", timevar2),
    temp.custom_identifier == NULL ? "" : temp.custom_identifier @ "_",
    // This will help identify our process if there are multiple backups happening at once
    generateRandomChars(8)
  );

  //#
  temp.paths = {};

  // Iterate over the options bitfield adding enabled paths
  if (temp.options != NULL) {
    for (temp.bit = 1; temp.bit <= SAVE_OPTIONS_ALL; temp.bit <<= 1) {
      // Check whether or not the current bit is toggled
      temp.enabled = (temp.options & temp.bit) != 0;
      if (!temp.enabled) { continue; }

      // Get the folder for this bit and add it to the paths list,
      // assuming we haven't added it before
      temp.path = getPathForOption(temp.bit);
      if (temp.path && !(temp.path in temp.paths)) {
        temp.paths.add(temp.path);
      }
    }
  }

  // Merge in any caller-provided paths and collapse the combined
  // list to top-level entries (drops paths covered by a folder/wildcard)
  if (temp.additional_paths.size() > 0) {
    temp.paths.addarray(temp.additional_paths);
    temp.paths = getTopLevelPaths(temp.paths);
  }

  // Create the process row that tracks this backup's progress.
  // NOTE(review): temp.id is server-generated hex + date, but
  // temp.custom_identifier flows into it unescaped — callers should
  // avoid quotes in identifiers since queries are built by string
  // interpolation throughout this script.
  temp.query = onExecuteSQL(format(
    "INSERT INTO process (id, start) " @
    "VALUES ('%s', %d)",
    temp.id, timevar2
  ), true);

  // Resolve all of the paths into concrete files
  // (stored on the process row / chunk batches)
  expandPathsToFiles(temp.id, temp.paths);

  //#
  temp.query = onExecuteSQL(format(
    "SELECT total_files FROM process WHERE id = '%s' LIMIT 1",
    temp.id
  ));

  // Nothing resolved (or the row is missing) — abort and clean up
  if (temp.query.error || !temp.query.total_files) {
    onBackupEcho(format(
      "Warning! No paths to backup for '%s'! Stopping...",
      temp.id
    ));

    // Delete any residual records that might've been created
    onDeleteBackupRecords(temp.id);
    return;
  }

  // Log the start of the backup process
  onBackupEcho(format(
    "Starting backup process with id: %s..." @
    "\n\tTotal files to backup: %d" @
    "\n\tEstimated duration: %s",
    temp.id,
    temp.query.total_files,
    estimateBackupDuration(temp.query.total_files)
  ));

  // Kick off the incremental zipping loop
  this.scheduleEvent(5, "onZipPaths", temp.id, temp.output_file, temp.lifespan);
}
425
// One iteration of the backup loop: reads up to CHUNK_SIZE pending
// files for process `temp.id`, stores their contents in the chunks
// table, updates progress on the process row, then re-schedules itself.
// Hands off to onFinishZip once no paths remain.
function onZipPaths(temp.id, temp.output_file, temp.lifespan) {
  if (!temp.id) { return; }

  // Get the data for the current process
  temp.process = onExecuteSQL(format(
    "SELECT * FROM process WHERE id = '%s' LIMIT 1",
    temp.id
  ), true);

  // The record of this process could not be found...
  if (temp.process.error) {
    onBackupEcho(format("Could not find process record for process: %s. Aborting...", temp.id));
    onDeleteBackupRecords(temp.id);
    return;
  }

  // Take the next batch of pending paths (at most CHUNK_SIZE).
  // NOTE(review): temp.process.paths comes back from SQL as the stored
  // comma-separated TEXT — this relies on GS2 exposing it as an array
  // here (subarray/size are used); confirm on the target server version.
  temp.paths = temp.process.paths.subarray(0, min(temp.process.paths.size(), FLOOD_CONTROL.CHUNK_SIZE));
  temp.paths_size = temp.paths.size();

  // If we have no more files to process...
  if (temp.paths_size <= 0) {
    this.scheduleEvent(5, "onFinishZip", temp.id, temp.output_file, temp.lifespan);
    return;
  }

  // String length of this batch — used below to slice the processed
  // prefix off the stored comma-separated paths column
  temp.paths_length = temp.paths.length();

  for (temp.i = 0; temp.i < temp.paths_size; temp.i++) {
    temp.filename = temp.paths[temp.i];

    // If this file didn't exist, count a read error and move on
    if (!fileexists(temp.filename)) {
      temp.process.read_errors++;
      onTick();
      continue;
    }

    // Load the contents of the file (counts as a filesystem tick)
    temp.content.loadstring(temp.filename);
    onTick();

    // Hex-encode the content so it can be embedded as a SQL blob literal
    temp.content = bintohex(temp.content);
    temp.content_length = temp.content.length();

    // Empty/unreadable content counts as a read error
    if (temp.content_length <= 0) {
      temp.process.read_errors++;
      continue;
    }

    //#
    temp.process.processed_files++;
    temp.cost = temp.filename.length() + temp.content.length();
    temp.is_big_file = (temp.cost > PROPERTY_OVERLOAD_LENGTH);

    // Track files that would overflow a single DBNPC property
    if (temp.is_big_file) {
      temp.process.processed_big_files++;
    }

    // NOTE(review): filename is interpolated unescaped — a filename
    // containing a single quote would break this statement.
    onExecuteSQL(format("INSERT INTO chunks VALUES ('%s', '%s', x'%s')", temp.id, temp.filename, temp.content));
  }

  // Calculate current progress percentage (0..1 fraction)
  temp.process.last_logged_percent = temp.process.processed_files / temp.process.total_files;
  temp.closest_tenth = closestTenPercent(temp.process.last_logged_percent);

  // Log progress at percentage intervals (e.g., every 10%)
  if (DEBUG_PROGRESS && temp.closest_tenth > temp.process.last_logged_percent) {
    onBackupEcho(format(
      "Backup progress (%s): %d/%d files processed (%d%%) [%d big files]",
      temp.id,
      temp.process.processed_files,
      temp.process.total_files,
      temp.closest_tenth * 100,
      temp.process.processed_big_files
    ));
  }

  // Persist the new counters and strip the processed batch from the
  // front of `paths`: SUBSTR(paths, batch_length + 2) skips the batch
  // text plus its trailing comma (SQLite SUBSTR is 1-indexed).
  onExecuteSQL(format(
    "UPDATE process " @
    "SET " @
    "processed_files = %d," @
    "processed_big_files = %d," @
    "last_logged_percent = %f," @
    "read_errors = %d," @
    "paths = CASE " @
    "WHEN LENGTH(paths) <= %d THEN NULL " @
    "ELSE SUBSTR(paths, %d + 2) " @
    "END " @
    "WHERE id = '%s'",
    temp.process.processed_files,
    temp.process.processed_big_files,
    temp.process.last_logged_percent,
    temp.process.read_errors,
    temp.paths_length, temp.paths_length,
    temp.id
  ));

  // Schedule the next iteration (NAP_TIME * 0.1 = 6s with defaults)
  this.scheduleEvent(FLOOD_CONTROL.NAP_TIME * 0.1, "onZipPaths", temp.id, temp.output_file, temp.lifespan);
}
535
// Final stage of a backup: drains all stored chunks for `temp.id`,
// builds the zip, inserts it into the backups table and (optionally)
// writes it to BACKUP_FOLDER on disk.
function onFinishZip(temp.id = "", temp.output_file = false, temp.lifespan = 0) {
  if (!temp.id) { return; }

  // Solidify the lifespan of the zip file:
  // <= 0 falls back to the default, TIME.NEVER becomes null (permanent).
  // (TIME.NEVER is AVG_MONTH + 1, i.e. positive, so the order of these
  // two branches is safe.)
  if (temp.lifespan <= 0) {
    temp.lifespan = DEFAULT_BACKUP_LIFESPAN;
  } else if (temp.lifespan == TIME.NEVER) {
    temp.lifespan = null;
  }

  onBackupEcho("Building final zip file...");

  // Get the data for the current process (progress counters for the
  // summary echo at the bottom)
  temp.process = onExecuteSQL(format(
    "SELECT * FROM process WHERE id = '%s'",
    temp.id
  ));

  // We were not able to load the process record
  if (temp.process.error) {
    onBackupEcho(format(
      "Warning! Could not load backup file '%s' in final step. Aborting...",
      temp.id
    ));

    onDeleteBackupRecords(temp.id);
    return;
  }

  // This loads as many as FLOOD_CONTROL.CHUNK_SIZE chunks
  // into the array and removes the returned rows
  temp.chunks = onFetchChunksDestructive(temp.id);
  temp.chunks_size = temp.chunks.rows.size();

  // If no chunks were found or there was an error...
  if (temp.chunks.error || temp.chunks_size <= 0) {
    onBackupEcho(format(
      "Warning! Could not load backup file '%s' in final step. Aborting...",
      temp.id
    ));

    onDeleteBackupRecords(temp.id);
    return;
  }

  // Accumulates {filename, binary content} pairs for generatezipstring
  temp.zip = {};

  do {
    // Keep track of the amount of chunks we started with...
    temp.initial_chunks = temp.chunks_size;

    // Fold this batch of chunks into the zip input
    // (content is stored hex-encoded; decode it back to binary)
    for (temp.chunk : temp.chunks.rows) {
      temp.chunks_size--;
      temp.zip.addarray({temp.chunk.filename, hextobin(temp.chunk.content)});
    }

    // A full batch means there may still be chunks left to fetch...
    if (temp.initial_chunks >= FLOOD_CONTROL.CHUNK_SIZE) {
      temp.chunks = onFetchChunksDestructive(temp.id);
      temp.chunks_size = temp.chunks.rows.size();
    }
  } while (temp.chunks_size > 0);

  // Remove the process and any leftover chunks
  onExecuteSQL(format("DELETE FROM process WHERE id = '%s'", temp.id));
  onExecuteSQL(format("DELETE FROM chunks WHERE id = '%s'", temp.id));

  // Convert the collected files into the final zip string
  temp.zip = generatezipstring(temp.zip);

  // Store the zip. remove_at = 0 marks a permanent backup
  // (see onPruneExpiredBackups).
  temp.backup = onExecuteSQL(format(
    "INSERT INTO backups VALUES('%s', %d, %d, x'%s')",
    temp.id,
    timevar2,
    temp.lifespan == null ? 0 : timevar2 + temp.lifespan,
    bintohex(temp.zip)
  ), true);

  if (temp.backup.error) {
    onBackupEcho(format("Aborted! Failed to add backup '%s' with error: %s", temp.id, temp.backup.error));
    onDeleteBackupRecords(temp.id);
    return;
  }

  // Optionally also write the zip to disk (counts as a write tick)
  if (temp.output_file) {
    temp.filename = format("%s%s.zip", BACKUP_FOLDER, temp.id);
    temp.zip.savestring(temp.filename, 0);
    onTick();
  }

  // Final summary echo
  onBackupEcho(format(
    "Finished backup '%s':" @
    "\n\tFiles Processed: %d/%d (%d read errors)" @
    "\n\tBig Files Processed: %d" @
    "\n\tDuration: %s" @
    "%s",
    temp.id,
    temp.process.processed_files, temp.process.total_files, temp.process.read_errors,
    temp.process.processed_big_files,
    getMinutesAndSecondsFromTime(timevar2 - temp.process.start),
    temp.output_file ? format("\n\tBackup file: %s (%s)", temp.filename, getFormattedFileSize(filesize(temp.filename))) : ""
  ));
}
646
// Purges every trace of a backup id across all three tables.
// The emitted queries are identical to issuing one DELETE per table.
function onDeleteBackupRecords(temp.id = "") {
  for (temp.table : {"backups", "process", "chunks"}) {
    onExecuteSQL(format("DELETE FROM %s WHERE id = '%s'", temp.table, temp.id));
  }
}
652
// Deletes every backup whose expiry timestamp has passed, then
// vacuums to reclaim the space freed by the deleted zip blobs.
//
// BUGFIX: the previous query was "DELETE * FROM backups WHERE
// lifespan >= 0 AND lifespan <= %d":
//  - "DELETE *" is not valid SQLite syntax (DELETE takes no column list)
//  - the backups table has no `lifespan` column; the expiry timestamp
//    is stored in `remove_at` (see onInitialSetup / onFinishZip)
//  - `>= 0` would also match remove_at = 0, which onFinishZip stores
//    for permanent (TIME.NEVER) backups — those must never be pruned
function onPruneExpiredBackups() {
  onExecuteSQL(format(
    "DELETE FROM backups WHERE remove_at > 0 AND remove_at <= %d; VACUUM;", timevar2
  ));
}
658
// Exports a stored backup back to disk as BACKUP_FOLDER/<id>.zip.
// Fetches the newest backup row matching the id and decodes its blob.
function onExportBackup(temp.id = "") {
  // Get the most recent backup for this id.
  // BUGFIX: was "ORDER BY created_at DEC" — the SQLite keyword for
  // descending order is DESC; DEC is a syntax error.
  temp.backup = onExecuteSQL(format(
    "SELECT HEX(zip) AS zip " @
    "FROM backups " @
    "WHERE id = '%s'" @
    "ORDER BY created_at DESC " @
    "LIMIT 1",
    temp.id
  ));

  if (temp.backup.error || temp.backup.rows.size() <= 0) {
    onBackupEcho(format("Error! Could not find backup file for id: '%s'", temp.id));
    return;
  }

  // Decode the hex blob and write it out.
  // BUGFIX: was BACKUPS_FOLDER, which is undefined (the constant at the
  // top of the file is BACKUP_FOLDER) — the file would have been written
  // to the server root instead of the backups folder.
  temp.content = hextobin(temp.backup.zip);
  temp.content.savestring(format("%s%s.zip", BACKUP_FOLDER, temp.id), 0);
}
678
// Appends a flushed batch of resolved file paths to a process row and
// bumps its total_files counter. `temp.files` (a GS2 array) is
// serialized into the SQL text as a comma-separated list.
//
// BUGFIX: when `paths` already had content, the new batch was appended
// with a bare `paths || '%s'` — no separator — so the last path of the
// previous batch and the first path of the new batch fused into a single
// corrupt entry. onZipPaths slices this column back apart assuming comma
// separators (see its SUBSTR(paths, n + 2) logic), so a ',' must be
// inserted between batches.
function onUpdateFilePaths(temp.id = "", temp.files = {}) {
  onExecuteSQL(format(
    "UPDATE process " @
    "SET " @
    "total_files = total_files + %d, " @
    "paths = CASE " @
    "WHEN paths IS NULL THEN '%s' " @
    "ELSE paths || ',' || '%s' " @
    "END " @
    "WHERE id = '%s'",
    temp.files.size(), temp.files, temp.files, temp.id
  ));
}
692
// Pops up to CHUNK_SIZE chunk rows for a process id: selects them,
// then deletes that same window (matching WHERE/ORDER/LIMIT) so the
// next call returns the following batch. Returns the SELECT result.
function onFetchChunksDestructive(temp.id = "") {
  temp.result = onExecuteSQL(format(
    "SELECT filename, HEX(content) AS content FROM chunks " @
    "WHERE id = '%s' " @
    "ORDER BY filename " @
    "LIMIT %d; ",
    temp.id, FLOOD_CONTROL.CHUNK_SIZE
  ));

  // Delete exactly the rows we just fetched. The subquery repeats the
  // same ordering and limit, so it targets the identical window.
  onExecuteSQL(format(
    "DELETE FROM chunks " @
    "WHERE rowid IN ( " @
    "SELECT rowid FROM chunks " @
    "WHERE id = '%s' " @
    "ORDER BY filename " @
    "LIMIT %d " @
    ") ",
    temp.id, FLOOD_CONTROL.CHUNK_SIZE
  ));

  return temp.result;
}
715
716/************************************************************************
717************************ UTILITY FUNCTIONS ************************
718************************************************************************/
// Console output with a "[ZippySQL]:" prefix so Zippy messages are
// easy to spot in the server log.
function onBackupEcho(temp.msg) {
  echo(format("[ZippySQL]: %s", temp.msg));
}
723
// Debug-level console output, prefixed "[ZippySQL Debug]:".
function onBackupDebug(temp.msg) {
  echo(format("[ZippySQL Debug]: %s", temp.msg));
}
727
// Samples process CPU usage at most once per cooldown window
// (a quarter of NAP_TIME) and stalls the script when it exceeds
// FLOOD_CONTROL.CPU_STRESS_THRESHOLD.
function onCPUTick() {
  if (this.last_cpu_check < timevar2) {
    // Arm the next check a quarter nap into the future
    this.last_cpu_check = timevar2 + FLOOD_CONTROL.NAP_TIME * 0.25;

    if (getprocesscpuusage() > FLOOD_CONTROL.CPU_STRESS_THRESHOLD) {
      // BUGFIX: was `onBackupDEbuc(...)` — a typo'd call to a function
      // that doesn't exist, so the break was never announced.
      onBackupDebug("Zippy is taking a CPU usage break");
      sleep(FLOOD_CONTROL.NAP_TIME);
    }
  }
}
738
// Called once per filesystem read/write. Checks CPU pressure, counts
// the operation, and naps once the per-window write budget is spent.
function onTick() {
  onCPUTick();

  this.ticks++;
  if (this.ticks < FLOOD_CONTROL.WRITE_LIMIT) { return; }

  // Budget exhausted — reset the counter and sleep out the window
  this.ticks = 0;
  sleep(FLOOD_CONTROL.NAP_TIME);
}
752
// Maps a single SAVE_OPTIONS bit to its PATHS folder string.
// Returns NULL for anything that isn't exactly one known bit.
function getPathForOption(temp.option) {
  switch (temp.option) {
    case SAVE_OPTIONS.ACCOUNTS:     return PATHS.ACCOUNTS;
    case SAVE_OPTIONS.DATABASES:    return PATHS.DATABASES;
    case SAVE_OPTIONS.LEVELS:       return PATHS.LEVELS;
    case SAVE_OPTIONS.LOGS:         return PATHS.LOGS;
    case SAVE_OPTIONS.NPCS:         return PATHS.NPCS;
    case SAVE_OPTIONS.SCRIPTS:      return PATHS.SCRIPTS;
    case SAVE_OPTIONS.SCRIPT_FILES: return PATHS.SCRIPT_FILES;
    case SAVE_OPTIONS.STORAGE:      return PATHS.STORAGE;
    case SAVE_OPTIONS.TEMP:         return PATHS.TEMP;
    case SAVE_OPTIONS.WEAPONS:      return PATHS.WEAPONS;
  }

  return NULL;
}
769
/**
 * Returns a sorted array with duplicate elements removed, using a
 * two-pointer compaction pass over a sorted copy.
 *
 * NOTE(review): the original doc claimed this "modifies the input
 * array" — the code assigns temp.arr to temp.list and sorts that;
 * whether the caller's array is affected depends on GS2's
 * copy-on-assignment semantics for arrays (assumed copy — confirm).
 * Either way the deduplicated result is the returned array.
 *
 * @param {Array} temp.arr - The array to make unique.
 * @returns {Array} - A sorted array with duplicates removed.
 */
function makeUniqueTwoPointer(temp.arr = {}) {
  if (temp.arr.size() <= 1) { return temp.arr; }

  // Create a copy of the array (see note above about GS2 semantics)
  temp.list = temp.arr;
  temp.list_size = temp.list.size();

  // Sorting groups duplicates so they become adjacent
  temp.list.sortascending();

  // Start at 1 since the first element is always unique
  temp.write_index = 1;

  for (temp.read_index = 1; temp.read_index < temp.list_size; temp.read_index++) {
    // If current element is different from previous, it's unique
    if (temp.list[temp.read_index] != temp.list[temp.read_index - 1]) {
      temp.list[temp.write_index] = temp.list[temp.read_index];
      temp.write_index++;
    }
  }

  // Everything prior to the write index is unique
  return temp.list.subarray(0, temp.write_index);
}
802
/**
 * Reduces an array of paths to its top-level entries: directories and
 * files that are not already covered by some other folder or wildcard
 * pattern in the same array. Duplicates are removed first.
 *
 * @param {Array} temp.paths - The paths to be processed.
 * @returns {Array} - Only the top-level (uncovered) paths.
 */
function getTopLevelPaths(temp.paths = {}) {
  // Sort + dedupe up front so the cover test never sees duplicates
  temp.paths = makeUniqueTwoPointer(temp.paths);
  temp.count = temp.paths.size();
  temp.result = {};

  if (temp.count <= 0) {
    return temp.result;
  }

  for (temp.i = 0; temp.i < temp.count; temp.i++) {
    temp.candidate = temp.paths[temp.i];
    temp.covered = false;

    // A candidate survives only if no OTHER entry covers it
    for (temp.j = 0; temp.j < temp.count; temp.j++) {
      if (temp.j == temp.i) { continue; }

      if (isPathCovered(temp.candidate, temp.paths[temp.j])) {
        temp.covered = true;
        break;
      }
    }

    if (!temp.covered) {
      temp.result.add(temp.candidate);
    }
  }

  return temp.result;
}
846
/**
 * This function checks whether or not a path is covered by a potential parent.
 * A parent covers a path when the parent is a folder prefix of the path,
 * or a wildcard pattern matching it.
 *
 * @returns {Boolean} - Whether or not the path is covered by the parent.
 */
function isPathCovered(temp.path = "", temp.parent = "") {
  // Identical paths count as covered (i.e. one of the two is redundant).
  // In practice getTopLevelPaths dedupes first and skips i == j, so this
  // branch only guards stray duplicate comparisons.
  if (temp.path == temp.parent) { return true; }

  // Folder = ends with "/" and contains no filename component;
  // wildcard = contains at least one "*"
  temp.parent_is_folder = (temp.parent.ends("/") && temp.parent == extractfilepath(temp.parent));
  temp.parent_has_wildcard = temp.parent.pos("*") > -1;

  if (temp.parent_is_folder) {
    // Covered when the path lives somewhere inside the parent folder
    return temp.path.starts(temp.parent) && (temp.path != temp.parent);
  }

  if (temp.parent_has_wildcard) {
    // Split the parent into prefix and suffix around the first wildcard
    // (only a single "*" is supported by this check)
    temp.tokens = temp.parent.tokenize("*");
    temp.prefix = temp.tokens[0];
    temp.suffix = temp.tokens[1];

    // If the path starts with the prefix and ends with the suffix
    if (temp.path.starts(temp.prefix) && temp.path.ends(temp.suffix)) {
      return true;
    }

    // If there's no suffix, just check if the path starts with the prefix
    if (!temp.suffix) {
      return temp.path.starts(temp.prefix);
    }
  }

  // Otherwise, the path is not covered
  return false;
}
885
/**
  This function takes an array of paths and resolves them into concrete
  files, flushing them to the process row in batches via
  onUpdateFilePaths so no single batch exceeds a DBNPC property's
  safe length.

  This step is mostly for paths which are folders or may have a
  wildcard. Otherwise the path would already point to a file.

  BUGFIX: previously, when adding a path would overflow the batch, the
  batch was flushed but the triggering path itself was DROPPED (the
  add only happened in the else-branch). Every flush silently lost one
  file. Now the batch is flushed first and the path is always added to
  the fresh batch.

  @param {String} temp.id - the process id these paths belong to
  @param {Array} temp.paths - Array of top level paths
*/
function expandPathsToFiles(temp.id = "", temp.paths = {}) {
  //#
  temp.paths_size = temp.paths.size();
  temp.files = {};

  for (temp.path : temp.paths) {
    temp.is_folder = (temp.path.ends("/") && temp.path == extractfilepath(temp.path));
    temp.has_wildcard = temp.path.pos("*") > -1;

    // If this is not a folder nor a path with a wildcard
    // that must mean this is a direct path to a file
    if (!temp.is_folder && !temp.has_wildcard) {
      // Flush the current batch first if adding this path
      // would push it past the safe property length
      if (temp.files.length() + temp.path.length() > PROPERTY_OVERLOAD_LENGTH) {
        onUpdateFilePaths(temp.id, temp.files);
        temp.files = {};
      }

      temp.files.add(temp.path);
      continue;
    }

    // At this point, we need to use loadfolder
    // because the path is either a folder
    // or has a wildcard
    temp.found_files = {};

    // We use loadfolder instead here since findfiles
    // can only find files inside of levels/

    // We also add a wildcard for plain folders in this next line
    // so that we get all of the files inside them
    temp.found_files.loadfolder(temp.path @ (temp.is_folder && !temp.path.ends("*") ? "*" : ""), true);
    temp.found_files_size = temp.found_files.size();
    onTick();

    // No files were found
    if (temp.found_files_size <= 0) { continue; }

    // Iterate over all of the found files to add them to our
    // final batch
    for (temp.j = 0; temp.j < temp.found_files_size; temp.j++) {
      // TGraalVar.loadfolder returns just filenames, not the paths.
      // We need to add the path again to each found file
      temp.found_file = temp.path @ temp.found_files[temp.j];

      // Same flush-then-add pattern as above so no file is lost
      if (temp.files.length() + temp.found_file.length() > PROPERTY_OVERLOAD_LENGTH) {
        onUpdateFilePaths(temp.id, temp.files);
        temp.files = {};
      }

      temp.files.add(temp.found_file);
    }
  }

  // Flush any leftover files to the process row
  if (temp.files.size() > 0) {
    onUpdateFilePaths(temp.id, temp.files);
  }
}
970
// Builds a random lowercase-hex string of the requested length,
// used to make backup ids unique.
// NOTE(review): assumes random(0, 1) never returns exactly 1 —
// if it did, charat() would be called one past the end; confirm
// the builtin's upper bound is exclusive.
function generateRandomChars(temp.length = 0) {
  temp.alphabet = "abcdef1234567890";
  temp.alphabet_size = temp.alphabet.length();
  temp.out = "";

  for (temp.n = 0; temp.n < temp.length; temp.n++) {
    temp.out @= temp.alphabet.charat(int(random(0, 1) * temp.alphabet_size));
  }

  return temp.out;
}
983
// Formats a byte count as a human-readable size with two decimals,
// stepping through b -> kb -> mb -> gb in factors of 1024.
function getFormattedFileSize(temp.bytes = 0) {
  temp.units = {"b", "kb", "mb", "gb"};
  temp.unit = 0;

  // Divide down until the value fits the current unit
  // (or we run out of units)
  while (temp.bytes >= 1024 && temp.unit < temp.units.size() - 1) {
    temp.bytes /= 1024;
    temp.unit++;
  }

  return format("%.2f %s", temp.bytes, temp.units[temp.unit]);
}
995
// Rough estimate of how long a backup will take, returned as a
// human-readable "M minute(s) and S second(s)" string.
function estimateBackupDuration(temp.total_files = 0) {
  // Assumed per-file processing cost in seconds
  const TIME_PER_FILE = 0.1;

  // BUGFIX: previously read temp.total_tiles (typo), which is always
  // NULL — the iteration count was 0 and the scheduled pauses were
  // never included in the estimate.
  temp.iterations = int(temp.total_files / FLOOD_CONTROL.CHUNK_SIZE);
  temp.processing_time = temp.total_files * TIME_PER_FILE;

  // Each onZipPaths iteration is rescheduled after NAP_TIME * 0.1 seconds
  temp.pause_time = temp.iterations * (FLOOD_CONTROL.NAP_TIME * 0.1);
  // The 20 is a rough estimate of how much time is spent in waitfor's
  temp.total_time = 20 + temp.processing_time + temp.pause_time;

  // Convert total time to minutes and seconds for readability
  return getMinutesAndSecondsFromTime(temp.total_time);
}
1009
// Formats a duration in seconds as "M minute(s) and S second(s)".
function getMinutesAndSecondsFromTime(temp.time = 0) {
  temp.minutes = int(temp.time / 60);
  temp.seconds = int(temp.time % 60);

  // BUGFIX: temp.seconds is truncated to an integer, so the previous
  // "%.2f" specifier was wrong — use %d to match the value's type.
  return format("%d minute(s) and %d second(s)", temp.minutes, temp.seconds);
}
1016
// Rounds a fraction in [0, 1] to the nearest tenth (e.g. 0.25 -> 0.3).
// Values outside that range are returned untouched.
function closestTenPercent(temp.n) {
  if (temp.n < 0 || temp.n > 1) { return temp.n; }

  // Work in whole percent so the arithmetic stays integral
  temp.percent = int(temp.n * 100);

  temp.tenth = int(temp.percent / 10);

  // Round up when the leftover percent is 5 or more
  if (temp.percent % 10 >= 5) {
    temp.tenth++;
  }

  // Carryover clamps to exactly 1.0 (e.g. 0.95 rounds up)
  return temp.tenth >= 10 ? 1.0 : temp.tenth / 10;
}
1037
// Central SQL gateway: runs a query against DATABASE via requestsql2,
// optionally waits for the result, tracks per-minute request counts
// and logs errors. Prefixing a query with "echo " enables debug output
// for that one query.
//
// @param {string} temp.query - the SQL text (may start with "echo ")
// @param {boolean} temp.expect_return - wait for and return row data;
//   forced true for SELECTs
// @returns the requestsql2 request object (fields: error, completed,
//   rows, plus selected columns)
function onExecuteSQL(temp.query = "", temp.expect_return = false) {
  // Query debugging: strip the leading "echo " (5 characters)
  if (temp.query.lower().starts("echo")) {
    temp.do_echo = true;
    temp.query = temp.query.substring(5);
  }

  // SELECTs always need their result waited on
  if (temp.query.lower().starts("select")) { temp.expect_return = true; }

  temp.t = timevar2;
  temp.request = requestsql2(DATABASE, temp.query, temp.expect_return);
  onCPUTick();

  // Fire-and-forget path: return the request object immediately
  if (!temp.expect_return) {
    if (temp.do_echo) {
      onBackupDebug(format("SQL Query Debug: %s", temp.query));
    }

    return temp.request;
  }

  // Block (up to 60s) until the server answers this request
  if (!temp.request.completed && !temp.request.error) {
    waitfor(temp.request, "onReceiveData", 60);
  }

  // Just a way to keep track of the amount of SQL requests per minute
  temp.min_before = this.last_minute;
  this.last_minute = int(timevar2 / 60);

  if (temp.min_before == this.last_minute) {
    this.sql_reqs_last_minute++;
  } else {
    this.sql_reqs_last_minute = 1;
  }

  // Log failures both to console and to a persistent log file
  if (temp.request.error != NULL) {
    onBackupDebug(format("SQL Error!\n\tError: %s\n\tQuery: %s", temp.request.error, temp.query));
    savelog2("zippy_sqlerrors.txt", temp.request.error NL temp.query);
  } else {
    if (temp.do_echo) {
      onBackupDebug(format("SQL\n\tQuery: %s\n\tResponse: %s", temp.query, temp.request.savejsontostring(2)));
    }
  }

  return temp.request;
}
1085