|
| 1 | +# Backing Up the Store |
| 2 | + |
| 3 | +To make backups of your KVStore, use the CLI snapshot command to copy nodes in the store. |
| 4 | +To maintain consistency, no topology changes should be in process when you create a snapshot. |
| 5 | +Restoring a snapshot relies on the system configuration having exactly the same topology that was in effect when you created the snapshot. |
| 6 | + |
| 7 | +Due to the distributed nature and scale of Oracle NoSQL Database, it is unlikely that a single machine has the resources to contain snapshots for the entire store. |
| 8 | + |
| 9 | +## Managing Snapshots |
| 10 | + |
| 11 | +When you create a snapshot, the utility collects data from every Replication Node in the system, including Masters and replicas. |
| 12 | +If the operation does not succeed for any one node in a shard, the entire snapshot fails. |
| 13 | + |
| 14 | +The command “snapshot create” provides the backup name when it runs successfully.
| 15 | +```` |
| 16 | +kv_admin snapshot create -name BACKUP |
| 17 | +Created data snapshot named 210705-101307-BACKUP on all 11 components |
| 18 | +Successfully backup configurations on sn1, sn2, sn3 |
| 19 | +```` |
| 20 | + |
| 21 | +The command “snapshot create” does not provide the backup name if something goes wrong.
| 22 | +```` |
| 23 | +kv_admin snapshot create -name BACKUP |
| 24 | +Create data snapshot succeeded but not on all components |
| 25 | +Successfully backup configurations on sn1, sn2, sn3 |
| 26 | +```` |
| 27 | + |
| 28 | +As you can see here, there is no warning or information if all Replication Nodes of a replication group are unavailable:
| 29 | + |
| 30 | +```` |
| 31 | +kv_admin snapshot create -name BACKUP |
| 32 | +Successfully backup configurations on sn1, sn2, sn3 |
| 33 | +```` |
| 34 | + |
| 35 | +Use the JSON output instead: it shows more information and lets you see exactly what happened (same tests as above).
| 36 | +```` |
| 37 | +kv_admin snapshot create -name BACKUP -json 2>/dev/null |
| 38 | +{ |
| 39 | + "operation" : "snapshot operation", |
| 40 | + "returnCode" : 5000, |
| 41 | + "description" : "Operation ends successfully", |
| 42 | + "returnValue" : { |
| 43 | + "snapshotName" : "210705-133631-BACKUP", |
| 44 | + "successSnapshots" : [ "admin1", "admin2", "rg1-rn1", "rg1-rn2", "rg1-rn3", "rg2-rn1", "rg2-rn2", "rg2-rn3", "rg3-rn1", "rg3-rn2", "rg3-rn3" ], |
| 45 | + "failureSnapshots" : [ ], |
| 46 | + "successSnapshotConfigs" : [ "sn1", "sn2", "sn3" ], |
| 47 | + "failureSnapshotConfigs" : [ ] |
| 48 | + } |
| 49 | +} |
| 50 | +```` |
| 51 | +```` |
| 52 | +kv_admin snapshot create -name BACKUP -json 2>/dev/null |
| 53 | +{ |
| 54 | + "operation" : "snapshot operation", |
| 55 | + "returnCode" : 5500, |
| 56 | + "description" : "Operation ends successfully", |
| 57 | + "returnValue" : { |
| 58 | + "snapshotName" : "210705-133737-BACKUP", |
| 59 | + "successSnapshots" : [ "admin1", "admin2", "rg1-rn1", "rg1-rn2", "rg1-rn3", "rg2-rn1", "rg2-rn2", "rg2-rn3", "rg3-rn1", "rg3-rn2" ], |
| 60 | + "failureSnapshots" : [ "rg3-rn3" ], |
| 61 | + "successSnapshotConfigs" : [ "sn1", "sn2", "sn3" ], |
| 62 | + "failureSnapshotConfigs" : [ ] |
| 63 | + } |
| 64 | +} |
| 65 | +```` |
| 66 | +```` |
| 67 | +kv_admin snapshot create -name BACKUP -json 2>/dev/null |
| 68 | +{ |
| 69 | + "operation" : "snapshot operation", |
| 70 | + "returnCode" : 5500, |
| 71 | + "description" : "Operation ends successfully", |
| 72 | + "returnValue" : { |
| 73 | + "snapshotName" : "210705-133846-BACKUP", |
| 74 | + "successSnapshots" : [ "admin1", "admin2", "rg1-rn1", "rg1-rn2", "rg1-rn3", "rg2-rn1", "rg2-rn2", "rg2-rn3" ], |
| 75 | + "failureSnapshots" : [ "rg3-rn1", "rg3-rn2", "rg3-rn3" ], |
| 76 | + "successSnapshotConfigs" : [ "sn1", "sn2", "sn3" ], |
| 77 | + "failureSnapshotConfigs" : [ ] |
| 78 | + } |
| 79 | +} |
| 80 | +```` |
| 81 | +You can use the command `show topology` to find the backup paths on each Storage Node (sn):
| 82 | +* { rootDirPath }/snapshots/ |
| 83 | +* {storageDirEnvPath[]}/../snapshots
| 84 | +* {adminDirsPath}/*/snapshots |
| 85 | + |
| 86 | +```` |
| 87 | +kv_admin show topology -verbose -json | jq -r '.returnValue.sns[] | select (.resourceId == "sn1")|[{name:.resourceId,host:.hostname,rootDir:.rootDirPath,rns:.rns[]}]' |
| 88 | +[ |
| 89 | + { |
| 90 | + "name": "sn1", |
| 91 | + "host": "node1-nosql", |
| 92 | + "rootDir": "/home/opc/nosql/kvroot", |
| 93 | + "rns": { |
| 94 | + "resourceId": "rg1-rn1", |
| 95 | + "storageDirPath": "/home/opc/nosql/data/disk1", |
| 96 | + "storageDirEnvPath": "/home/opc/nosql/data/disk1/rg1-rn1/env", |
| 97 | + "storageDirSize": 524288000 |
| 98 | + } |
| 99 | + }, |
| 100 | + { |
| 101 | + "name": "sn1", |
| 102 | + "host": "node1-nosql", |
| 103 | + "rootDir": "/home/opc/nosql/kvroot", |
| 104 | + "rns": { |
| 105 | + "resourceId": "rg2-rn1", |
| 106 | + "storageDirPath": "/home/opc/nosql/data/disk2", |
| 107 | + "storageDirEnvPath": "/home/opc/nosql/data/disk2/rg2-rn1/env", |
| 108 | + "storageDirSize": 524288000 |
| 109 | + } |
| 110 | + }, |
| 111 | + { |
| 112 | + "name": "sn1", |
| 113 | + "host": "node1-nosql", |
| 114 | + "rootDir": "/home/opc/nosql/kvroot", |
| 115 | + "rns": { |
| 116 | + "resourceId": "rg3-rn1", |
| 117 | + "storageDirPath": "/home/opc/nosql/data/disk3", |
| 118 | + "storageDirEnvPath": "/home/opc/nosql/data/disk3/rg3-rn1/env", |
| 119 | + "storageDirSize": 524288000 |
| 120 | + } |
| 121 | + } |
| 122 | +] |
| 123 | +```` |
| 124 | +NB: Currently the adminDirsPath is not shown. An enhancement request was filed. In the meantime, please use the following command:
| 125 | + |
| 126 | +```` |
| 127 | +kv_admin show parameter -service sn1 -json | jq -r -c '.returnValue.adminDirs[].path' |
| 128 | +/home/opc/nosql/admin |
| 129 | +```` |
| 130 | + |
0 commit comments