Commit 3bb20056 authored by Tim Bleimehl's avatar Tim Bleimehl 🤸🏼
Browse files


parent 73d56f9f
......@@ -243,7 +243,8 @@ class BackupManager:
def rotate_existing_backups(self):"Start backup rotating...")
if not self.base_path.is_dir():"No backups to rotate at '{str(self.base_path)}'")
log.warning(f"No backups to rotate at '{str(self.base_path.absolute())}'")
for database_base_backup_dir in self.base_path.iterdir():
if not database_base_backup_dir.is_dir():
......@@ -7,7 +7,8 @@ from executer import Executer
from backup_manager import BackupManager, RetentionType
config: DEFAULT = getConfig(config_classes=[DEFAULT])
log = logging.getLogger("BaseBackupper")
# log = logging.getLogger("BaseBackupper")
from log import log
class BaseBackupper:
......@@ -81,14 +82,15 @@ class BaseBackupper:
return [
for database in result.decode("utf-8").splitlines()
if database not in self.ignore_databases or show_all
if (database.strip() not in self.ignore_databases or show_all)
and database.strip() != ""
def backup(
databases: List[str] = None,
retention_type: RetentionType = RetentionType.MANUAL,
) -> List[Path]:"Start database backup...")
if not databases:
# no certain databases specified. We backup all available databases
......@@ -98,10 +100,9 @@ class BaseBackupper:
elif isinstance(databases, str):
# Caller just provided single database name. Thats ok we can handle that
databases = [databases]
backup_pathes = []
log.debug(f"Following databases will be backuped: {databases}")
for database in databases:
if database in self.ignore_databases:
if not database in self.list_databases():
msg = f"Database {database} does not exists @ '{}' with user '{self.user}'"
......@@ -114,7 +115,12 @@ class BaseBackupper:
cmd = self.get_backup_command(database, filepath)"Backup '{database}' to '{filepath}'")
log.warning("No Databases found to backup...")
return backup_pathes
def get_backup_command(self, database_name, target_filepath):
raise NotImplementedError
......@@ -186,13 +192,13 @@ class PostgresBackupper(BaseBackupper):
def get_backup_command(self, database_name, target_filepath):
return f"export PGPASSWORD=${self.password} && {self.bin_dump} {'--clean' if self.add_drop_database else ''} -h {} -U {self.user} {database_name} {'| gzip -9' if self.compress_backup else ''} >{target_filepath}"
return f"env PGPASSWORD={self.password} {self.bin_dump} {'--clean' if self.add_drop_database else ''} -h {} -U {self.user} {database_name} {'| gzip -9' if self.compress_backup else ''} >{target_filepath}"
def get_restore_command(self, database_name):
return f"""export PGPASSWORD={self.password} && {self.bin_cmd} -U {self.user} -d {database_name}"""
return f"""env PGPASSWORD={self.password} {self.bin_cmd} -U {self.user} -d {database_name}"""
def get_list_database_command(self):
return f"""export PGPASSWORD=${self.password} && psql -U {self.user} -h {} -t -c "SELECT datname FROM pg_database WHERE datistemplate = false;" """
return f"""env PGPASSWORD={self.password} {self.bin_cmd} -U {self.user} -h {} -t -c "SELECT datname FROM pg_database WHERE datistemplate = false;" """
class Neo4jOnlineBackupper(BaseBackupper):
......@@ -169,14 +169,12 @@ def backup_kubernetes(namespace, all_namespaces, target_dir):
"""Backup databases in a kubernetes environment"""
from container_helper import ContainerHelper
i = 0
count_db_instances = 0
count_dbs = 0
for pod in ContainerHelper.kubernetes_get_pods_to_be_backed_up(
namespace=namespace, all_namespaces=all_namespaces
if pod.backup_labels[ValidLabels.enabled].val:
print(ValidLabels.database_type == "")
BackupperClass = database_type_backupper_mapping[
......@@ -211,13 +209,17 @@ def backup_kubernetes(namespace, all_namespaces, target_dir):
databases = None
databases = pod.backup_labels[ValidLabels.database_names].val.split(",")
backup_files = bu.backup(
i += 1
count_dbs += len(backup_files)
count_db_instances += 1
click.echo(f"\nBackuped {i} databases")
f"\nBackuped {count_dbs} database(s) in {count_db_instances} instance(s)"
......@@ -130,9 +130,9 @@ class ContainerHelper:
# the Workload specs.selector.matchLabels defines how pods running under this workload have to be labeled
# this provides us the information to find pods belonging to a/this specific workload
pod_selector_labels: List[Label] = [
Label(lbl) for lbl in workload["spec"]["selector"]["matchLabels"]
pod_selector_labels: List[Label] = Label.from_dict(
for wl_pod in cls.kubernetes_get_pods(
......@@ -22,7 +22,7 @@ class Label:
info: str = None,
base_label_key: str = None,
if isinstance(label, Dict):
if isinstance(label, dict):
key = list(label.keys())[0]
val = list(label.values())[0]
......@@ -45,6 +45,13 @@ class Label:
self.default: Union[str, int, bool] = default str = info
def from_dict(cls, labels: dict) -> List["Label"]:
    """Convert a plain ``{key: value}`` mapping into a list of Label objects.

    Each key/value pair becomes its own single-entry dict passed to the
    ``Label`` constructor, so one ``Label`` is produced per mapping item.
    """
    return [Label({key: value}) for key, value in labels.items()]
def __eq__(self, other: Union["Label", str]):
return (
type(other) is type(self)
......@@ -67,7 +74,10 @@ class Label:
return self.key
def __repr__(self):
    """Debug representation: ``<Label 'key=val'>``, or ``<Label 'key'>`` when val is falsy."""
    body = f"{self.key}={self.val}" if self.val else f"{self.key}"
    return f"<Label '{body}'>"
class ValidLabels:
......@@ -10,44 +10,3 @@ docker exec mysql /usr/bin/psql -Atx postgresql://postgres:mysupersavepw@localho
INSERT INTO coda_ps_test_scheme.my_table(id,firstname) VALUES (2,'Thomas'); \
# Issue with multiple databases in k8s
Labels are not correctly assigned
/bin/python3 /home/tim/Repos/ backup kubernetes --all-namespaces
Container(id='c629b052-bc93-4781-af6f-a13b682e66e8', name='mariadb01-0', backup_labels={<Label ''>: <Label ''>, <Label ''>: <Label ''>, <Label ''>: <Label ''>, <Label ''>: <Label ''>, <Label ''>: <Label ''>, <Label ''>: <Label ''>, <Label ''>: <Label ''>, <Label ''>: <Label ''>, <Label ''>: <Label ''>, <Label ''>: <Label ''>, <Label ''>: <Label ''>, <Label ''>: <Label ''>}, other_labels={<Label 'controller-revision-hash=mariadb01-6844bc5dc7'>: <Label 'controller-revision-hash=mariadb01-6844bc5dc7'>, <Label ''>: <Label ''>, <Label ''>: <Label ''>}, desc={'apiVersion': 'v1', 'kind': 'Pod', 'metadata': {'creationTimestamp': '2022-01-19T13:41:16Z', 'generateName': 'mariadb01-', 'labels': {'controller-revision-hash': 'mariadb01-6844bc5dc7', '': 'mariadb01-0', '': 'apps.statefulset-default-mariadb01'}, 'name': 'mariadb01-0', 'namespace': 'default', 'ownerReferences': [{'apiVersion': 'apps/v1', 'blockOwnerDeletion': True, 'controller': True, 'kind': 'StatefulSet', 'name': 'mariadb01', 'uid': '8b9a22b5-e787-4b8e-90a2-44b57a6d134f'}], 'resourceVersion': '953431', 'uid': 'c629b052-bc93-4781-af6f-a13b682e66e8'}, 'spec': {'containers': [{'env': [{'name': 'MYSQL_ROOT_PASSWORD', 'value': '498zrthfwejfef'}], 'image': 'mariadb:10', 'imagePullPolicy': 'Always', 'name': 'mariadb01', 'ports': [{'containerPort': 3306, 'hostPort': 3308, 'name': 'port3308', 'protocol': 'TCP'}], 'resources': {}, 'terminationMessagePath': '/dev/termination-log', 'terminationMessagePolicy': 'File', 'volumeMounts': [{'mountPath': '/var/run/secrets/', 'name': 'kube-api-access-rv57j', 'readOnly': True}]}], 'dnsPolicy': 'ClusterFirst', 'enableServiceLinks': True, 'hostname': 'mariadb01-0', 'nodeName': 'local-node', 'preemptionPolicy': 'PreemptLowerPriority', 'priority': 0, 'restartPolicy': 'Always', 'schedulerName': 'default-scheduler', 'securityContext': {}, 'serviceAccount': 'default', 'serviceAccountName': 'default', 'terminationGracePeriodSeconds': 30, 'tolerations': [{'effect': 'NoExecute', 'key': '', 
'operator': 'Exists', 'tolerationSeconds': 300}, {'effect': 'NoExecute', 'key': '', 'operator': 'Exists', 'tolerationSeconds': 300}], 'volumes': [{'name': 'kube-api-access-rv57j', 'projected': {'defaultMode': 420, 'sources': [{'serviceAccountToken': {'expirationSeconds': 3607, 'path': 'token'}}, {'configMap': {'items': [{'key': 'ca.crt', 'path': 'ca.crt'}], 'name': 'kube-root-ca.crt'}}, {'downwardAPI': {'items': [{'fieldRef': {'apiVersion': 'v1', 'fieldPath': 'metadata.namespace'}, 'path': 'namespace'}]}}]}}]}, 'status': {'conditions': [{'lastProbeTime': None, 'lastTransitionTime': '2022-01-19T13:41:16Z', 'status': 'True', 'type': 'Initialized'}, {'lastProbeTime': None, 'lastTransitionTime': '2022-01-24T08:47:09Z', 'status': 'True', 'type': 'Ready'}, {'lastProbeTime': None, 'lastTransitionTime': '2022-01-24T08:47:09Z', 'status': 'True', 'type': 'ContainersReady'}, {'lastProbeTime': None, 'lastTransitionTime': '2022-01-19T13:41:16Z', 'status': 'True', 'type': 'PodScheduled'}], 'containerStatuses': [{'containerID': 'containerd://6dccb61553b7916bb028f17e56a8c46920a084c2d276736290725724a732d15d', 'image': '', 'imageID': '', 'lastState': {'terminated': {'containerID': 'containerd://d128bb5e383e0131dd4cdba092f34e231021768e49426cdaa249e4af8af01589', 'exitCode': 255, 'finishedAt': '2022-01-24T08:46:52Z', 'reason': 'Unknown', 'startedAt': '2022-01-23T10:49:37Z'}}, 'name': 'mariadb01', 'ready': True, 'restartCount': 5, 'started': True, 'state': {'running': {'startedAt': '2022-01-24T08:47:08Z'}}}], 'hostIP': '', 'phase': 'Running', 'podIP': '', 'podIPs': [{'ip': ''}], 'qosClass': 'BestEffort', 'startTime': '2022-01-19T13:41:16Z'}}, parent={'apiVersion': 'apps/v1', 'kind': 'Deployment', 'metadata': {'annotations': {'': '3'}, 'creationTimestamp': '2022-01-24T15:24:28Z', 'generation': 3, 'labels': {'': 'true', '': 'supersavepw', '': 'postgres', '': 'postgres', '': 'apps.deployment-my-namespace-postgres01'}, 'name': 'postgres01', 'namespace': 'my-namespace', 'resourceVersion': 
'1081236', 'uid': 'd26d382e-1f5f-4749-b077-20424a0d8bbc'}, 'spec': {'progressDeadlineSeconds': 600, 'replicas': 1, 'revisionHistoryLimit': 10, 'selector': {'matchLabels': {'': 'apps.deployment-my-namespace-postgres01'}}, 'strategy': {'rollingUpdate': {'maxSurge': '25%', 'maxUnavailable': '25%'}, 'type': 'RollingUpdate'}, 'template': {'metadata': {'annotations': {'': '2022-01-24T16:01:03Z'}, 'creationTimestamp': None, 'labels': {'': 'apps.deployment-my-namespace-postgres01'}}, 'spec': {'affinity': {}, 'containers': [{'env': [{'name': 'POSTGRES_PASSWORD', 'value': 'supersavepw'}], 'image': 'postgres:12', 'imagePullPolicy': 'Always', 'name': 'container-0', 'resources': {}, 'terminationMessagePath': '/dev/termination-log', 'terminationMessagePolicy': 'File'}], 'dnsPolicy': 'ClusterFirst', 'restartPolicy': 'Always', 'schedulerName': 'default-scheduler', 'securityContext': {}, 'terminationGracePeriodSeconds': 30}}}, 'status': {'availableReplicas': 1, 'conditions': [{'lastTransitionTime': '2022-01-24T15:24:47Z', 'lastUpdateTime': '2022-01-24T15:24:47Z', 'message': 'Deployment has minimum availability.', 'reason': 'MinimumReplicasAvailable', 'status': 'True', 'type': 'Available'}, {'lastTransitionTime': '2022-01-24T15:24:28Z', 'lastUpdateTime': '2022-01-24T16:01:06Z', 'message': 'ReplicaSet "postgres01-66646fbcc5" has successfully progressed.', 'reason': 'NewReplicaSetAvailable', 'status': 'True', 'type': 'Progressing'}], 'observedGeneration': 3, 'readyReplicas': 1, 'replicas': 1, 'updatedReplicas': 1}}, kubernetes_namespace='default')
[INFO] Start database backup...
Traceback (most recent call last):
File "/home/tim/Repos/", line 497, in <module>
File "/home/tim/.local/lib/python3.9/site-packages/click/", line 1128, in __call__
return self.main(*args, **kwargs)
File "/home/tim/.local/lib/python3.9/site-packages/click/", line 1053, in main
rv = self.invoke(ctx)
File "/home/tim/.local/lib/python3.9/site-packages/click/", line 1659, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/tim/.local/lib/python3.9/site-packages/click/", line 1659, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/tim/.local/lib/python3.9/site-packages/click/", line 1395, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/tim/.local/lib/python3.9/site-packages/click/", line 754, in invoke
return __callback(*args, **kwargs)
File "/home/tim/Repos/", line 214, in backup_kubernetes
File "/home/tim/Repos/", line 96, in backup
databases = self.list_databases()
File "/home/tim/Repos/", line 80, in list_databases
result = self.executer.container_exec(command=self.get_list_database_command())
File "/home/tim/Repos/", line 47, in container_exec
return self.exec(exec_command)
File "/home/tim/Repos/", line 71, in exec
raise RuntimeError(
RuntimeError: ERROR CODE 1 on command
' kubectl exec -n default mariadb01-0 -- export PGPASSWORD=$supersavepw && psql -U postgres -h -t -c "SELECT datname FROM pg_database WHERE datistemplate = false;" ':
error: Internal error occurred: error executing command in container: failed to exec in container: failed to start exec "c79a99567f935b2bd12a7453bd74fe521ed8a1b32d98b6dd088c5d793dccae06": OCI runtime exec failed: exec failed: container_linux.go:380: starting container process caused: exec: "export": executable file not found in $PATH: unknown
\ No newline at end of file
......@@ -84,6 +84,7 @@ Thats it. We now have a directory `./backups/` in front of us, with all database
* (WIP) Restore Wizard
* (WIP) Neo4j support
* (WIP) Database auth via Docker/kubernetes Secrets
* (Planned) Support pods with more than one container
* (Planned) Create database if not existent via label conf (checked/executed when pod starts via
* (Planned) Email notification
* (Planned) Support of docker and kubernetes secrets for providing database auth
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment