otb-cloud secure encrypted backups
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.
 
 
 
 
 

220 lines
6.2 KiB

from app.db import get_db
from pathlib import Path
def get_tenant_row(db, tenant):
    """Look up a tenant by its slug.

    Args:
        db: DB-API connection whose cursors yield dict rows.
        tenant: Tenant slug to look up.

    Returns:
        The matching row dict with ``id`` and ``storage_root`` keys, or
        ``None`` when no tenant has that slug.
    """
    # Use the cursor as a context manager so it is always closed,
    # matching the other helpers in this module.
    with db.cursor() as cur:
        cur.execute(
            "SELECT id, storage_root FROM tenants WHERE slug = %s LIMIT 1",
            (tenant,),
        )
        row = cur.fetchone()
    # fetchone() returns None on a miss; normalize any falsy row to None.
    return row or None
def _current_db_name(db):
with db.cursor() as cur:
cur.execute("SELECT DATABASE() AS dbname")
row = cur.fetchone()
return row["dbname"]
def _table_exists(db, table_name):
    """Check information_schema for *table_name* in the current schema."""
    schema = _current_db_name(db)
    sql = """
        SELECT COUNT(*) AS c
        FROM information_schema.tables
        WHERE table_schema = %s AND table_name = %s
        """
    with db.cursor() as cursor:
        cursor.execute(sql, (schema, table_name))
        count_row = cursor.fetchone()
    return int(count_row["c"]) > 0
def _table_columns(db, table_name):
    """Return the set of column names of *table_name* in the current schema."""
    schema = _current_db_name(db)
    sql = """
        SELECT column_name
        FROM information_schema.columns
        WHERE table_schema = %s AND table_name = %s
        """
    with db.cursor() as cursor:
        cursor.execute(sql, (schema, table_name))
        fetched = cursor.fetchall()
    return {record["column_name"] for record in fetched}
def _pick(cols, *names):
for n in names:
if n in cols:
return n
return None
def resolve_source_from_file_id(db, tenant_id, device_id, source_file_id):
    """Find the stored relative path and original filename for a file id.

    The file-table layout has varied across deployments, so this probes a
    fixed list of candidate tables and tolerant column-name aliases rather
    than assuming one schema.

    Args:
        db: DB-API connection whose cursors yield dict rows.
        tenant_id: Tenant primary key used to scope the lookup when the
            table has a ``tenant_id`` column.
        device_id: Device primary key used to scope the lookup when the
            table has a ``device_id`` column.
        source_file_id: Primary key of the file row to resolve.

    Returns:
        Dict with ``source_relative_path`` and ``source_original_filename``.

    Raises:
        RuntimeError: If no candidate table yields a matching row.
    """
    candidate_tables = ["files", "device_files", "uploaded_files"]
    for table in candidate_tables:
        if not _table_exists(db, table):
            continue
        cols = _table_columns(db, table)
        id_col = _pick(cols, "id")
        rel_col = _pick(cols, "relative_path", "source_relative_path", "path", "storage_relative_path")
        orig_col = _pick(cols, "original_filename", "filename", "display_filename", "basename")
        tenant_col = _pick(cols, "tenant_id")
        device_col = _pick(cols, "device_id")
        # A table is only usable if it can answer all three questions.
        if not id_col or not rel_col or not orig_col:
            continue
        # Identifiers are interpolated, but only from the fixed candidate
        # lists above -- not user input -- so this is not an injection
        # vector; values still go through placeholders.
        sql = f"SELECT {id_col} AS id, {rel_col} AS rel_path, {orig_col} AS orig_name FROM {table} WHERE {id_col} = %s"
        args = [source_file_id]
        if tenant_col:
            sql += f" AND {tenant_col} = %s"
            args.append(tenant_id)
        if device_col:
            sql += f" AND {device_col} = %s"
            args.append(device_id)
        sql += " LIMIT 1"
        with db.cursor() as cur:
            cur.execute(sql, tuple(args))
            row = cur.fetchone()
        if row:
            return {
                "source_relative_path": row["rel_path"],
                "source_original_filename": row["orig_name"],
            }
    # Derive the message from candidate_tables so it cannot drift from the
    # list actually probed (the old message hardcoded the table names).
    raise RuntimeError(
        f"Could not resolve file metadata for source_file_id={source_file_id}. "
        f"Tried tables: {', '.join(candidate_tables)}"
    )
def create_video_job(tenant, device_id, source_file_id, profile="default", rotation_override=None):
    """Insert a queued ``video_jobs`` row for a tenant's source file.

    Args:
        tenant: Tenant slug.
        device_id: Device the source file belongs to.
        source_file_id: Id of the file row to process (coerced to int).
        profile: Requested encoding profile name.
        rotation_override: Optional rotation in degrees; ``None`` stores NULL.

    Returns:
        The new ``video_jobs`` row id (``cursor.lastrowid``).

    Raises:
        ValueError: If the tenant slug is unknown.
        RuntimeError: If the source file metadata cannot be resolved.
    """
    db = get_db()
    tenant_row = get_tenant_row(db, tenant)
    if not tenant_row:
        # ValueError is a subclass of Exception, so callers that caught
        # the old generic Exception still work.
        raise ValueError(f"Tenant not found: {tenant}")
    tenant_id = tenant_row["id"]
    # Coerce once and reuse for both the lookup and the insert.
    file_id = int(source_file_id)
    file_meta = resolve_source_from_file_id(
        db=db,
        tenant_id=tenant_id,
        device_id=device_id,
        source_file_id=file_id,
    )
    # New jobs always start queued at 0% with GPU preference left on auto.
    with db.cursor() as cur:
        cur.execute(
            """
            INSERT INTO video_jobs (
                tenant_id,
                device_id,
                source_file_id,
                source_relative_path,
                source_original_filename,
                requested_profile,
                requested_rotation_degrees,
                requested_gpu_preference,
                status,
                progress_percent
            ) VALUES (%s, %s, %s, %s, %s, %s, %s, 'auto', 'queued', 0)
            """,
            (
                tenant_id,
                device_id,
                file_id,
                file_meta["source_relative_path"],
                file_meta["source_original_filename"],
                profile,
                rotation_override,
            ),
        )
        # Capture the id before the cursor closes.
        job_id = cur.lastrowid
    db.commit()
    return job_id
def _safe_size(storage_root, rel_path):
if not rel_path:
return None
try:
p = Path(storage_root) / rel_path
if p.exists() and p.is_file():
return p.stat().st_size
except Exception:
return None
return None
def list_jobs_for_tenant(tenant):
    """Return up to the 100 most recent video jobs for a tenant slug.

    Each job dict includes the on-disk sizes of the source and output
    files (``None`` when the file is absent) and stringified timestamps.

    Args:
        tenant: Tenant slug.

    Returns:
        List of job dicts, newest first; empty list for an unknown tenant.
    """
    db = get_db()
    tenant_row = get_tenant_row(db, tenant)
    if not tenant_row:
        return []
    tenant_id = tenant_row["id"]
    storage_root = tenant_row["storage_root"]
    # Close the cursor deterministically, matching the module's other helpers.
    with db.cursor() as cur:
        cur.execute(
            """
            SELECT
                id,
                device_id,
                source_file_id,
                source_relative_path,
                source_original_filename,
                requested_profile,
                requested_rotation_degrees,
                status,
                progress_percent,
                assigned_processor,
                output_relative_path,
                error_message,
                created_at,
                started_at,
                completed_at
            FROM video_jobs
            WHERE tenant_id = %s
            ORDER BY id DESC
            LIMIT 100
            """,
            (tenant_id,),
        )
        rows = cur.fetchall()

    def _ts(value):
        # Timestamps are rendered as strings for JSON-friendly output.
        return str(value) if value is not None else None

    return [
        {
            "id": r["id"],
            "device_id": r["device_id"],
            "source_file_id": r["source_file_id"],
            "filename": r["source_original_filename"],
            "profile": r["requested_profile"],
            "rotation_override": r["requested_rotation_degrees"],
            "status": r["status"],
            "progress_percent": r["progress_percent"],
            "assigned_processor": r["assigned_processor"],
            "output_relative_path": r["output_relative_path"],
            "error_message": r["error_message"],
            "original_size": _safe_size(storage_root, r["source_relative_path"]),
            "processed_size": _safe_size(storage_root, r["output_relative_path"]),
            "created_at": _ts(r["created_at"]),
            "started_at": _ts(r["started_at"]),
            "completed_at": _ts(r["completed_at"]),
        }
        for r in rows
    ]