Skip to content

Commit 3d9ffda

Browse files
committed
fix(backend): replace print with logger
1 parent 5428568 commit 3d9ffda

4 files changed

Lines changed: 18 additions & 69 deletions

File tree

backend/app/main.py

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -77,7 +77,7 @@ async def startup_event():
7777
init_db()
7878

7979
if DISTRIBOX_MODE == "slave":
80-
print(f"✓ Starting in SLAVE mode")
80+
logger.info("Starting in SLAVE mode")
8181
asyncio.create_task(_slave_heartbeat_loop())
8282
return
8383

@@ -99,7 +99,7 @@ async def startup_event():
9999
)
100100
session.add(admin)
101101
session.commit()
102-
print(f"✓ Created default admin user: {admin_username}")
102+
logger.info("Created default admin user: %s", admin_username)
103103
else:
104104
should_save_admin = False
105105
if DISTRIBOX_ADMIN_POLICY not in admin.policies:
@@ -115,7 +115,7 @@ async def startup_event():
115115
if should_save_admin:
116116
session.add(admin)
117117
session.commit()
118-
print(f"✓ Admin user already exists: {admin_username}")
118+
logger.info("Admin user already exists: %s", admin_username)
119119

120120
users = session.exec(
121121
select(UserORM).where(UserORM.password.is_not(None))
@@ -128,14 +128,14 @@ async def startup_event():
128128
migrated_usernames.append(user.username)
129129
if migrated_usernames:
130130
session.commit()
131-
print(
132-
"Encrypted plaintext passwords for users: " +
133-
", ".join(migrated_usernames)
131+
logger.info(
132+
"Encrypted plaintext passwords for users: %s",
133+
", ".join(migrated_usernames),
134134
)
135135

136136
asyncio.create_task(_enforce_event_deadlines())
137137
asyncio.create_task(_check_stale_slaves())
138-
print(f"✓ Starting in MASTER mode")
138+
logger.info("Starting in MASTER mode")
139139

140140

141141
async def _enforce_event_deadlines():

backend/app/services/event_service.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
1+
import logging
12
import re
23
import uuid
34
from datetime import datetime
@@ -19,6 +20,8 @@
1920
from app.services.slave_service import SlaveService
2021
from app.services.slave_client import slave_get_host_info
2122

23+
logger = logging.getLogger(__name__)
24+
2225

2326
def _sanitize_name(name: str) -> str:
2427
sanitized = re.sub(r"[^a-z0-9-]", "-", name.lower().strip())
@@ -166,9 +169,6 @@ def _pick_node_for_vm(required_mem: int, required_vcpus: int, required_disk: int
166169
167170
Returns None for master, or slave UUID for a slave node.
168171
"""
169-
import logging
170-
logger = logging.getLogger(__name__)
171-
172172
# Check master first
173173
try:
174174
master = HostService.get_host_info()

backend/app/services/host_service.py

Lines changed: 0 additions & 54 deletions
Original file line numberDiff line numberDiff line change
@@ -4,60 +4,6 @@
44
from app.services.vm_service import VmService
55
from app.core.config import system_monitor
66

7-
# cpu_total_usage = 0
8-
# usage_per_cpu = []
9-
10-
# def get_cpu_usage_percent(cpu_idle_time_t2, cpu_idle_time_t1, cpu_total_time_t2, cpu_total_time_t1):
11-
# return (1 - (cpu_idle_time_t2 - cpu_idle_time_t1)/
12-
# (sum(cpu_total_time_t2) - sum(cpu_total_time_t1))) * 100
13-
14-
# def get_cpu_counters():
15-
# per_cpus = psutil.cpu_times(percpu=True)
16-
# total = Counter()
17-
# for cpu in per_cpus:
18-
# total.update(cpu._asdict())
19-
# cpu_total = psutil.cpu_times()
20-
# return {
21-
# "per_cpus": per_cpus,
22-
# "cpu_total": cpu_total
23-
# }
24-
25-
# def get_cpu_usage():
26-
# global cpu_total_usage, usage_per_cpu
27-
# while True:
28-
# cpu_usage_t1 = get_cpu_counters()
29-
# sleep(3)
30-
# cpu_usage_t2 = get_cpu_counters()
31-
# cpu_total_usage = get_cpu_usage_percent(cpu_usage_t2["cpu_total"].idle, cpu_usage_t1["cpu_total"].idle, cpu_usage_t2["cpu_total"], cpu_usage_t1["cpu_total"])
32-
# usage_per_cpu = []
33-
# for i in range(len(cpu_usage_t1["per_cpus"])):
34-
# usage_per_cpu.append(round(get_cpu_usage_percent(cpu_usage_t2["per_cpus"][i].idle, cpu_usage_t1["per_cpus"][i].idle, cpu_usage_t2["per_cpus"][i], cpu_usage_t1["per_cpus"][i]), 2))
35-
36-
37-
# Thread(target=get_cpu_usage, daemon=True).start()
38-
39-
# cpu_overall_time_t1 = psutil.cpu_times(percpu=True)
40-
# cpu_total_time_t1 = psutil.cpu_times()
41-
# print(sum(cpu_total_time_t1))
42-
# cpu_idle_time_t1 = cpu_total_time_t1.idle
43-
44-
45-
# sleep(3)
46-
# cpu_total_time_t2 = psutil.cpu_times()
47-
# cpu_idle_time_t2 = cpu_total_time_t2.idle
48-
49-
# percent_used = (1 - (cpu_idle_time_t2 - cpu_idle_time_t1)/ (sum(cpu_total_time_t2) - sum(cpu_total_time_t1))) * 100
50-
# print(percent_used)
51-
# print(psutil.cpu_percent(interval=1))
52-
# print(sum(psutil.cpu_times(percpu=True)))
53-
54-
55-
# print(psutil.cpu_times_percent(interval=2))
56-
57-
58-
# for dom_stats in stats:
59-
# data = dom_stats[1]
60-
# print(data.get('cpu.system'))
617

628
class HostService:
639

backend/app/services/image_service.py

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,13 @@
1+
import logging
12
import subprocess
23
import json
34
import yaml
45
from app.models.image import ImageRead
56
from pathlib import Path
67
from app.core.config import s3, distribox_bucket_registry
78

9+
logger = logging.getLogger(__name__)
10+
811

912
class ImageService():
1013

@@ -17,10 +20,10 @@ def get_distribox_image(image_name):
1720
data = yaml.safe_load(content)
1821
return ImageRead(**data)
1922
except s3.exceptions.NoSuchKey:
20-
print(f"No image found")
23+
logger.warning("No image found: %s", image_name)
2124
return None
22-
except Exception as e:
23-
print(f"Error")
25+
except Exception:
26+
logger.exception("Error fetching image: %s", image_name)
2427
return None
2528

2629
@staticmethod
@@ -39,6 +42,6 @@ def get_distribox_image_list():
3942
try:
4043
image = ImageRead(**data)
4144
images.append(image)
42-
except Exception as e:
43-
print(f"Error on {key}: {e}")
45+
except Exception:
46+
logger.warning("Failed to parse image %s", key)
4447
return images

0 commit comments

Comments (0)