Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

fix: splitting tier1 into work & scratch #194

Merged
Merged 1 commit into the base branch on Feb 15, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 4 additions & 2 deletions utils/cli/hpc_access_cli/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -190,8 +190,10 @@ class LdapGroup(BaseModel):
class ResourceData(BaseModel):
"""A resource request/usage for a user."""

#: Storage on tier 1 in TB.
tier1: float = 0.0
#: Storage on tier 1 in TB (work).
tier1_work: float = 0.0
#: Storage on tier 1 in TB (scratch).
tier1_scratch: float = 0.0
#: Storage on tier 2 (mirrored) in TB.
tier2_mirrored: float = 0.0
#: Storage on tier 2 (unmirrored) in TB.
Expand Down
34 changes: 23 additions & 11 deletions utils/cli/hpc_access_cli/states.py
Original file line number Diff line number Diff line change
Expand Up @@ -141,12 +141,15 @@ def _build_fs_directories(self, hpcaccess_state: HpcaccessState) -> Dict[str, Fs
continue
owner = hpcaccess_state.hpc_users[group.owner]
# Tier 1
quota_work = (group.resources_requested or ResourceData).tier1
quota_work = (group.resources_requested or ResourceData).tier1_work
if not quota_work:
continue
quota_scratch = (group.resources_requested or ResourceData).tier1_scratch
if not quota_scratch:
continue
for volume, quota in (
("home", QUOTA_HOME_BYTES),
("scratch", QUOTA_SCRATCH_BYTES),
("scratch", quota_scratch * 1024 * 1024 * 1024 * 1024),
("work", quota_work * 1024 * 1024 * 1024 * 1024),
):
result[f"/data/cephfs-1/{volume}/groups/{group.name}"] = FsDirectory(
Expand Down Expand Up @@ -192,12 +195,15 @@ def _build_fs_directories(self, hpcaccess_state: HpcaccessState) -> Dict[str, Fs
owning_group = hpcaccess_state.hpc_groups[project.group]
owner = hpcaccess_state.hpc_users[owning_group.owner]
# Tier 1
quota_work = (project.resources_requested or ResourceData).tier1
quota_work = (project.resources_requested or ResourceData).tier1_work
if not quota_work:
continue # no quota requested
continue
quota_scratch = (project.resources_requested or ResourceData).tier1_scratch
if not quota_scratch:
continue
for volume, quota in (
("home", QUOTA_HOME_BYTES),
("scratch", QUOTA_SCRATCH_BYTES),
("scratch", quota_scratch * 1024 * 1024 * 1024 * 1024),
("work", quota_work * 1024 * 1024 * 1024 * 1024),
):
result[f"/data/cephfs-1/{volume}/projects/{project.name}"] = FsDirectory(
Expand Down Expand Up @@ -365,12 +371,14 @@ def build_hpcuser(u: LdapUser) -> HpcUser:
last_name=u.sn,
phone_number=u.gecos.office_phone if u.gecos else None,
resources_requested=ResourceData(
tier1=0,
tier1_work=0,
tier1_scratch=0,
tier2_mirrored=0,
tier2_unmirrored=0,
),
resources_used=ResourceData(
tier1=0,
tier1_work=0,
tier1_scratch=0,
tier2_mirrored=0,
tier2_unmirrored=0,
),
Expand All @@ -394,12 +402,14 @@ def build_hpcgroup(g: LdapGroup) -> Optional[HpcGroup]:
owner=user_uuids[user_by_dn[g.owner_dn].uid],
delegate=user_uuids[user_by_dn[g.delegate_dns[0]].uid] if g.delegate_dns else None,
resources_requested=ResourceData(
tier1=0,
tier1_work=0,
tier1_scratch=0,
tier2_mirrored=0,
tier2_unmirrored=0,
),
resources_used=ResourceData(
tier1=0,
tier1_work=0,
tier1_scratch=0,
tier2_mirrored=0,
tier2_unmirrored=0,
),
Expand All @@ -426,12 +436,14 @@ def build_hpcproject(p: LdapGroup) -> Optional[HpcProject]:
group=group_uuids[group_by_gid[user_by_dn[p.owner_dn].gid_number].cn],
delegate=user_uuids[user_by_dn[p.delegate_dns[0]].uid] if p.delegate_dns else None,
resources_requested=ResourceData(
tier1=0,
tier1_work=0,
tier1_scratch=0,
tier2_mirrored=0,
tier2_unmirrored=0,
),
resources_used=ResourceData(
tier1=0,
tier1_work=0,
tier1_scratch=0,
tier2_mirrored=0,
tier2_unmirrored=0,
),
Expand Down
Loading