Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

PowerTarget device #3

Merged
merged 5 commits into from
Jan 23, 2025
Merged
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
97 changes: 97 additions & 0 deletions zap/devices/power_target.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,97 @@
import numpy as np
import scipy.sparse as sp
import cvxpy
import torch
from attrs import define, field, Factory

from typing import Optional
from numpy.typing import NDArray

from .abstract import AbstractDevice, get_time_horizon, make_dynamic


@define(kw_only=True, slots=False)
class PowerTarget(AbstractDevice):
    """A single-node device that tries to match its power output to a target value."""

    # Total number of nodes in the network this device is embedded in.
    num_nodes: int
    # Node index (or indices) this device attaches to; exposed via `terminals`.
    terminal: NDArray
    # Power profile the device is penalized for deviating from.
    # Passed through `make_dynamic` — presumably expands a static profile over
    # the time horizon; TODO confirm against `abstract.make_dynamic`.
    target_power: NDArray = field(converter=make_dynamic)
    # Order of the norm used in the deviation penalty (defaults to L2).
    # NOTE(review): the ADMM prox update below hard-codes the L2 case — confirm
    # other orders are actually supported before changing this.
    norm_order: int = field(default=2)
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We're allowing the user to set the norm order to determine the penalty function, but in the ADMM proximal update we are hard-coding the update for the L2 norm squared right? That might be an issue if someone expects that they can change this to an L1 norm or something for example

Copy link
Owner Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Good catch. I now restrict the norm to be either L1 or L2 and added different prox updates for each one.


    @property
    def terminals(self):
        """Terminal (node) assignment of this single-terminal device."""
        return self.terminal

@property
def time_horizon(self):
return get_time_horizon(self.min_power)

# ====
# CORE MODELING FUNCTIONS
# ====

    def equality_constraints(self, power, angle, _, target_power=None, la=np):
        """A power target imposes no equality constraints."""
        return []

    def inequality_constraints(self, power, angle, _, target_power=None, la=np):
        """A power target imposes no inequality constraints."""
        return []

    def operation_cost(self, power, angle, _, target_power=None, la=np):
        """Penalty for deviating from the target power.

        Returns 0.5 * ||power[0] - target||, using `self.norm_order` and the
        linear-algebra backend `la` (torch, numpy, or cvxpy). Falls through and
        implicitly returns None for any other backend.

        NOTE(review): for norm_order == 2 all three branches compute the
        *unsquared* norm, while `_admm_prox_update` below solves the prox for
        the *squared* L2 penalty (see its derivation comment) — confirm which
        is intended and make the two consistent.
        """
        target_power = self.parameterize(target_power=target_power, la=la)

        if la == torch:
            return 0.5 * torch.linalg.vector_norm(power[0] - target_power, ord=self.norm_order)
        if la == np:
            # reshape(-1): np.linalg.norm with `ord` expects a vector, so the
            # (terminal, time) residual is flattened first.
            return 0.5 * np.linalg.norm((power[0] - target_power).reshape(-1), ord=self.norm_order)
        if la == cvxpy:
            return 0.5 * cvxpy.norm(power[0] - target_power, self.norm_order)

# ====
# DIFFERENTIATION
# ====

    def _equality_matrices(self, equalities, target_power=None, la=np):
        """No equality constraints, so the Jacobian list is returned unchanged."""
        return equalities

    def _inequality_matrices(self, inequalities, target_power=None, la=np):
        """No inequality constraints, so the Jacobian list is returned unchanged."""
        return inequalities

    def _hessian_power(self, hessians, power, angle, _, target_power=None, la=np):
        """Accumulate this device's cost curvature w.r.t. power into `hessians[0]`.

        NOTE(review): diag(power - target) is the *gradient* of the squared-L2
        cost arranged on a diagonal, not its Hessian (which would be the
        identity for 0.5 * ||p - t||^2, and is residual-dependent but not
        diagonal for the unsquared norm) — confirm this is what callers expect.
        """
        target_power = self.parameterize(target_power=target_power, la=la)

        hessians[0] += sp.diags((power[0] - target_power).ravel())
        return hessians

# ====
# ADMM FUNCTIONS
# ====

def admm_prox_update(
self,
rho_power,
rho_angle,
power,
angle,
target_power=None,
power_weights=None,
angle_weights=None,
):
target_power = self.parameterize(target_power=target_power)
assert angle is None

return _admm_prox_update(power, rho_power, target_power)


@torch.jit.script
def _admm_prox_update(power: list[torch.Tensor], rho: float, target_power: torch.Tensor):
# Problem is
# min_p (1/2) * (p - p_target)^2 + (rho / 2) || (p - power) ||_2^2
# Objective derivative is
# (p - p_target) + rho (p - power) = 0
# Which is solved by
# p = (p_target + rho * power) / (1 + rho)

p = (target_power + rho * power[0]) / (1 + rho)

return [p], None
Loading