diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index a16f9a2..ed7288a 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -1,10 +1,10 @@
-name: CI
+name: CI workflow

 on:
   push:
-    branches: [ "main" ]
+    branches: ["main"]
   pull_request:
-    branches: [ "main" ]
+    branches: ["main"]

 jobs:
   build:
@@ -13,27 +13,34 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        python-version: ["3.9", "3.10", "3.11"]
+        python-version: ["3.10", "3.11", "3.12"]

     steps:
     - uses: actions/checkout@v3
+
     - name: Set up Python ${{ matrix.python-version }}
       uses: actions/setup-python@v3
       with:
         python-version: ${{ matrix.python-version }}
+
+    - name: Install poetry
+      run: python -m pip install poetry
+
     - name: Install dependencies
+      run: poetry install
+
+    - name: Lint and format with ruff
       run: |
-        python -m pip install --upgrade pip
-        python -m pip install flake8 pytest
-        if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
-    - name: Lint with flake8
-      run: |
-        # stop the build if there are Python syntax errors or undefined names
-        flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
-        # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
-        flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
-    - name: Test with pytest
-      env:
-        PYTHONPATH: ${{ github.workspace }}/src
-      run: |
-        pytest
+        poetry run ruff check --fix
+        poetry run ruff format
+
+    - name: Auto-commit ruff changes
+      uses: stefanzweifel/git-auto-commit-action@v4
+      with:
+        commit_message: "Lint and format with ruff"
+
+    - name: Perform type checking with mypy
+      run: poetry run mypy src
+
+    - name: Run tests
+      run: poetry run pytest --cov=src
diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
index 2194228..16066e8 100644
--- a/.github/workflows/docs.yml
+++ b/.github/workflows/docs.yml
@@ -1,7 +1,7 @@
 name: Generate API documentation
 on:
   push:
-    branches: [ "main" ]
+    branches: ["main"]

 jobs:
   deploy:
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 1c0b6e0..cd4b376 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -17,7 +17,7 @@ jobs:
     - name: Set up Python
       uses: actions/setup-python@v3
       with:
-        python-version: '3.11'
+        python-version: "3.11"
     - name: Install dependencies
       run: |
         python -m pip install --upgrade pip
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index ddbf496..64318bb 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -5,10 +5,12 @@ repos:
   - id: check-yaml
   - id: end-of-file-fixer
   - id: trailing-whitespace
-- repo: https://github.com/psf/black
-  rev: 22.10.0
+- repo: https://github.com/astral-sh/ruff-pre-commit
+  rev: v0.5.6
   hooks:
-  - id: black
+  - id: ruff
+  - id: ruff-format
+    args: [--line-length=100]
 - repo: https://github.com/pycqa/pydocstyle
   rev: 6.3.0
   hooks:
diff --git a/pyproject.toml b/pyproject.toml
index a52a4ec..7ee4b35 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,27 +1,54 @@
-[build-system]
-requires = ["setuptools >= 61.0"]
-build-backend = "setuptools.build_meta"
-
-[project]
+[tool.poetry]
 name = "halfspace-optimizer"
-version = "0.1.1"
+version = "0.1.2"
+description = "Cutting-plane solver for mixed-integer convex optimization problems"
+license = "MIT"
 authors = [
-    { name="Joshua Ivanhoe", email="joshua.k.ivanhoe@gmail.com" },
+    "Joshua Ivanhoe <joshua.k.ivanhoe@gmail.com>"
 ]
-description = "Cutting-plane solver for mixed-integer convex optimization problems"
 readme = "README.md"
-license = {file = "LICENSE"}
-requires-python = ">=3.9,<3.12"
">=3.9,<3.12" +repository = "https://github.com/joshivanhoe/halfspace" +documentation = "https://joshivanhoe.github.io/halfspace/" classifiers = [ "Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", ] -dynamic = ["dependencies"] +packages = [ + { include = "halfspace", from = "src" } +] + +[tool.poetry.dependencies] +python = ">=3.10,<3.13" +mip = ">=1.15.0" +numpy = ">=1.25.2" +pandas = ">=2.0.3" + +[tool.poetry.group.dev.dependencies] +mypy = "*" +pre-commit = "*" +ruff = "*" -[tool.setuptools.dynamic] -dependencies = {file = ["requirements.txt"]} +[tool.poetry.group.test.dependencies] +pytest = "*" +pytest-cov = "*" -[project.urls] -Homepage = "https://github.com/joshivanhoe/halfspace" -Issues = "https://github.com/joshivanhoe/halfspace/issues" +[tool.ruff] +line-length = 100 + +[tool.ruff.lint] +extend-select = ["D"] # pydocstyle + +[tool.ruff.lint.pydocstyle] +convention = "google" + +[tool.ruff.lint.per-file-ignores] +"tests/**" = ["D"] # Ignore all directories named `tests` for pydocstyle + +[tool.mypy] +ignore_missing_imports = true +exclude = ["convex_term.py"] + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 00698ec..0000000 --- a/requirements.txt +++ /dev/null @@ -1,7 +0,0 @@ -mip>=1.15.0 -numpy>=1.25.2 -pandas>=2.0.3 -pre-commit>=3.6.0 -pytest>=7.4.4 -pytest-cov>=4.1.0 -tomli>=2.0.1 diff --git a/src/halfspace/__init__.py b/src/halfspace/__init__.py index 2f4d902..64b0b79 100644 --- a/src/halfspace/__init__.py +++ b/src/halfspace/__init__.py @@ -1,2 +1,5 @@ """The `halfspace` module implements a modelling class for mixed-integer convex optimization problems.""" + from .model import Model + +__all__ = ["Model"] diff --git a/src/halfspace/convex_term.py b/src/halfspace/convex_term.py index 20ec1df..26da12b 100644 --- a/src/halfspace/convex_term.py +++ b/src/halfspace/convex_term.py @@ -3,18 +3,20 @@ It provides a modular framework for generating cutting planes. """ -from typing import Union, Callable, Optional, Iterable +from typing import Callable, Iterable +from typing import TypeAlias, Literal, overload import mip import numpy as np +from .utils import standard_basis_vector -QueryPoint = dict[mip.Var, float] -Var = Union[mip.Var, Iterable[mip.Var], mip.LinExprTensor] -Input = Union[float, Iterable[float], np.ndarray] -Func = Callable[[Input], float] -FuncGrad = Callable[[Input], tuple[float, Union[float, np.ndarray]]] -Grad = Callable[[Input], Union[float, np.ndarray]] +QueryPoint: TypeAlias = dict[mip.Var, float] +Var: TypeAlias = mip.Var | Iterable[mip.Var] | mip.LinExprTensor +Input: TypeAlias = float | Iterable[float] | np.ndarray +Func: TypeAlias = Callable[[Input], float] +FuncGrad: TypeAlias = Callable[[Input], tuple[float, float | np.ndarray]] +Grad: TypeAlias = Callable[[Input], float | np.ndarray] class ConvexTerm: @@ -39,11 +41,11 @@ class ConvexTerm: def __init__( self, var: Var, - func: Union[Func, FuncGrad], - grad: Optional[Union[Grad, bool]] = None, + func: Func | FuncGrad, + grad: Grad | bool | None = None, step_size: float = 1e-6, name: str = "", - ): + ) -> None: """Convex term constructor. Args: @@ -59,9 +61,15 @@ def __init__( self.step_size = step_size self.name = name + @overload + def __call__(self, query_point: QueryPoint, return_grad: Literal[False] = False) -> float: ... 
+
+    @overload
     def __call__(
-        self, query_point: QueryPoint, return_grad: bool = False
-    ) -> Union[float, tuple[float, Union[float, np.ndarray]]]:
+        self, query_point: QueryPoint, return_grad: Literal[True]
+    ) -> tuple[float, float | np.ndarray]: ...
+
+    def __call__(self, query_point: QueryPoint, return_grad: bool = False) -> float | tuple[float, float | np.ndarray]:
         """Evaluate the term and (optionally) its gradient.

         Args:
@@ -96,20 +104,18 @@ def generate_cut(self, query_point: QueryPoint) -> mip.LinExpr:
         Returns:
             The linear constraint representing the cutting plane.
         """
-        fun, grad = self(query_point=query_point, return_grad=True)
+        func, grad = self(query_point=query_point, return_grad=True)
         x = self._get_input(query_point=query_point)
         if self.is_multivariable:
-            return mip.xsum(grad * (np.array(self.var) - x)) + fun
-        return grad * (self.var - x) + fun
+            return mip.xsum(grad * (np.array(self.var) - x)) + func
+        return grad * (self.var - x) + func

     def _get_input(self, query_point: QueryPoint) -> Input:
         if self.is_multivariable:
             return np.array([query_point[var] for var in self.var])
         return query_point[self.var]

-    def _evaluate_func(
-        self, x: Input
-    ) -> Union[float, tuple[float, Union[float, np.ndarray]]]:
+    def _evaluate_func(self, x: Input) -> float | tuple[float, float | np.ndarray]:
         """Evaluate the function value.

         If `grad=True`, then both the value of the function and its gradient are returned.
@@ -120,7 +126,7 @@
             return self.func(*x)
         raise TypeError(f"Input of type '{type(x)}' not supported.")

-    def _evaluate_grad(self, x: Input) -> Union[float, np.ndarray]:
+    def _evaluate_grad(self, x: Input) -> float | np.ndarray:
         """Evaluate the gradient."""
         if not self.grad:
             return self._approximate_grad(x=x)
@@ -130,21 +136,18 @@
             return self.grad(*x)
         raise TypeError(f"Input of type '{type(x)}' not supported.")

-    def _approximate_grad(self, x: Input) -> Union[float, np.ndarray]:
+    def _approximate_grad(self, x: Input) -> float | np.ndarray:
         """Approximate the gradient of the function at a point using the central finite difference method."""
         if self.is_multivariable:
-            indexes = np.arange(len(x))
-            return np.array(
-                [
-                    (
-                        self._evaluate_func(x=x + self.step_size / 2 * (indexes == i))
-                        - self._evaluate_func(x=x - self.step_size / 2 * (indexes == i))
-                    )
-                    / self.step_size
-                    for i in indexes
-                ]
-            )
+            n_dim = len(x)
+            grad = np.zeros(n_dim)
+            for i in range(n_dim):
+                e_i = standard_basis_vector(i=i, n_dim=n_dim)
+                grad[i] = (
+                    self._evaluate_func(x=x + self.step_size / 2 * e_i) - self._evaluate_func(x=x - self.step_size / 2 * e_i)
+                ) / self.step_size
+            return grad
         return (
-            self._evaluate_func(x=x + self.step_size / 2)
-            - self._evaluate_func(x=x - self.step_size / 2)
+            self._evaluate_func(x=x + self.step_size / 2) - self._evaluate_func(x=x - self.step_size / 2)
         ) / self.step_size
diff --git a/src/halfspace/model.py b/src/halfspace/model.py
index 2f85cbc..0be9d1c 100644
--- a/src/halfspace/model.py
+++ b/src/halfspace/model.py
@@ -4,7 +4,7 @@
 """

 import logging
-from typing import Optional, Iterable, Union
+from typing import Iterable

 import mip
 import numpy as np
@@ -40,10 +40,10 @@ def __init__(
         max_gap_abs: float = 1e-4,
         infeasibility_tol: float = 1e-4,
         step_size: float = 1e-6,
-        smoothing: Optional[float] = 0.5,
-        solver_name: Optional[str] = "CBC",
-        log_freq: Optional[int] = 1,
-    ):
+        smoothing: float | None = 0.5,
+        solver_name: str | None = "CBC",
+        log_freq: int | None = 1,
+    ) -> None:
         """Optimization model constructor.

         Args:
@@ -81,13 +81,13 @@ def reset(self) -> None:
         self._best_solution: dict[mip.Var, float] = dict()
         self._objective_value: float = (1 if self.minimize else -1) * mip.INF
         self._best_bound: float = -self._objective_value
-        self._status: Optional[mip.OptimizationStatus] = None
+        self._status: mip.OptimizationStatus | None = None
         self._search_log: list[dict[str, float]] = list()

     def add_var(
         self,
-        lb: Optional[float] = None,
-        ub: Optional[float] = None,
+        lb: float | int = 0,
+        ub: float | int = mip.INF,
         var_type: str = mip.CONTINUOUS,
         name: str = "",
     ) -> mip.Var:
@@ -110,8 +110,8 @@ def add_var_tensor(
     def add_var_tensor(
         self,
         shape: tuple[int, ...],
-        lb: Optional[float] = None,
-        ub: Optional[float] = None,
+        lb: float | int = 0,
+        ub: float | int = mip.INF,
         var_type: str = mip.CONTINUOUS,
         name: str = "",
     ) -> mip.LinExprTensor:
@@ -152,8 +152,8 @@ def add_linear_constr(self, constraint: mip.LinExpr, name: str = "") -> mip.Cons
     def add_nonlinear_constr(
         self,
         var: Var,
-        func: Union[Func, FuncGrad],
-        grad: Optional[Union[Grad, bool]] = None,
+        func: Func | FuncGrad,
+        grad: Grad | bool | None = None,
         name: str = "",
     ) -> ConvexTerm:
         """Add a nonlinear constraint to the model.
@@ -187,8 +187,8 @@ def add_nonlinear_constr(
     def add_objective_term(
         self,
         var: Var,
-        func: Union[Func, FuncGrad],
-        grad: Optional[Union[Grad, bool]] = None,
+        func: Func | FuncGrad,
+        grad: Grad | bool | None = None,
         name: str = "",
     ) -> ConvexTerm:
         """Add an objective term to the model.
@@ -222,8 +222,8 @@ def add_objective_term(
     def optimize(
         self,
         max_iters: int = 100,
-        max_iters_no_improvement: Optional[int] = None,
-        max_seconds_per_iter: Optional[float] = None,
+        max_iters_no_improvement: int | None = None,
+        max_seconds_per_iter: float | None = None,
     ) -> mip.OptimizationStatus:
         """Optimize the model.
@@ -243,13 +243,10 @@ def optimize(
         self._model.objective = bound

         # Initialize search
-        query_point = {
-            x: self._start.get(x) or (x.lb + x.ub) / 2 for x in self._model.vars
-        }
+        query_point = {x: self._start.get(x) or (x.lb + x.ub) / 2 for x in self._model.vars}
         iters_no_improvement = 0

         for i in range(max_iters):
-
             # Add cuts for violated nonlinear constraints
             for constr in self.nonlinear_constrs:
                 if constr(query_point=query_point) > self.infeasibility_tol:
                     expr = constr.generate_cut(query_point=query_point)
                     self._model.add_constr(expr <= 0)

             # Add objective cut
-            expr = mip.xsum(
-                term.generate_cut(query_point=query_point)
-                for term in self.objective_terms
-            )
+            expr = mip.xsum(term.generate_cut(query_point=query_point) for term in self.objective_terms)
             if self.minimize:
                 self._model.add_constr(bound >= expr)
             else:
@@ -274,20 +268,15 @@
                 mip.OptimizationStatus.OPTIMAL,
                 mip.OptimizationStatus.FEASIBLE,
             ):
-                logging.info(
-                    f"Solve unsuccessful - exiting with optimization status: '{status.value}'."
-                )
+                logging.info(f"Solve unsuccessful - exiting with optimization status: '{status.value}'.")
                 self._status = status
                 return self.status

             # Update best solution/objective value and query point
             solution = {var: var.x for var in self._model.vars}
-            objective_value_new = sum(
-                term(query_point=solution) for term in self.objective_terms
-            )
+            objective_value_new = sum(term(query_point=solution) for term in self.objective_terms)
             if self.minimize == (objective_value_new < self.objective_value) and all(
-                constr(solution) <= self.infeasibility_tol
-                for constr in self.nonlinear_constrs
+                constr(solution) <= self.infeasibility_tol for constr in self.nonlinear_constrs
             ):
                 iters_no_improvement = 0
                 self._objective_value = objective_value_new
@@ -297,8 +286,7 @@
                 iters_no_improvement += 1
                 if self.smoothing is not None:
                     query_point = {
-                        var: self.smoothing * query_point[var]
-                        + (1 - self.smoothing) * solution[var]
+                        var: self.smoothing * query_point[var] + (1 - self.smoothing) * solution[var]
                         for var in self._model.vars
                     }
                 else:
@@ -306,13 +294,9 @@
             # Update best bound (clip values to prevent numerical errors from affecting termination logic)
             if self.minimize:
-                self._best_bound = np.clip(
-                    bound.x, a_min=self.best_bound, a_max=self.objective_value
-                )
+                self._best_bound = np.clip(bound.x, a_min=self.best_bound, a_max=self.objective_value)
             else:
-                self._best_bound = np.clip(
-                    bound.x, a_min=self.objective_value, a_max=self.best_bound
-                )
+                self._best_bound = np.clip(bound.x, a_min=self.objective_value, a_max=self.best_bound)

             # Update log
             self._search_log.append(
@@ -331,20 +315,16 @@
             # Check early termination conditions
             if self.gap <= self.max_gap or self.gap_abs <= self.max_gap_abs:
-                logging.info(
-                    f"Optimality tolerance reached - terminating search early."
-                )
+                logging.info("Optimality tolerance reached - terminating search early.")
                 self._status = mip.OptimizationStatus.OPTIMAL
                 return self.status
             if max_iters_no_improvement is not None:
                 if iters_no_improvement >= max_iters_no_improvement:
-                    logging.info(
-                        f"Max iterations without improvement reached - terminating search early."
-                    )
+                    logging.info("Max iterations without improvement reached - terminating search early.")
                     self._status = mip.OptimizationStatus.FEASIBLE
                     return self.status

-        logging.info(f"Max iterations reached - terminating search.")
+        logging.info("Max iterations reached - terminating search.")
         if self.best_solution:
             self._status = mip.OptimizationStatus.FEASIBLE
         else:
@@ -355,9 +335,7 @@ def var_by_name(self, name: str) -> mip.Var:
         """Get a variable by name."""
         return self._model.var_by_name(name=name)

-    def var_value(
-        self, x: Union[mip.Var, mip.LinExprTensor, str]
-    ) -> Union[float, np.ndarray]:
+    def var_value(self, x: mip.Var | mip.LinExprTensor | str) -> float | np.ndarray:
         """Get the value of one or more decision variables corresponding to the best solution.

         Args:
@@ -373,9 +351,7 @@ def var_value(
         if isinstance(x, mip.Var):
             return self.best_solution[x]
         if isinstance(x, mip.LinExprTensor):
-            return np.array([self.best_solution[var] for var in x.flatten()]).reshape(
-                x.shape
-            )
+            return np.array([self.best_solution[var] for var in x.flatten()]).reshape(x.shape)
         if isinstance(x, Iterable):
             return np.array([self.best_solution[var] for var in x])
         raise TypeError(f"Input of type '{type(x)}' not supported.")
@@ -428,9 +404,7 @@ def best_bound(self) -> float:
     @property
     def gap(self) -> float:
         """Get the (relative) optimality gap."""
-        return self.gap_abs / max(
-            min(abs(self.objective_value), abs(self.best_bound)), 1e-10
-        )
+        return self.gap_abs / max(min(abs(self.objective_value), abs(self.best_bound)), 1e-10)

     @property
     def gap_abs(self) -> float:
@@ -448,7 +422,7 @@ def search_log(self) -> pd.DataFrame:
         return pd.DataFrame(self._search_log).set_index("iteration")

     @staticmethod
-    def sum(terms: Iterable[Union[mip.Var, mip.LinExpr]]) -> mip.LinExpr:
+    def sum(terms: Iterable[mip.Var | mip.LinExpr]) -> mip.LinExpr:
         """Create a linear expression from a summation."""
         return mip.xsum(terms=terms)

@@ -456,14 +430,14 @@ def _validate_params(self) -> None:
         check_scalar(
             x=self.max_gap,
             name="max_gap",
-            lb=0.0,
+            lb=0,
             var_type=float,
             include_boundaries=False,
         )
         check_scalar(
             x=self.max_gap_abs,
             name="max_gap_abs",
-            lb=0.0,
+            lb=0,
             var_type=float,
             include_boundaries=False,
         )
@@ -471,7 +445,7 @@ def _validate_params(self) -> None:
             x=self.infeasibility_tol,
             name="infeasibility_tol",
             var_type=float,
-            lb=0.0,
+            lb=0,
             include_boundaries=False,
         )
         if self.smoothing is not None:
@@ -479,8 +453,8 @@ def _validate_params(self) -> None:
                 x=self.smoothing,
                 name="smoothing",
                 var_type=float,
-                lb=0.0,
-                ub=1.0,
+                lb=0,
+                ub=1,
                 include_boundaries=False,
             )
         if self.log_freq is not None:
@@ -493,7 +467,7 @@ def _validate_params(self) -> None:
             )

     @staticmethod
-    def _validate_bounds(lb: float, ub: float, var_type: str) -> tuple[float, float]:
+    def _validate_bounds(lb: float | int, ub: float | int, var_type: str) -> tuple[float | int, float | int]:
         if var_type == mip.BINARY:
             lb, ub = 0, 1
         else:
diff --git a/src/halfspace/utils.py b/src/halfspace/utils.py
index b43779e..b2c6ba9 100644
--- a/src/halfspace/utils.py
+++ b/src/halfspace/utils.py
@@ -1,8 +1,9 @@
 """Utility functions for the `halfspace` package."""

 import logging
+from typing import Iterable, Any, Type

-from typing import Union, Iterable, Optional, Any, Type
+import numpy as np


 def log_table_header(columns: Iterable[str], width: int = 15) -> None:
@@ -25,7 +26,7 @@ def log_table_header(columns: Iterable[str], width: int = 15) -> None:
     logging.info(line)


-def log_table_row(values: Iterable[Union[float, int]], width: int = 15) -> None:
+def log_table_row(values: Iterable[float | int], width: int = 15) -> None:
     """Log a table row.

     Logging level is set to `logging.INFO`.
@@ -38,21 +39,16 @@ def log_table_row(values: Iterable[float | int], width: int = 15) -> None:

     Returns: None
     """
-    values = [
-        (f"{{:{width}}}" if isinstance(value, int) else f"{{:{width}.3e}}").format(
-            value
-        )
-        for value in values
-    ]
-    logging.info("|{}|".format("|".join(values)))
+    values_ = [(f"{{:{width}}}" if isinstance(value, int) else f"{{:{width}.3e}}").format(value) for value in values]
+    logging.info("|{}|".format("|".join(values_)))


 def check_scalar(
     x: Any,
     name: str,
-    var_type: Optional[Union[Type, tuple[Type, ...]]] = None,
-    lb: Optional[Union[float, int]] = None,
-    ub: Optional[Union[float, int]] = None,
+    var_type: Type | tuple[Type, ...] | None = None,
+    lb: float | int | None = None,
+    ub: float | int | None = None,
     include_boundaries: bool = True,
 ) -> None:
     """Check that a scalar satisfies certain conditions.
@@ -74,22 +70,21 @@ def check_scalar(
     Returns: None
     """
     if var_type is not None:
-        assert isinstance(
-            x, var_type
-        ), f"Variable '{name}' ({type(x)}) is not expected type ({var_type})."
+        assert isinstance(x, var_type), f"Variable '{name}' ({type(x)}) is not expected type ({var_type})."
     if lb is not None:
         if include_boundaries:
             assert x >= lb, f"Variable '{name}' ({x}) is less than lower bound ({lb})."
         else:
-            assert (
-                x > lb
-            ), f"Variable '{name}' ({x}) is less than or equal to lower bound ({lb})."
+            assert x > lb, f"Variable '{name}' ({x}) is less than or equal to lower bound ({lb})."
     if ub is not None:
         if include_boundaries:
-            assert (
-                x <= ub
-            ), f"Variable '{name}' ({x}) is greater than lower bound ({ub})."
+            assert x <= ub, f"Variable '{name}' ({x}) is greater than upper bound ({ub})."
         else:
-            assert (
-                x < ub
-            ), f"Variable '{name}' ({x}) is greater than or equal to lower bound ({ub})."
+            assert x < ub, f"Variable '{name}' ({x}) is greater than or equal to upper bound ({ub})."
+
+
+def standard_basis_vector(i: int, n_dim: int) -> np.ndarray:
+    """Return the ith standard basis vector in R^n."""
+    x = np.zeros(n_dim)
+    x[i] = 1
+    return x
diff --git a/tests/test_convex_term.py b/tests/test_convex_term.py
index 14249eb..d3953fc 100644
--- a/tests/test_convex_term.py
+++ b/tests/test_convex_term.py
@@ -1,5 +1,3 @@
-from typing import Union, Optional
-
 import mip
 import numpy as np
 import pytest
@@ -13,7 +11,7 @@ def _process_callbacks(
     grad: Grad,
     combine_grad: bool,
     approximate_grad: bool,
-) -> tuple[Union[Func, FuncGrad], Optional[Union[Grad, bool]]]:
+) -> tuple[Func | FuncGrad, Grad | bool | None]:
     if combine_grad and approximate_grad:
         raise ValueError
     if combine_grad:
@@ -30,14 +28,12 @@ def func_with_grad(*args, **kwargs):
 def _check_convex_term(
     term: ConvexTerm,
     expected_value: float,
-    expected_grad: Union[float, np.ndarray],
+    expected_grad: float | np.ndarray,
     expected_is_multivariable: bool,
     query_point: QueryPoint,
 ):
     # Check evaluation without gradient
-    assert term(query_point=query_point, return_grad=False) == pytest.approx(
-        expected_value
-    )
+    assert term(query_point=query_point, return_grad=False) == pytest.approx(expected_value)

     # Check evaluation with gradient
     value, grad = term(query_point=query_point, return_grad=True)
@@ -66,9 +62,7 @@ def model() -> Model:
         ({"x": 1}, 1, 2),
     ],
 )
-@pytest.mark.parametrize(
-    ["combine_grad", "approximate_grad"], [(True, False), (False, True), (False, False)]
-)
+@pytest.mark.parametrize(["combine_grad", "approximate_grad"], [(True, False), (False, True), (False, False)])
 def test_single_variable_term(
     model: Model,
     query_point: dict[str, float],
@@ -93,9 +87,7 @@ def test_single_variable_term(
         expected_value=expected_value,
         expected_grad=expected_grad,
         expected_is_multivariable=False,
-        query_point={
-            model.var_by_name(name=name): value for name, value in query_point.items()
-        },
+        query_point={model.var_by_name(name=name): value for name, value in query_point.items()},
     )


@@ -106,9 +98,7 @@ def test_single_variable_term(
         ({"x": 1, "y": 2}, 5, np.array([2, 4])),
     ],
 )
-@pytest.mark.parametrize(
-    ["combine_grad", "approximate_grad"], [(True, False), (False, True), (False, False)]
-)
+@pytest.mark.parametrize(["combine_grad", "approximate_grad"], [(True, False), (False, True), (False, False)])
 def test_multivariable_term(
     model: Model,
     query_point: dict[str, float],
@@ -133,9 +123,7 @@ def test_multivariable_term(
         expected_value=expected_value,
         expected_grad=expected_grad,
         expected_is_multivariable=True,
-        query_point={
-            model.var_by_name(name=name): value for name, value in query_point.items()
-        },
+        query_point={model.var_by_name(name=name): value for name, value in query_point.items()},
     )


@@ -146,9 +134,7 @@ def test_multivariable_term(
         ({"z_0": 1, "z_1": 2}, 5, np.array([2, 4])),
     ],
 )
-@pytest.mark.parametrize(
-    ["combine_grad", "approximate_grad"], [(True, False), (False, True), (False, False)]
-)
+@pytest.mark.parametrize(["combine_grad", "approximate_grad"], [(True, False), (False, True), (False, False)])
 def test_var_tensor_term(
     model: Model,
     query_point: dict[str, float],
@@ -173,7 +159,5 @@ def test_var_tensor_term(
         expected_value=expected_value,
         expected_grad=expected_grad,
         expected_is_multivariable=True,
-        query_point={
-            model.var_by_name(name=name): value for name, value in query_point.items()
-        },
+        query_point={model.var_by_name(name=name): value for name, value in query_point.items()},
     )
diff --git a/tests/test_model.py b/tests/test_model.py
index 076ab36..95a7c49 100644
--- a/tests/test_model.py
+++ b/tests/test_model.py
@@ -1,5 +1,3 @@
-from typing import Optional
-
 import mip
 import numpy as np
 import pandas as pd
@@ -13,14 +11,12 @@ def _check_solution(
     model: Model,
-    expected_objective_value: Optional[float],
-    expected_solution: Optional[dict[Var, float]],
+    expected_objective_value: float | None = None,
+    expected_solution: dict[Var, float] | None = None,
     expected_status: mip.OptimizationStatus = mip.OptimizationStatus.OPTIMAL,
 ):
     if expected_objective_value is not None:
-        assert model.objective_value == pytest.approx(
-            expected_objective_value, abs=model.max_gap_abs
-        )
+        assert model.objective_value == pytest.approx(expected_objective_value, abs=model.max_gap_abs)
     if expected_solution is not None:
         for x, expected_value in expected_solution.items():
             assert model.var_value(x=x) == pytest.approx(expected_value, abs=VAR_TOL)
@@ -74,9 +70,7 @@ def test_multivariable_variable_no_constraints():
     model = Model()
     x = model.add_var(lb=0, ub=1)
     y = model.add_var(lb=0, ub=1)
-    model.add_objective_term(
-        var=(x, y), func=lambda x, y: (x - 0.25) ** 2 + (y - 0.25) ** 2 + 1
-    )
+    model.add_objective_term(var=(x, y), func=lambda x, y: (x - 0.25) ** 2 + (y - 0.25) ** 2 + 1)
     model.optimize()
     _check_solution(
         model=model,
@@ -88,9 +82,7 @@ def test_multivariable_variable_as_tensor_no_constraints():
     model = Model()
     x = model.add_var_tensor(shape=(2,), lb=0, ub=1)
-    model.add_objective_term(
-        var=x, func=lambda x: (x[0] - 0.25) ** 2 + (x[1] - 0.25) ** 2 + 1
-    )
+    model.add_objective_term(var=x, func=lambda x: (x[0] - 0.25) ** 2 + (x[1] - 0.25) ** 2 + 1)
     model.optimize()
     _check_solution(
         model=model,
@@ -103,9 +95,7 @@ def test_multivariable_linear_constraint():
     model = Model()
     x = model.add_var(lb=0, ub=1)
     y = model.add_var(lb=0, ub=1)
-    model.add_objective_term(
-        var=(x, y), func=lambda x, y: (x - 0.25) ** 2 + (y - 0.25) ** 2 + 1
-    )
+    model.add_objective_term(var=(x, y), func=lambda x, y: (x - 0.25) ** 2 + (y - 0.25) ** 2 + 1)
     model.add_linear_constr(100 * x + y <= 0.25)
     model.optimize()
     _check_solution(
@@ -119,29 +109,18 @@ def test_multivariable_linear_constraint_infeasible():
     model = Model()
     x = model.add_var(lb=0, ub=1)
     y = model.add_var(lb=0, ub=1)
-    model.add_objective_term(
-        var=(x, y), func=lambda x, y: (x - 0.25) ** 2 + (y - 0.25) ** 2 + 1
-    )
+    model.add_objective_term(var=(x, y), func=lambda x, y: (x - 0.25) ** 2 + (y - 0.25) ** 2 + 1)
     model.add_linear_constr(x + y >= 3)
     model.optimize()
-    _check_solution(
-        model=model,
-        expected_objective_value=None,
-        expected_solution=None,
-        expected_status=mip.OptimizationStatus.INFEASIBLE,
-    )
+    _check_solution(model=model, expected_status=mip.OptimizationStatus.INFEASIBLE)


 def test_multivariable_nonlinear_constraint():
     model = Model(max_gap_abs=1e-2)
     x = model.add_var(lb=0, ub=1)
     y = model.add_var(lb=0, ub=1)
-    model.add_objective_term(
-        var=(x, y), func=lambda x, y: (x - 0.25) ** 2 + (y - 0.25) ** 2 + 1
-    )
-    model.add_nonlinear_constr(
-        var=(x, y), func=lambda x, y: (80 * x) ** 2 + y**2 - 0.25**2
-    )
+    model.add_objective_term(var=(x, y), func=lambda x, y: (x - 0.25) ** 2 + (y - 0.25) ** 2 + 1)
+    model.add_nonlinear_constr(var=(x, y), func=lambda x, y: (80 * x) ** 2 + y**2 - 0.25**2)
     model.optimize()
     _check_solution(
         model=model,
@@ -154,14 +133,7 @@ def test_multivariable_nonlinear_constraint_infeasible():
     model = Model(max_gap_abs=1e-2)
     x = model.add_var(lb=0, ub=1)
     y = model.add_var(lb=0, ub=1)
-    model.add_objective_term(
-        var=(x, y), func=lambda x, y: (x - 0.25) ** 2 + (y - 0.25) ** 2 + 1
-    )
+    model.add_objective_term(var=(x, y), func=lambda x, y: (x - 0.25) ** 2 + (y - 0.25) ** 2 + 1)
     model.add_nonlinear_constr(var=(x, y), func=lambda x, y: np.exp(x + y) + 1)
     model.optimize()
-    _check_solution(
-        model=model,
-        expected_objective_value=None,
-        expected_solution=None,
-        expected_status=mip.OptimizationStatus.INFEASIBLE,
-    )
+    _check_solution(model=model, expected_status=mip.OptimizationStatus.INFEASIBLE)
diff --git a/tests/test_utils.py b/tests/test_utils.py
index 1d965a1..dde117b 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -1,5 +1,6 @@
 from contextlib import nullcontext as does_not_raise
-from typing import Union, Iterable, Any, Optional, Type
+from numbers import Real
+from typing import Iterable, Any, Type

 import pytest
@@ -15,7 +16,7 @@ def test_log_table_header(columns: Iterable[str], width: int):

 @pytest.mark.parametrize("values", [[1, 1.0, 2e10, 3e-10]])
 @pytest.mark.parametrize("width", [10, 15])
-def test_log_table_row(values: Iterable[Union[float, int]], width: int):
+def test_log_table_row(values: Iterable[Real], width: int):
     log_table_row(values=values, width=width)
     # TODO: add log checks
@@ -38,9 +39,9 @@ def test_log_table_row(values: Iterable[Union[float, int]], width: int):
 def test_check_scalar(
     x: Any,
     name: str,
-    var_type: Optional[Union[Type, tuple[Type, ...]]],
-    lb: Optional[Union[float, int]],
-    ub: Optional[Union[float, int]],
+    var_type: Type | tuple[Type, ...] | None,
+    lb: Real | None,
+    ub: Real | None,
     include_boundaries: bool,
     expectation,
 ):
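For reviewers trying the branch locally, here is a minimal end-to-end sketch of the refactored public API. It uses only names that appear in this diff (`Model`, `add_var`, `add_objective_term`, `add_linear_constr`, `optimize`, `var_value`, `objective_value`); the specific bounds, constraint, and tolerance are illustrative, not taken from the package's own examples.

```python
import mip

from halfspace import Model  # re-exported via the new `__all__`

# Build a small convex problem mirroring the updated tests.
model = Model(max_gap_abs=1e-4)
x = model.add_var(lb=0, ub=1)
y = model.add_var(lb=0, ub=1)

# Multivariable objective term; with `grad` omitted, the gradient is
# approximated internally by central finite differences.
model.add_objective_term(var=(x, y), func=lambda x, y: (x - 0.25) ** 2 + (y - 0.25) ** 2 + 1)
model.add_linear_constr(x + y <= 1)

status = model.optimize()
assert status == mip.OptimizationStatus.OPTIMAL
print(model.objective_value, model.var_value(x), model.var_value(y))
```

With dependencies now managed by Poetry, the same environment runs the test suite via `poetry run pytest --cov=src`, matching the updated CI step.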