1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -12,6 +12,7 @@
- Fixed lotsizing_lazy example
### Changed
- changed default value of enablepricing flag to True
- Speed up MatrixExpr.add.reduce via quicksum
### Removed

## 6.0.0 - 2025.xx.yy
1 change: 0 additions & 1 deletion src/pyscipopt/expr.pxi
@@ -42,7 +42,6 @@
# which should, in principle, modify the expr. However, since we do not implement __isub__, __sub__
# gets called (I guess) and so a copy is returned.
# Modifying the expression directly would be a bug, given that the expression might be re-used by the user.
include "matrix.pxi"
@Zeroto521 Zeroto521 Jan 16, 2026


scip.pyi also has this line.
The cdef function (_ensure_array) can't be defined twice.



def _is_number(e):
139 changes: 84 additions & 55 deletions src/pyscipopt/matrix.pxi
@@ -51,60 +51,16 @@ def _matrixexpr_richcmp(self, other, op):

class MatrixExpr(np.ndarray):

    def sum(
        self,
        axis: Optional[Union[int, Tuple[int, ...]]] = None,
        keepdims: bool = False,
        **kwargs,
    ) -> Union[Expr, MatrixExpr]:
        """
        Return the sum of the array elements over the given axis.

        Parameters
        ----------
        axis : None or int or tuple of ints, optional
            Axis or axes along which a sum is performed. The default, axis=None, will
            sum all of the elements of the input array. If axis is negative it counts
            from the last to the first axis. If axis is a tuple of ints, a sum is
            performed on all of the axes specified in the tuple instead of a single axis
            or all the axes as before.

        keepdims : bool, optional
            If this is set to True, the axes which are reduced are left in the result as
            dimensions with size one. With this option, the result will broadcast
            correctly against the input array.

        **kwargs : ignored
            Additional keyword arguments are ignored. They exist for compatibility
            with `numpy.ndarray.sum`.

        Returns
        -------
        Expr or MatrixExpr
            If the sum is performed over all axes, return an Expr, otherwise return
            a MatrixExpr.

        """
        axis: Tuple[int, ...] = normalize_axis_tuple(
            range(self.ndim) if axis is None else axis, self.ndim
        )
        if len(axis) == self.ndim:
            res = quicksum(self.flat)
            return (
                np.array([res], dtype=object).reshape([1] * self.ndim).view(MatrixExpr)
                if keepdims
                else res
            )

        keep_axes = tuple(i for i in range(self.ndim) if i not in axis)
        shape = (
            tuple(1 if i in axis else self.shape[i] for i in range(self.ndim))
            if keepdims
            else tuple(self.shape[i] for i in keep_axes)
        )
        return np.apply_along_axis(
            quicksum, -1, self.transpose(keep_axes + axis).reshape(shape + (-1,))
        ).view(MatrixExpr)
    def __array_ufunc__(self, ufunc, method, *args, **kwargs):
        if method == "reduce":
            if ufunc is np.add and isinstance(args[0], MatrixExpr):
                return _core_sum(args[0], **kwargs)
@Zeroto521 Zeroto521 Jan 16, 2026


It can't call MatrixExpr.sum directly here, or it will crash the Python kernel. Calling MatrixExpr.sum would produce an infinite calling chain (see the sketch after the list):

  1. x.mean(1)
  2. x.sum(1)
  3. np.add.reduce
  4. __array_ufunc__
  5. x.sum(1)
  6. __array_ufunc__
  7. np.add.reduce
  8. ...
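
A minimal standalone sketch of that recursion and the fix, in plain NumPy. `Demo` and `_helper_sum` are illustrative stand-ins (not the PySCIPOpt code); `_helper_sum` plays the role that `_core_sum` plays in this PR:

```python
import numpy as np

class Demo(np.ndarray):
    def __array_ufunc__(self, ufunc, method, *args, **kwargs):
        if ufunc is np.add and method == "reduce":
            # return self.sum(**kwargs)   # would re-enter __array_ufunc__ -> infinite recursion
            return _helper_sum(args[0], **kwargs)  # delegate to a module-level helper instead
        # strip the subclass so the default ufunc machinery handles everything else
        args = tuple(a.view(np.ndarray) if isinstance(a, np.ndarray) else a for a in args)
        return super().__array_ufunc__(ufunc, method, *args, **kwargs)

def _helper_sum(a, axis=None, dtype=None, **kwargs):
    # operate on a plain ndarray view, so no further subclass dispatch happens
    return a.view(np.ndarray).sum(axis=axis, dtype=dtype)

x = np.arange(6).reshape(2, 3).view(Demo)
print(x.sum(axis=0))   # [3 5 7] -- ndarray.sum -> np.add.reduce -> __array_ufunc__ -> _helper_sum
print(x.mean(axis=0))  # [1.5 2.5 3.5] -- mean also reduces via np.add, same routing, no recursion
```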


        args = _ensure_array(args, convert_scalar=True)
        if "out" in kwargs:
            kwargs["out"] = _ensure_array(kwargs["out"])
        res = super().__array_ufunc__(ufunc, method, *args, **kwargs)
        return res.view(MatrixExpr) if isinstance(res, np.ndarray) else res

    def __le__(self, other: Union[float, int, "Expr", np.ndarray, "MatrixExpr"]) -> MatrixExprCons:
        return _matrixexpr_richcmp(self, other, 1)
@@ -146,7 +102,8 @@ class MatrixExpr(np.ndarray):
        return super().__rsub__(other).view(MatrixExpr)

    def __matmul__(self, other):
        return super().__matmul__(other).view(MatrixExpr)
        res = super().__matmul__(other)
        return res.view(MatrixExpr) if isinstance(res, np.ndarray) else res

class MatrixGenExpr(MatrixExpr):
    pass
@@ -161,3 +118,75 @@ class MatrixExprCons(np.ndarray):

    def __eq__(self, other):
        raise NotImplementedError("Cannot compare MatrixExprCons with '=='.")


cdef inline tuple _ensure_array(tuple args, bool convert_scalar = False):
    cdef object x
    if not convert_scalar:
        return tuple(
            x.view(np.ndarray) if isinstance(x, np.ndarray) else x
            for x in args
        )
    return tuple(
        x.view(np.ndarray) if isinstance(x, np.ndarray) else np.array(x, dtype=object)
        for x in args
    )


def _core_sum(
    a: MatrixExpr,
    axis: Optional[Union[int, Tuple[int, ...]]] = None,
    keepdims: bool = False,
    **kwargs,
) -> Union[Expr, MatrixExpr]:
    """
    Return the sum of the array elements over the given axis.

    Parameters
    ----------
    a : MatrixExpr

    axis : None or int or tuple of ints, optional
        Axis or axes along which a sum is performed. The default, axis=None, will
        sum all of the elements of the input array. If axis is negative it counts
        from the last to the first axis. If axis is a tuple of ints, a sum is
        performed on all of the axes specified in the tuple instead of a single axis
        or all the axes as before.

    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left in the result as
        dimensions with size one. With this option, the result will broadcast
        correctly against the input array.

    **kwargs : ignored
        Additional keyword arguments are ignored. They exist for compatibility
        with `numpy.ndarray.sum`.

    Returns
    -------
    Expr or MatrixExpr
        If the sum is performed over all axes, return an Expr, otherwise return
        a MatrixExpr.

    """
    axis: Tuple[int, ...] = normalize_axis_tuple(
        range(a.ndim) if axis is None else axis, a.ndim
    )
    if len(axis) == a.ndim:
        res = quicksum(a.flat)
        return (
            np.array([res], dtype=object).reshape([1] * a.ndim).view(MatrixExpr)
            if keepdims
            else res
        )

    keep_axes = tuple(i for i in range(a.ndim) if i not in axis)
    shape = (
        tuple(1 if i in axis else a.shape[i] for i in range(a.ndim))
        if keepdims
        else tuple(a.shape[i] for i in keep_axes)
    )
    return np.apply_along_axis(
        quicksum, -1, a.transpose(keep_axes + axis).reshape(shape + (-1,))
    ).view(MatrixExpr)
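
For context, a hedged usage sketch of the behavior `_core_sum` documents above (assumes a PySCIPOpt build with this PR and the import names used in the test suite):

```python
from pyscipopt import Model, Expr, MatrixExpr

model = Model()
x = model.addMatrixVar((2, 3))

s_all = x.sum()                        # every axis reduced -> a single Expr
s_axis = x.sum(axis=0)                 # shape (3,) MatrixExpr of Exprs
s_keep = x.sum(axis=1, keepdims=True)  # reduced axis kept -> shape (2, 1) MatrixExpr

assert isinstance(s_all, Expr)
assert isinstance(s_axis, MatrixExpr) and s_axis.shape == (3,)
assert s_keep.shape == (2, 1)
```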

3 changes: 0 additions & 3 deletions src/pyscipopt/scip.pyi
@@ -509,9 +509,6 @@ class MatrixConstraint(numpy.ndarray):
    def isStickingAtNode(self) -> Incomplete: ...

class MatrixExpr(numpy.ndarray):
    def sum( # type: ignore[override]
        self, axis: Incomplete = ..., keepdims: Incomplete = ..., **kwargs: Incomplete
    ) -> Incomplete: ...
    def __add__(self, other: Incomplete) -> Incomplete: ...
    def __eq__(self, other: Incomplete)-> Incomplete: ...
    def __ge__(self, other: Incomplete) -> MatrixExprCons: ...
60 changes: 43 additions & 17 deletions tests/test_matrix_variable.py
@@ -248,11 +248,11 @@ def test_matrix_sum_axis():
)
def test_matrix_sum_result(axis, keepdims):
    # directly compare the result of np.sum and MatrixExpr.sum
    _getVal = np.vectorize(lambda e: e.terms[CONST])
    _getVal = np.vectorize(lambda e: e[CONST])
    a = np.arange(6).reshape((1, 2, 3))

    np_res = a.sum(axis, keepdims=keepdims)
    scip_res = MatrixExpr.sum(a, axis, keepdims=keepdims)
    scip_res = a.view(MatrixExpr).sum(axis, keepdims=keepdims)

np.ndarray.sum is MatrixExpr.sum now.
MatrixExpr.sum(a) would return an np.ndarray(..., dtype=int), not a MatrixExpr, because `a` here is a plain integer array (see the sketch below).
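
A hedged illustration of the point above, assuming a PySCIPOpt build that contains this change and that `MatrixExpr` is importable the way the test suite imports it:

```python
import numpy as np
from pyscipopt import MatrixExpr  # assumed import, matching the test suite

a = np.arange(6).reshape((1, 2, 3))

# MatrixExpr no longer defines its own `sum`, so this is just np.ndarray.sum on a
# plain integer array: the result is an int ndarray, not a MatrixExpr.
plain = MatrixExpr.sum(a, 0)

# Viewing the array as MatrixExpr first routes np.add.reduce through
# __array_ufunc__ and _core_sum, so the entries become (constant) Expr objects.
wrapped = a.view(MatrixExpr).sum(0)
```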

    assert (np_res == _getVal(scip_res)).all()
    assert np_res.shape == _getVal(scip_res).shape

@@ -262,35 +262,61 @@ def test_matrix_sum_axis_is_none_performance(n):
    model = Model()
    x = model.addMatrixVar((n, n))

    # Original sum via `np.ndarray.sum`, `np.sum` will call subclass method
    start_orig = time()
    np.ndarray.sum(x)
    end_orig = time()
    # Original sum via `np.ndarray.sum`
    start = time()
    x.view(np.ndarray).sum()
@Zeroto521 Zeroto521 Jan 16, 2026


np.ndarray.sum is MatrixExpr.sum now; both go through the __array_ufunc__ protocol, so the benchmark has to view the MatrixVariable as a normal np.ndarray to time the original path.

    orig = time() - start

    # Optimized sum via `quicksum`
    start_matrix = time()
    start = time()
    x.sum()
    end_matrix = time()
    matrix = time() - start

    assert model.isGT(end_orig - start_orig, end_matrix - start_matrix)
    assert model.isGT(orig, matrix)


@pytest.mark.parametrize("n", [50, 100])
def test_matrix_sum_axis_not_none_performance(n):
    model = Model()
    x = model.addMatrixVar((n, n))

    # Original sum via `np.ndarray.sum`, `np.sum` will call subclass method
    start_orig = time()
    np.ndarray.sum(x, axis=0)
    end_orig = time()
    # Original sum via `np.ndarray.sum`
    start = time()
    x.view(np.ndarray).sum(axis=0)
    orig = time() - start

    # Optimized sum via `quicksum`
    start_matrix = time()
    start = time()
    x.sum(axis=0)
    end_matrix = time()
    matrix = time() - start

    assert model.isGT(orig, matrix)


@pytest.mark.parametrize("n", [50, 100])
def test_matrix_mean_performance(n):
    model = Model()
    x = model.addMatrixVar((n, n))

    # Original sum via `np.ndarray.sum`
    start = time()
    x.view(np.ndarray).mean(axis=0)
    orig = time() - start

    # Optimized sum via `quicksum`
    start = time()
    x.mean(axis=0)
    matrix = time() - start

    assert model.isGT(orig, matrix)


def test_matrix_mean():
    model = Model()
    x = model.addMatrixVar((2, 2))

    assert model.isGT(end_orig - start_orig, end_matrix - start_matrix)
    assert isinstance(x.mean(), Expr)
    assert isinstance(x.mean(1), MatrixExpr)


def test_add_cons_matrixVar():
@@ -574,7 +600,7 @@ def test_matrix_matmul_return_type():

    # test 1D @ 1D → 0D
    x = m.addMatrixVar(3)
    assert type(x @ x) is MatrixExpr
    assert type(x @ x) is Expr

    # test 1D @ 1D → 2D
    assert type(x[:, None] @ x[None, :]) is MatrixExpr