diff --git a/CHANGELOG.md b/CHANGELOG.md index 52df46228..077d56b12 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ - Fixed lotsizing_lazy example ### Changed - changed default value of enablepricing flag to True +- Speed up MatrixExpr.add.reduce via quicksum ### Removed ## 6.0.0 - 2025.xx.yy diff --git a/src/pyscipopt/expr.pxi b/src/pyscipopt/expr.pxi index f0c406fcb..49189bc27 100644 --- a/src/pyscipopt/expr.pxi +++ b/src/pyscipopt/expr.pxi @@ -42,7 +42,6 @@ # which should, in princple, modify the expr. However, since we do not implement __isub__, __sub__ # gets called (I guess) and so a copy is returned. # Modifying the expression directly would be a bug, given that the expression might be re-used by the user. -include "matrix.pxi" def _is_number(e): diff --git a/src/pyscipopt/matrix.pxi b/src/pyscipopt/matrix.pxi index 1a6a09cf3..5698d53a5 100644 --- a/src/pyscipopt/matrix.pxi +++ b/src/pyscipopt/matrix.pxi @@ -51,60 +51,16 @@ def _matrixexpr_richcmp(self, other, op): class MatrixExpr(np.ndarray): - def sum( - self, - axis: Optional[Union[int, Tuple[int, ...]]] = None, - keepdims: bool = False, - **kwargs, - ) -> Union[Expr, MatrixExpr]: - """ - Return the sum of the array elements over the given axis. - - Parameters - ---------- - axis : None or int or tuple of ints, optional - Axis or axes along which a sum is performed. The default, axis=None, will - sum all of the elements of the input array. If axis is negative it counts - from the last to the first axis. If axis is a tuple of ints, a sum is - performed on all of the axes specified in the tuple instead of a single axis - or all the axes as before. - - keepdims : bool, optional - If this is set to True, the axes which are reduced are left in the result as - dimensions with size one. With this option, the result will broadcast - correctly against the input array. - - **kwargs : ignored - Additional keyword arguments are ignored. They exist for compatibility - with `numpy.ndarray.sum`. 
- - Returns - ------- - Expr or MatrixExpr - If the sum is performed over all axes, return an Expr, otherwise return - a MatrixExpr. - - """ - axis: Tuple[int, ...] = normalize_axis_tuple( - range(self.ndim) if axis is None else axis, self.ndim - ) - if len(axis) == self.ndim: - res = quicksum(self.flat) - return ( - np.array([res], dtype=object).reshape([1] * self.ndim).view(MatrixExpr) - if keepdims - else res - ) - - keep_axes = tuple(i for i in range(self.ndim) if i not in axis) - shape = ( - tuple(1 if i in axis else self.shape[i] for i in range(self.ndim)) - if keepdims - else tuple(self.shape[i] for i in keep_axes) - ) - return np.apply_along_axis( - quicksum, -1, self.transpose(keep_axes + axis).reshape(shape + (-1,)) - ).view(MatrixExpr) + def __array_ufunc__(self, ufunc, method, *args, **kwargs): + if method == "reduce": + if ufunc is np.add and isinstance(args[0], MatrixExpr): + return _core_sum(args[0], **kwargs) + + args = _ensure_array(args, convert_scalar=True) + if "out" in kwargs: + kwargs["out"] = _ensure_array(kwargs["out"]) + res = super().__array_ufunc__(ufunc, method, *args, **kwargs) + return res.view(MatrixExpr) if isinstance(res, np.ndarray) else res def __le__(self, other: Union[float, int, "Expr", np.ndarray, "MatrixExpr"]) -> MatrixExprCons: return _matrixexpr_richcmp(self, other, 1) @@ -146,7 +102,8 @@ class MatrixExpr(np.ndarray): return super().__rsub__(other).view(MatrixExpr) def __matmul__(self, other): - return super().__matmul__(other).view(MatrixExpr) + res = super().__matmul__(other) + return res.view(MatrixExpr) if isinstance(res, np.ndarray) else res class MatrixGenExpr(MatrixExpr): pass @@ -161,3 +118,75 @@ class MatrixExprCons(np.ndarray): def __eq__(self, other): raise NotImplementedError("Cannot compare MatrixExprCons with '=='.") + + +cdef inline tuple _ensure_array(tuple args, bool convert_scalar = False): + cdef object x + if not convert_scalar: + return tuple( + x.view(np.ndarray) if isinstance(x, np.ndarray) else x + 
for x in args + ) + return tuple( + x.view(np.ndarray) if isinstance(x, np.ndarray) else np.array(x, dtype=object) + for x in args + ) + + +def _core_sum( + a: MatrixExpr, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + keepdims: bool = False, + **kwargs, +) -> Union[Expr, MatrixExpr]: + """ + Return the sum of the array elements over the given axis. + + Parameters + ---------- + a : MatrixExpr + + axis : None or int or tuple of ints, optional + Axis or axes along which a sum is performed. The default, axis=None, will + sum all of the elements of the input array. If axis is negative it counts + from the last to the first axis. If axis is a tuple of ints, a sum is + performed on all of the axes specified in the tuple instead of a single axis + or all the axes as before. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left in the result as + dimensions with size one. With this option, the result will broadcast + correctly against the input array. + + **kwargs : ignored + Additional keyword arguments are ignored. They exist for compatibility + with `numpy.ndarray.sum`. + + Returns + ------- + Expr or MatrixExpr + If the sum is performed over all axes, return an Expr, otherwise return + a MatrixExpr. + + """ + axis: Tuple[int, ...] 
= normalize_axis_tuple( + range(a.ndim) if axis is None else axis, a.ndim + ) + if len(axis) == a.ndim: + res = quicksum(a.flat) + return ( + np.array([res], dtype=object).reshape([1] * a.ndim).view(MatrixExpr) + if keepdims + else res + ) + + keep_axes = tuple(i for i in range(a.ndim) if i not in axis) + shape = ( + tuple(1 if i in axis else a.shape[i] for i in range(a.ndim)) + if keepdims + else tuple(a.shape[i] for i in keep_axes) + ) + return np.apply_along_axis( + quicksum, -1, a.transpose(keep_axes + axis).reshape(shape + (-1,)) + ).view(MatrixExpr) + diff --git a/src/pyscipopt/scip.pyi b/src/pyscipopt/scip.pyi index 831dd02ed..1e703c006 100644 --- a/src/pyscipopt/scip.pyi +++ b/src/pyscipopt/scip.pyi @@ -509,9 +509,6 @@ class MatrixConstraint(numpy.ndarray): def isStickingAtNode(self) -> Incomplete: ... class MatrixExpr(numpy.ndarray): - def sum( # type: ignore[override] - self, axis: Incomplete = ..., keepdims: Incomplete = ..., **kwargs: Incomplete - ) -> Incomplete: ... def __add__(self, other: Incomplete) -> Incomplete: ... def __eq__(self, other: Incomplete)-> Incomplete: ... def __ge__(self, other: Incomplete) -> MatrixExprCons: ... 
diff --git a/tests/test_matrix_variable.py b/tests/test_matrix_variable.py index e4758f077..7916b4d10 100644 --- a/tests/test_matrix_variable.py +++ b/tests/test_matrix_variable.py @@ -248,11 +248,11 @@ def test_matrix_sum_axis(): ) def test_matrix_sum_result(axis, keepdims): # directly compare the result of np.sum and MatrixExpr.sum - _getVal = np.vectorize(lambda e: e.terms[CONST]) + _getVal = np.vectorize(lambda e: e[CONST]) a = np.arange(6).reshape((1, 2, 3)) np_res = a.sum(axis, keepdims=keepdims) - scip_res = MatrixExpr.sum(a, axis, keepdims=keepdims) + scip_res = a.view(MatrixExpr).sum(axis, keepdims=keepdims) assert (np_res == _getVal(scip_res)).all() assert np_res.shape == _getVal(scip_res).shape @@ -262,17 +262,17 @@ def test_matrix_sum_axis_is_none_performance(n): model = Model() x = model.addMatrixVar((n, n)) - # Original sum via `np.ndarray.sum`, `np.sum` will call subclass method - start_orig = time() - np.ndarray.sum(x) - end_orig = time() + # Original sum via `np.ndarray.sum` + start = time() + x.view(np.ndarray).sum() + orig = time() - start # Optimized sum via `quicksum` - start_matrix = time() + start = time() x.sum() - end_matrix = time() + matrix = time() - start - assert model.isGT(end_orig - start_orig, end_matrix - start_matrix) + assert model.isGT(orig, matrix) @pytest.mark.parametrize("n", [50, 100]) @@ -280,17 +280,43 @@ def test_matrix_sum_axis_not_none_performance(n): model = Model() x = model.addMatrixVar((n, n)) - # Original sum via `np.ndarray.sum`, `np.sum` will call subclass method - start_orig = time() - np.ndarray.sum(x, axis=0) - end_orig = time() + # Original sum via `np.ndarray.sum` + start = time() + x.view(np.ndarray).sum(axis=0) + orig = time() - start # Optimized sum via `quicksum` - start_matrix = time() + start = time() x.sum(axis=0) - end_matrix = time() + matrix = time() - start + + assert model.isGT(orig, matrix) + + +@pytest.mark.parametrize("n", [50, 100]) +def test_matrix_mean_performance(n): + model = Model() + x 
= model.addMatrixVar((n, n)) + + # Original mean via `np.ndarray.mean` + start = time() + x.view(np.ndarray).mean(axis=0) + orig = time() - start + + # Optimized mean via `quicksum` + start = time() + x.mean(axis=0) + matrix = time() - start + + assert model.isGT(orig, matrix) + + +def test_matrix_mean(): + model = Model() + x = model.addMatrixVar((2, 2)) - assert model.isGT(end_orig - start_orig, end_matrix - start_matrix) + assert isinstance(x.mean(), Expr) + assert isinstance(x.mean(1), MatrixExpr) def test_add_cons_matrixVar(): @@ -574,7 +600,7 @@ def test_matrix_matmul_return_type(): # test 1D @ 1D → 0D x = m.addMatrixVar(3) - assert type(x @ x) is MatrixExpr + assert type(x @ x) is Expr # test 1D @ 1D → 2D assert type(x[:, None] @ x[None, :]) is MatrixExpr