## (stray "Nothing" removed -- HTML-extraction artifact, not part of the R source)
#####
## DO NOT EDIT THIS FILE!! EDIT THE SOURCE INSTEAD: rsrc_tree/cvxcore/r/canonInterface.R
#####
## CVXPY SOURCE: cvxcore/python/canonInterface.py
## Added format_matrix and set_matrix_data.
get_problem_matrix <- function(linOps, id_to_col = integer(0),
                               var_length = 0L,
                               param_to_size = integer(0),
                               constr_offsets = integer(0)) {
  ## Build the canonical (V, I, J) triplet representation of the problem
  ## matrix for a list of R-side LinOp trees via the CVXcanon C++ backend.
  ##
  ## linOps:         list of R LinOp trees (one per constraint).
  ## id_to_col:      named integer vector mapping variable id -> column offset
  ##                 (C++ equivalent of map<int, int>).
  ## var_length:     total number of variable columns.
  ## param_to_size:  named integer vector mapping parameter id -> size.
  ## constr_offsets: optional numeric/integer vector of row offsets, one per
  ##                 LinOp (coerced to integer storage before the C++ call).
  ##
  ## Returns a list with the raw V/I/J triplets (0-based I/J) and const_vec,
  ## a dense one-column matrix of the constant terms, reconstructed for
  ## backward compatibility with the pre-tensor interface.
  lin_vec <- CVXcanon.LinOpVector()
  ## Ensure id_to_col_C is an integer vector with names retained.
  id_to_col_C <- id_to_col
  storage.mode(id_to_col_C) <- "integer"
  ## tmp keeps C++ LinOp data in scope after build_lin_op_tree returns.
  tmp <- make_vec()
  for (lin in linOps) {
    tree <- build_lin_op_tree(lin, tmp)
    tmp$push_back(tree)
    lin_vec$push_back(tree)
  }
  ## Accept any numeric offsets (a strict typeof() == "integer" test used to
  ## reject doubles even though storage mode was coerced right below anyway).
  if (!is.numeric(constr_offsets)) {
    cli_abort("get_problem_matrix: expecting integer vector for {.arg constr_offsets}.")
  }
  var_length_C <- as.integer(var_length)
  if (length(constr_offsets) == 0L) {
    problemData <- CVXcanon.build_matrix(lin_vec, id_to_col_C, var_length_C,
                                         param_to_size)
  } else {
    constr_offsets_C <- constr_offsets
    storage.mode(constr_offsets_C) <- "integer"
    problemData <- CVXcanon.build_matrix(lin_vec, id_to_col_C, var_length_C,
                                         param_to_size, constr_offsets_C)
  }
  ## Extract raw V/I/J from the tensor (CONSTANT_ID = -1, vec_idx = 0).
  V <- problemData$getV()
  I <- problemData$getI()
  J <- problemData$getJ()
  ## Total constraint rows implied by the LinOp shapes.
  num_rows <- sum(vapply(linOps, function(lo) {
    as.integer(lo$shape[1]) * as.integer(lo$shape[2])
  }, integer(1L)))
  if (length(constr_offsets) > 0L && length(linOps) > 0L) {
    ## With custom offsets the last constraint may end past num_rows.
    last_idx <- length(linOps)
    last_lo <- linOps[[last_idx]]
    last_size <- as.integer(last_lo$shape[1]) * as.integer(last_lo$shape[2])
    num_rows <- max(num_rows, constr_offsets[last_idx] + last_size)
  }
  ## Reconstruct const_vec: in the tensor architecture constants live in
  ## column var_length (entries with J >= var_length), not a separate vector.
  const_vec <- numeric(num_rows)
  if (length(J) > 0L) {
    const_mask <- J >= var_length_C
    if (any(const_mask)) {
      ci <- I[const_mask] ## 0-based row indices
      cv <- V[const_mask]
      ## Vectorized accumulation of duplicate row indices via rowsum()
      ## (replaces the previous element-wise scalar loop).
      sums <- rowsum(cv, group = ci)
      rows <- as.integer(rownames(sums)) + 1L
      const_vec[rows] <- const_vec[rows] + sums[, 1L]
    }
  }
  list(V = V, I = I, J = J,
       const_vec = matrix(const_vec, ncol = 1))
}
# -- get_problem_matrix_tensor -------------------------------------
## CVXPY SOURCE: cvxcore/python/cppbackend.py build_matrix() lines 57-99
## Extracts the full parameter tensor from C++ ProblemData.
##
## Returns a sparse Matrix of shape (constr_length * (var_length + 1), param_size_plus_one)
## where param_size_plus_one = sum of all param sizes (including CONSTANT_ID's 1).
## Each column corresponds to one element of the flattened parameter vector.
##
## param_to_size: named list, param_id_str -> size (must include CONSTANT_ID)
## param_id_map: named list, param_id_str -> column offset in tensor
get_problem_matrix_tensor <- function(linOps, id_to_col, var_length,
                                      param_to_size, param_id_map) {
  ## Extract the full parameter tensor from the C++ ProblemData.
  ## CVXPY SOURCE: cvxcore/python/cppbackend.py build_matrix() lines 57-99.
  ##
  ## linOps:        list of R LinOp trees, one per constraint.
  ## id_to_col:     named integer vector, variable id -> column offset.
  ## var_length:    total number of variable columns.
  ## param_to_size: named list, param_id_str -> size (must include CONSTANT_ID).
  ## param_id_map:  named list, param_id_str -> column offset in the tensor.
  ##
  ## Returns a sparse Matrix of shape
  ##   (constr_length * (var_length + 1)) x param_size_plus_one,
  ## where each column corresponds to one element of the flattened
  ## parameter vector.

  ## Build the C++ LinOp trees and call into the backend.
  lin_vec <- CVXcanon.LinOpVector()
  id_to_col_C <- id_to_col
  storage.mode(id_to_col_C) <- "integer"
  var_length_C <- as.integer(var_length)
  ## Flatten param_to_size into a named integer vector for C++ (map<int,int>).
  pts <- as.integer(unlist(param_to_size))
  names(pts) <- names(param_to_size)
  ## tmp keeps the created C++ LinOp objects alive while lin_vec uses them.
  tmp <- make_vec()
  for (lin in linOps) {
    tree <- build_lin_op_tree(lin, tmp)
    tmp$push_back(tree)
    lin_vec$push_back(tree)
  }
  problemData <- CVXcanon.build_matrix(lin_vec, id_to_col_C, var_length_C, pts)
  ## Total constraint rows = sum of each LinOp's flattened 2-D shape.
  constr_length <- sum(vapply(linOps, function(lo) {
    as.integer(lo$shape[1]) * as.integer(lo$shape[2])
  }, integer(1L)))
  ## Computed in double precision to avoid 32-bit integer overflow for
  ## large products (converted back to integer in dims below).
  n_flat_rows <- as.numeric(constr_length) * as.numeric(var_length_C + 1L)
  param_size_plus_one <- sum(as.integer(unlist(param_to_size)))
  ## Iterate over all param_ids, extract per-element V/I/J, assemble tensor.
  ## CVXPY SOURCE: cppbackend.py lines 57-99.
  ## Collect chunks in lists, then flatten once (avoids O(n^2) c() growth).
  V_chunks <- list()
  I_chunks <- list()
  J_chunks <- list()
  ci <- 0L  ## number of non-empty chunks collected so far
  for (pid_str in names(param_to_size)) {
    pid <- as.integer(pid_str)
    sz <- param_to_size[[pid_str]]
    col_offset <- param_id_map[[pid_str]]
    ## Select which parameter (and below, which element) the getV/getI/getJ
    ## accessors report triplets for; the setter calls must precede the gets.
    problemData$setParamId(pid)
    for (i in seq_len(sz) - 1L) { # 0-based vec_idx
      problemData$setVecIdx(i)
      v <- problemData$getV()
      ii <- problemData$getI()
      jj <- problemData$getJ()
      if (length(v) > 0L) {
        ci <- ci + 1L
        ## Tensor row: I + J * constr_length (flattens the 2-D
        ## constraint-row x variable-column index into one dimension).
        I_chunks[[ci]] <- as.numeric(ii) + as.numeric(jj) * constr_length
        ## Tensor col: element index + this parameter's column offset.
        J_chunks[[ci]] <- rep(as.numeric(i + col_offset), length(v))
        V_chunks[[ci]] <- v
      }
    }
  }
  ## Build sparse tensor matrix (n_flat_rows x param_size_plus_one).
  ## I and J are 0-based; convert to 1-based for R's sparseMatrix().
  if (ci > 0L) {
    all_V <- unlist(V_chunks, use.names = FALSE)
    all_I <- unlist(I_chunks, use.names = FALSE)
    all_J <- unlist(J_chunks, use.names = FALSE)
    Matrix::sparseMatrix(
      i = as.integer(all_I) + 1L,
      j = as.integer(all_J) + 1L,
      x = all_V,
      dims = c(as.integer(n_flat_rows), param_size_plus_one)
    )
  } else {
    ## No nonzeros anywhere: return an all-zero sparse matrix of the
    ## correct shape.
    Matrix::sparseMatrix(
      i = integer(0), j = integer(0), x = numeric(0),
      dims = c(as.integer(n_flat_rows), param_size_plus_one)
    )
  }
}
format_matrix <- function(matrix, format='dense') {
  ## Coerce `matrix` into the representation the SWIG wrapper loads
  ## efficiently.
  ##
  ## format: one of 'dense', 'sparse', or 'scalar'; anything else aborts.
  ## If the optional gmp package is available, big rational/integer input
  ## is first downcast to doubles.
  if (requireNamespace("gmp", quietly = TRUE) &&
      (gmp::is.bigq(matrix) || gmp::is.bigz(matrix))) {
    ## `matrix(...)` below still resolves to base::matrix: when R looks up
    ## a name used as a function it skips non-function bindings.
    converted <- matrix(vapply(matrix, as.double, double(1)))
    dim(converted) <- dim(matrix)
    matrix <- converted
  }
  switch(format,
         dense  = as.matrix(matrix),   ## guarantee a 2-D dense matrix
         sparse = Matrix::Matrix(matrix, sparse = TRUE),
         scalar = as.matrix(matrix),
         cli_abort("format_matrix: format {.val {format}} unknown."))
}
set_matrix_data <- function(linC, linR) {
  ## Dispatch to the appropriate linop_* setter to load matrix data into
  ## the C++ LinOp, formatting the data via format_matrix() first.
  ##
  ## linC: the C++ LinOp to fill.
  ## linR: R-side LinOp; its $data is either a nested LinOp_R carrying the
  ##       constant, or the raw matrix itself (type taken from linR$type).
  nested <- is.list(linR$data) && inherits(linR$data, "LinOp_R")
  if (nested) {
    switch(linR$data$type,
           sparse_const = linop_set_sparse_data(
             linC, format_matrix(linR$data$data, 'sparse')),
           dense_const = linop_set_dense_data(
             linC, format_matrix(linR$data$data)),
           cli_abort("set_matrix_data: data.type {.val {linR$data$type}} unknown."))
  } else if (linR$type == 'sparse_const') {
    linop_set_sparse_data(linC, format_matrix(linR$data, 'sparse'))
  } else {
    linop_set_dense_data(linC, format_matrix(linR$data))
  }
}
set_slice_data <- function(linC, linR) {
  ## Push each dimension's index vector (converted to 0-based) to C++.
  ##
  ## linR$data is a list whose LAST element is the "key" sentinel marker;
  ## only the elements before it hold per-dimension index vectors.
  ##
  ## linC: the C++ LinOp receiving the slice data.
  ## linR: R-side LinOp whose $data holds the slice indices + sentinel.
  n_dims <- length(linR$data) - 1L
  ## Use seq_len(), not seq.int(n): seq.int(0) evaluates to c(1, 0), which
  ## would iterate over (and try to decrement) the sentinel itself when the
  ## data list has a single element. max() also guards an empty data list.
  for (i in seq_len(max(n_dims, 0L))) {
    sl <- linR$data[[i]]
    linop_slice_push_back(linC, sl - 1L) ## Convert to 0-based for C++
  }
}
build_lin_op_tree <- function(root_linR, tmp, verbose = FALSE) {
  ## Mirror an R-side LinOp tree into C++ LinOps, breadth-first.
  ##
  ## root_linR: R LinOp (list with $type, $shape, $args, $data).
  ## tmp:       C++ vector that takes ownership of every created node so
  ##            they stay alive after this function returns.
  ## verbose:   forwarded to the recursive call for LinOp_R data sub-trees.
  ##
  ## Returns the C++ LinOp corresponding to root_linR.

  ## Index-based BFS queue: push to tail, read from head (O(1) amortized).
  ## Assigning past index 32 grows the R list automatically.
  queue <- vector("list", 32L)
  qi <- 1L ## read index (head)
  qn <- 1L ## write index (tail)
  root_linC <- linop_new()
  queue[[qn]] <- list(linR = root_linR, linC = root_linC)
  qn <- qn + 1L
  while(qi < qn) {
    node <- queue[[qi]]
    qi <- qi + 1L
    linR <- node$linR
    linC <- node$linC
    ## Enqueue child LinOps for BFS processing and wire them up as args.
    for(argR in linR$args) {
      tree <- linop_new()
      tmp$push_back(tree)
      queue[[qn]] <- list(linR = argR, linC = tree)
      qn <- qn + 1L
      linop_args_push_back(linC, tree)
    }
    ## LinOp type is an enum in C++; R stores lowercase, C++ expects uppercase.
    linop_set_type(linC, toupper(linR$type))
    ## Record the node's 2-D shape.
    linop_size_push_back(linC, as.integer(linR$shape[1]))
    linop_size_push_back(linC, as.integer(linR$shape[2]))
    ## Load the node's problem data (if any) into the appropriate C++ field.
    ## Branch order matters: the 'key'-sentinel slice check must precede the
    ## plain-list check further down.
    if(!is.null(linR$data)) {
      if (is.list(linR$data) && length(linR$data) == 3L && linR$data[[3L]] == 'key') {
        ## Slice indices: list(idx1, idx2, 'key' sentinel).
        set_slice_data(linC, linR)
      } else if(is.numeric(linR$data) || is.integer(linR$data)) {
        linop_set_dense_data(linC, format_matrix(linR$data, 'scalar'))
        linop_set_data_ndim(linC, 0L) ## scalar: data_ndim = 0 (CVXPY cppbackend.py line 155)
      } else if(inherits(linR$data, "LinOp_R")) {
        ## Data is a LinOp_R sub-tree. Build it as a C++ linOp_data
        ## sub-tree for DPP tensor support (MUL/RMUL/MUL_ELEM with
        ## PARAM data). Also set inline matrix data for constants
        ## (backward compat with non-DPP path).
        data_tree <- build_lin_op_tree(linR$data, tmp, verbose)
        tmp$push_back(data_tree)
        linop_set_linop_data(linC, data_tree)
        linop_set_data_ndim(linC, length(linR$data$shape)) ## CVXPY cppbackend.py line 184
        ## For constant data, ALSO set inline matrix data so that
        ## non-DPP code paths still work (get_constant_data reads
        ## from sparse_data/dense_data directly).
        if (linR$data$type %in% c('sparse_const', 'dense_const', 'scalar_const')) {
          if (linR$data$type == 'scalar_const')
            linop_set_dense_data(linC, format_matrix(linR$data$data, 'scalar'))
          else
            set_matrix_data(linC, linR)
        }
      } else if(is.list(linR$data) && !inherits(linR$data, "LinOp_R")) {
        ## Plain list metadata (e.g., axis/keepdims) -- skip.
        ## C++ doesn't need this; behavior is determined by LinOp type.
      } else if(inherits(linR$data, "Matrix") || is.matrix(linR$data)) {
        set_matrix_data(linC, linR)
        linop_set_data_ndim(linC, 2L) ## matrix data: always 2D (CVXPY cppbackend.py line 248)
      }
      ## else: unknown data type, silently skip.
    }
  }
  root_linC
}
## The following lines are HTML-extraction artifacts (package-site page
## footer), not part of the R source; kept here commented out:
## Any scripts or data that you put into this service are public.
## Add the following code to your website.
## For more information on customizing the embed code, read Embedding Snippets.