# R/009_utilities_power_tools.R

# Defines functions: powcone_constrs, gm_constrs, gm, pow_neg, pow_mid,
# pow_high, prettydict, prettytuple, over_bound, lower_bound, approx_error,
# check_dyad, decompose, .split_dyad, fracify, dyad_completion, make_frac,
# is_dyad_weight, is_weight, is_dyad, .limit_denominator, is_power2,
# next_pow2, get_max_denom, .bigq_to_key, .bigq_vec

#####
## DO NOT EDIT THIS FILE!! EDIT THE SOURCE INSTEAD: rsrc_tree/utilities/power_tools.R
#####

## CVXPY SOURCE: utilities/power_tools.py
## Power-related utility functions for Power atom and geometric mean decomposition
##
## Complete port of CVXPY's power_tools.py with exact rational arithmetic (gmp).
## Key functions: fracify, dyad_completion, decompose, gm, gm_constrs,
## pow_high, pow_mid, pow_neg, powcone_constrs

# ======================================================================
# Pure arithmetic utilities (no CVXR dependencies)
# ======================================================================

#' Safe bigq concatenation -- prevents silent coercion from mixing types
#' @param ... Values to concatenate as bigq
#' @returns A bigq vector
#' @noRd
.bigq_vec <- function(...) {
  ## Concatenate values as bigq, coercing each argument first so that mixing
  ## integer/bigz/bigq inputs cannot silently downgrade the result type.
  pieces <- lapply(list(...), as.bigq)
  Reduce(c, pieces)
}

#' Convert a bigq vector to a string key for environment-based lookup
#' @param w A bigq vector
#' @returns Character string
#' @noRd
.bigq_to_key <- function(w) {
  ## Serialize a bigq vector to a comma-separated string, used as a hash key
  ## for environment-based lookups (environments only key on strings).
  strs <- character(length(w))
  for (i in seq_len(length(w))) {
    strs[i] <- as.character(w[i])
  }
  paste(strs, collapse = ",")
}

#' Get the maximum denominator from a bigq vector
#' @param tup A bigq vector
#' @returns Integer -- the maximum denominator
#' @noRd
get_max_denom <- function(tup) {
  ## Largest denominator appearing in a bigq vector, as a plain integer.
  tup <- as.bigq(tup)
  idx <- seq_len(length(tup))
  denoms <- vapply(idx, function(i) as.integer(denominator(tup[i])), integer(1))
  max(denoms)
}

#' First power of 2 >= n
#'
#' @param n Non-negative integer
#' @returns Integer -- smallest power of 2 that is >= n
#' @noRd
next_pow2 <- function(n) {
  ## CVXPY: next_pow2(n) in power_tools.py
  ## Smallest power of 2 that is >= n; returns 1L for n <= 0.
  ##
  ## FIX: the previous implementation round-tripped through floating point
  ## (2^ceiling(log2(n))), which is vulnerable to log2() rounding near exact
  ## powers of 2. The integer doubling loop below is exact for all n up to
  ## 2^30 (beyond that the integer doubling would overflow, as before).
  n <- as.integer(n)
  if (n <= 0L) return(1L)
  p <- 1L
  while (p < n) {
    p <- p + p  # exact integer doubling; no floating-point involved
  }
  p
}

#' Test if a number is a positive integer power of 2
#'
#' @param num A numeric or bigz value
#' @returns Logical
#' @noRd
is_power2 <- function(num) {
  ## CVXPY: is_power2(num) in power_tools.py
  ## TRUE iff `num` is a positive integer power of 2 (1, 2, 4, 8, ...).
  ##
  ## FIX: the previous bigz branch coerced through as.integer(), which
  ## produces NA (and then an error in bitwAnd) for bigz values >= 2^31.
  ## Repeated exact halving works at any magnitude. The numeric branch is
  ## hardened the same way and rejects NA/NaN explicitly (previously an NA
  ## comparison could propagate into the && chain).
  if (is.bigz(num)) {
    if (num <= 0L) return(FALSE)
    ## Strip factors of 2 in exact bigz arithmetic; a power of 2 reduces to 1
    while (num %% 2L == 0L) num <- num %/% 2L
    return(num == 1L)
  }
  if (!is.numeric(num) || length(num) != 1L) return(FALSE)
  if (is.na(num) || num <= 0 || num != trunc(num)) return(FALSE)
  ## Integer-valued double: same halving test, exact for values < 2^53
  while (num %% 2 == 0) num <- num / 2
  num == 1
}

#' Limit the denominator of a bigq fraction (CPython algorithm)
#'
#' Port of CPython's Fraction.limit_denominator() including semiconvergent check.
#' All arithmetic uses gmp bigz/bigq for exactness.
#'
#' @param frac A bigq scalar
#' @param max_denom A bigz or integer -- maximum allowed denominator
#' @returns A bigq scalar with denominator <= max_denom
#' @noRd
.limit_denominator <- function(frac, max_denom) {
  ## Exact port of CPython's Fraction.limit_denominator(): returns the closest
  ## rational to `frac` whose denominator is <= max_denom, found via the
  ## continued-fraction expansion plus the final semiconvergent check.
  frac <- as.bigq(frac)
  max_denom <- as.bigz(max_denom)

  if (max_denom < 1L) {
    cli_abort("{.arg max_denom} must be >= 1, got {.val {as.character(max_denom)}}.")
  }

  ## Handle negative fractions by recursing on absolute value
  ## (the algorithm below assumes frac >= 0)
  if (frac < 0L) {
    return(-.limit_denominator(-frac, max_denom))
  }

  ## Already within limit -- return unchanged
  if (denominator(frac) <= max_denom) return(frac)

  ## CPython continued-fraction algorithm (exact port).
  ## (p0/q0, p1/q1) track two consecutive convergents of the expansion;
  ## (n, d) is the remaining fraction being expanded via Euclid's algorithm.
  p0 <- as.bigz(0L); q0 <- as.bigz(1L)
  p1 <- as.bigz(1L); q1 <- as.bigz(0L)
  n <- as.bigz(numerator(frac))
  d <- as.bigz(denominator(frac))

  repeat {
    a <- n %/% d  # next continued-fraction coefficient
    q2 <- q0 + a * q1
    if (q2 > max_denom) break  # next convergent's denominator would exceed limit
    ## Simultaneous update of the convergent pair
    tmp_p0 <- p1; tmp_q0 <- q1
    tmp_p1 <- p0 + a * p1; tmp_q1 <- q2
    p0 <- tmp_p0; q0 <- tmp_q0
    p1 <- tmp_p1; q1 <- tmp_q1
    ## Euclidean step
    tmp_n <- d; tmp_d <- n - a * d
    n <- tmp_n; d <- tmp_d
  }

  ## Two candidates: last convergent (p1/q1) and semiconvergent
  ## (p0 + k*p1)/(q0 + k*q1), where k is the largest multiplier that keeps
  ## the semiconvergent's denominator within max_denom.
  k <- (max_denom - q0) %/% q1
  bound1 <- as.bigq(p0 + k * p1, q0 + k * q1)
  bound2 <- as.bigq(p1, q1)

  ## Pick the closer approximation (prefer bound2 on tie, matching CPython's
  ## choice of the convergent over the semiconvergent at equal distance)
  diff1 <- bound1 - frac
  diff2 <- bound2 - frac
  if (diff1 < 0L) diff1 <- -diff1
  if (diff2 < 0L) diff2 <- -diff2

  if (diff2 <= diff1) bound2 else bound1
}

#' Test if a value is a nonnegative dyadic fraction or integer
#'
#' @param frac An integer, bigz, or bigq value
#' @returns Logical
#' @noRd
is_dyad <- function(frac) {
  ## CVXPY: is_dyad(frac) in power_tools.py
  ## A dyadic value is a nonnegative integer, or a nonnegative bigq whose
  ## denominator is a power of 2.
  if (is.integer(frac) || is.bigz(frac)) {
    return(frac >= 0L)
  }
  if (is.bigq(frac)) {
    if (frac < 0L) return(FALSE)
    return(is_power2(as.integer(denominator(frac))))
  }
  ## Plain numeric scalar: accepted only when it is a non-negative integer
  if (!is.numeric(frac) || length(frac) != 1L) return(FALSE)
  frac >= 0 && frac == as.integer(frac)
}

#' Test if w is a valid weight vector
#'
#' All elements must be nonnegative integer or bigq, and must sum to exactly 1.
#' Floats are rejected because their exact bigq representation rarely sums to 1.
#'
#' @param w A bigq vector, or numeric/integer vector
#' @returns Logical
#' @noRd
is_weight <- function(w) {
  ## CVXPY: is_weight(w) in power_tools.py
  ## Valid weight vector: non-empty, all elements nonnegative, and the exact
  ## bigq sum equals 1 (floats that merely approximate 1 are rejected).
  w <- as.bigq(w)
  n <- length(w)
  if (n == 0L) return(FALSE)
  negative <- vapply(seq_len(n), function(i) w[i] < 0L, logical(1))
  if (any(negative)) return(FALSE)
  sum(w) == as.bigq(1L)
}

#' Test if w is a valid dyadic weight vector
#'
#' @param w A bigq vector (or coercible)
#' @returns Logical
#' @noRd
is_dyad_weight <- function(w) {
  ## CVXPY: is_dyad_weight(w) in power_tools.py
  ## A dyadic weight vector is a weight vector whose every element is dyadic.
  if (!is_weight(w)) return(FALSE)
  for (i in seq_len(length(w))) {
    if (!is_dyad(w[i])) return(FALSE)
  }
  TRUE
}

#' Approximate a/sum(a) as fractions with denominator exactly `denom`
#'
#' Uses the largest-remainder method for rounding.
#'
#' @param a Numeric vector or list of values
#' @param denom Integer -- the exact denominator to use
#' @returns A bigq vector summing to 1
#' @noRd
make_frac <- function(a, denom) {
  ## CVXPY: make_frac(a, denom) in power_tools.py
  ## Approximates a/sum(a) with fractions of common denominator `denom`,
  ## distributing the leftover units by the largest-remainder method so the
  ## result sums to exactly 1.
  vals <- if (is.list(a)) vapply(a, as.numeric, numeric(1)) else as.numeric(a)
  shares <- vals / sum(vals)

  floors <- as.integer(floor(denom * shares))
  ## Signed rounding error; most negative entries were shorted the most
  rounding_err <- floors / denom - shares

  shortfall <- as.integer(denom - sum(floors))
  if (shortfall > 0L) {
    bump <- order(rounding_err)[seq_len(shortfall)]
    floors[bump] <- floors[bump] + 1L
  }

  as.bigq(floors, as.integer(denom))
}

#' Return the dyadic completion of a weight vector
#'
#' If w is already dyadic, returns w unchanged. Otherwise appends a dummy
#' variable and rescales to make all denominators powers of 2.
#'
#' @param w A bigq vector (nonneg, sums to 1)
#' @returns A bigq vector with all dyadic elements, summing to 1
#' @noRd
dyad_completion <- function(w) {
  ## CVXPY: dyad_completion(w) in power_tools.py
  ## If every denominator in w is already a power of 2, w is returned as-is.
  ## Otherwise each weight is rescaled by d/p (d = largest non-dyadic
  ## denominator, p = next power of 2 above it), a dummy weight (p-d)/p is
  ## appended to restore the sum to 1, and the process recurses.
  w <- as.bigq(w)

  denoms <- vapply(seq_len(length(w)),
                   function(i) as.integer(denominator(w[i])),
                   integer(1))
  bad <- denoms[!vapply(denoms, is_power2, logical(1))]

  if (length(bad) == 0L) return(w)

  d <- max(bad)
  p <- next_pow2(d)
  dyad_completion(c(w * as.bigq(d, p), as.bigq(p - d, p)))
}

## Shared error message used by fracify() when a weight vector's denominators
## exceed max_denom. Contains cli-style {.arg} markup, so it must be passed
## to cli_abort() (which interpolates it), not to stop().
.EXCEED_DENOM_MSG <- paste0(
  "Can't reliably represent the input weight vector. ",
  "Try increasing {.arg max_denom} or checking the denominators ",
  "of your input fractions."
)

#' Normalize weights to exact rational fractions with dyadic completion
#'
#' Main entry point for converting a vector of weights into exact rational
#' fractions suitable for SOC-based geometric mean decomposition.
#'
#' @param a Numeric vector, bigq vector, or list of weights (nonneg)
#' @param max_denom Integer -- max denominator (rounded up to power of 2)
#' @param force_dyad Logical -- if TRUE, force w to be dyadic (w == w_dyad)
#' @returns List with `w` (bigq weight vector) and `w_dyad` (dyadic completion)
#' @noRd
fracify <- function(a, max_denom = 1024L, force_dyad = FALSE) {
  ## CVXPY: fracify(a, max_denom, force_dyad) in power_tools.py
  ##
  ## Normalizes `a` into exact rational weights `w` (summing to 1) plus the
  ## dyadic completion `w_dyad`. Exact inputs (integer, bigz, bigq) take an
  ## exact-division path; floats are approximated via .limit_denominator().

  ## --- Input normalization ---
  if (is.bigq(a)) {
    n <- length(a)
    a_list <- lapply(seq_len(n), function(i) a[i])
    ## FIX: bigq inputs must also be rejected when negative -- previously only
    ## the numeric and list branches performed this check (CVXPY validates
    ## every input element).
    if (any(vapply(seq_len(n), function(i) a[i] < 0L, logical(1)))) {
      cli_abort("Input powers must be nonnegative.")
    }
    all_exact <- TRUE
  } else if (is.numeric(a)) {
    if (any(a < 0)) cli_abort("Input powers must be nonnegative.")
    n <- length(a)
    a_list <- as.list(a)
    ## Integer-valued numerics count as exact
    all_exact <- all(a == floor(a))
  } else if (is.list(a)) {
    n <- length(a)
    a_list <- a
    all_exact <- all(vapply(a_list, function(v) {
      is.integer(v) || is.bigq(v) || is.bigz(v)
    }, logical(1)))
    if (any(vapply(a_list, function(v) as.numeric(v) < 0, logical(1)))) {
      cli_abort("Input powers must be nonnegative.")
    }
  } else {
    cli_abort("Input {.arg a} must be numeric, bigq, or a list.")
  }

  if (!is.numeric(max_denom) || length(max_denom) != 1L ||
      max_denom <= 0 || max_denom != as.integer(max_denom)) {
    cli_abort("{.arg max_denom} must be a positive integer.")
  }

  ## Round the limit up to a power of 2 (dyadic completion requires it)
  max_denom <- next_pow2(as.integer(max_denom))

  ## --- Compute weight fractions ---
  if (force_dyad) {
    ## Forced dyadic: approximate directly with denominator exactly max_denom
    w_frac <- make_frac(a_list, max_denom)
  } else if (all_exact) {
    ## Exact path: integer/bigq division
    a_bigq <- do.call(c, lapply(a_list, as.bigq))
    total <- sum(a_bigq)
    w_frac <- a_bigq / total
    d_max <- get_max_denom(w_frac)
    if (d_max > max_denom) {
      cli_abort(.EXCEED_DENOM_MSG)
    }
  } else {
    ## Float path: approximate each weight via limit_denominator, falling
    ## back to the largest-remainder method if the results do not sum to 1
    a_num <- vapply(a_list, as.numeric, numeric(1))
    total <- sum(a_num)
    w_frac <- do.call(c, lapply(a_num, function(v) {
      .limit_denominator(as.bigq(v / total), as.bigz(max_denom))
    }))
    if (sum(w_frac) != as.bigq(1L)) {
      w_frac <- make_frac(a_list, max_denom)
    }
  }

  ## --- Dyadic completion ---
  w_dyad <- dyad_completion(w_frac)
  if (get_max_denom(w_dyad) > max_denom) {
    cli_abort(.EXCEED_DENOM_MSG)
  }

  list(w = w_frac, w_dyad = w_dyad)
}

#' Split a dyadic tuple into two children
#'
#' Greedy bit-peeling: double parent, peel bits into child1 until child1 sums to 1.
#' Basis vectors (contain a 1 entry) return empty list (cannot be split).
#'
#' @param w_dyad A bigq dyadic weight vector
#' @returns list(child1, child2) as bigq vectors, or list() for basis vectors
#' @noRd
.split_dyad <- function(w_dyad) {
  ## CVXPY: split(w_dyad) in power_tools.py (renamed to avoid base::split conflict)
  ##
  ## Splits a dyadic weight vector into two dyadic children such that
  ## child1 + child2 == 2 * w_dyad with sum(child1) == 1 (hence also
  ## sum(child2) == 1, since sum(w_dyad) == 1). Bits of weight are peeled
  ## greedily from child2 into child1, sweeping from the largest bit (1)
  ## downward; checking the sum after EVERY single move prevents overshoot,
  ## which guarantees termination for any valid dyadic weight vector.
  n <- length(w_dyad)

  ## Basis vector check: a standard unit vector is a leaf and cannot be split
  if (any(w_dyad == 1L)) return(list())

  bit <- as.bigq(1L)
  child1 <- as.bigq(rep(0L, n))
  child2 <- as.bigq(2L) * w_dyad  # double the parent

  repeat {
    for (ind in seq_len(n)) {
      if (child2[ind] >= bit) {
        ## Move one `bit` of weight from child2 into child1
        child2[ind] <- child2[ind] - bit
        child1[ind] <- child1[ind] + bit
      }
      if (sum(child1) == as.bigq(1L)) {
        return(list(child1, child2))
      }
    }
    ## No element reached a total of 1 at this bit size: halve and re-sweep
    bit <- bit / as.bigq(2L)
  }
}

#' Recursively split dyadic tuples to produce a DAG
#'
#' Returns an ordered dict (as list of keys and children) where interior nodes
#' represent SOC constraints for the geometric mean decomposition.
#' Uses BFS traversal and an environment for O(1) deduplication.
#'
#' @param w_dyad A bigq dyadic weight vector
#' @returns List with `keys` (list of bigq vectors) and `children` (list of child pairs)
#' @noRd
decompose <- function(w_dyad) {
  ## CVXPY: decompose(w_dyad) in power_tools.py
  ## Recursively splits w_dyad into pairwise children, visiting tuples in BFS
  ## order and deduplicating via an environment (O(1) string-keyed lookup).
  ## Interior nodes of the resulting DAG correspond to SOC constraints in the
  ## geometric mean decomposition.
  w_dyad <- as.bigq(w_dyad)
  if (!is_dyad_weight(w_dyad)) {
    cli_abort("Input must be a dyadic weight vector, got: {prettytuple(w_dyad)}.")
  }

  keys <- list()
  children <- list()
  visited <- new.env(hash = TRUE, parent = emptyenv())

  ## BFS over the split DAG, starting from the root tuple
  queue <- list(w_dyad)
  pos <- 1L
  while (pos <= length(queue)) {
    node <- queue[[pos]]
    pos <- pos + 1L

    node_key <- .bigq_to_key(node)
    if (exists(node_key, envir = visited, inherits = FALSE)) next
    assign(node_key, TRUE, envir = visited)

    kids <- .split_dyad(node)
    keys[[length(keys) + 1L]] <- node
    children[[length(children) + 1L]] <- kids

    if (length(kids) == 2L) {
      queue[[length(queue) + 1L]] <- kids[[1L]]
      queue[[length(queue) + 1L]] <- kids[[2L]]
    }
  }

  list(keys = keys, children = children)
}

#' Check that w_dyad is a valid dyadic completion of w
#'
#' @param w A bigq weight vector
#' @param w_dyad A bigq dyadic weight vector (proposed completion)
#' @returns Logical
#' @noRd
check_dyad <- function(w, w_dyad) {
  ## CVXPY: check_dyad(w, w_dyad) in power_tools.py
  ## Verifies that w_dyad is a valid dyadic completion of w: either equal to
  ## w itself (already dyadic), or w rescaled by (1 - dummy) with exactly one
  ## dummy element appended at the end.
  w <- as.bigq(w)
  w_dyad <- as.bigq(w_dyad)

  ## Both must be valid weight vectors (w_dyad additionally dyadic)
  if (!(is_weight(w) && is_dyad_weight(w_dyad))) return(FALSE)

  n_w <- length(w)
  n_wd <- length(w_dyad)

  ## Case 1: w is its own dyadic completion
  if (n_w == n_wd && all(w == w_dyad)) return(TRUE)

  ## Case 2: one dummy element appended; undo the rescaling and compare
  if (n_wd == n_w + 1L) {
    dummy <- w_dyad[n_wd]
    scale <- as.bigq(1L) - dummy
    ## FIX: guard dummy == 1, which previously raised a bigq division-by-zero
    ## error instead of returning FALSE. No valid w (which sums to 1) can
    ## correspond to a completion whose dummy absorbs all the weight.
    if (scale == as.bigq(0L)) return(FALSE)
    ## Rescale: w_dyad[1:n_w] / (1 - dummy) should equal w
    rescaled <- w_dyad[seq_len(n_w)] / scale
    return(all(w == rescaled))
  }

  FALSE
}

#' L-infinity approximation error between a/sum(a) and w_approx
#'
#' @param a_orig Numeric vector of original weights
#' @param w_approx A bigq weight vector (approximation)
#' @returns Numeric -- max absolute difference
#' @noRd
approx_error <- function(a_orig, w_approx) {
  ## CVXPY: approx_error(a_orig, w_approx) in power_tools.py
  ## L-infinity distance between the normalized original weights and the
  ## rational approximation, computed in double precision.
  orig <- as.numeric(a_orig)
  target <- orig / sum(orig)
  diffs <- abs(target - as.numeric(w_approx))
  max(diffs)
}

#' Lower bound on number of cones to represent a dyadic weight vector
#'
#' @param w_dyad A bigq dyadic weight vector
#' @returns Integer
#' @noRd
lower_bound <- function(w_dyad) {
  ## CVXPY: lower_bound(w_dyad) in power_tools.py
  ## Lower bound on the cone count: the max of log2(largest denominator)
  ## and (number of nonzero weights - 1).
  w_dyad <- as.bigq(w_dyad)

  ## Bound 1: depth implied by the largest (power-of-2) denominator
  depth_bound <- as.integer(round(log2(get_max_denom(w_dyad))))

  ## Bound 2: nonzero entries minus one
  nonzero <- 0L
  for (i in seq_len(length(w_dyad))) {
    if (w_dyad[i] != 0L) nonzero <- nonzero + 1L
  }

  max(depth_bound, nonzero - 1L)
}

#' Excess cones beyond the lower bound
#'
#' @param w_dyad A bigq dyadic weight vector
#' @param tree Decomposition tree from decompose()
#' @returns Integer
#' @noRd
over_bound <- function(w_dyad, tree) {
  ## CVXPY: over_bound(w_dyad, tree) in power_tools.py
  ## How many more nodes the decomposition tree has than the theoretical
  ## lower bound (leaf/basis nodes excluded via the nonzero count).
  w_dyad <- as.bigq(w_dyad)
  nonzero <- sum(vapply(seq_len(length(w_dyad)),
                        function(i) w_dyad[i] != 0L,
                        logical(1)))
  n_nodes <- length(tree$keys)
  n_nodes - lower_bound(w_dyad) - nonzero
}

#' String representation of a bigq vector as a tuple
#'
#' @param tup A bigq vector
#' @returns Character string like "(1/2, 1/4, 1/4)"
#' @noRd
prettytuple <- function(tup) {
  ## CVXPY: prettytuple(t) in power_tools.py
  ## Renders a bigq vector Python-tuple style, e.g. "(1/2, 1/4, 1/4)".
  tup <- as.bigq(tup)
  strs <- vapply(seq_len(length(tup)),
                 function(i) as.character(tup[i]),
                 character(1))
  sprintf("(%s)", paste(strs, collapse = ", "))
}

#' Pretty-print a decomposition tree
#'
#' @param d Decomposition tree from decompose()
#' @returns Character string
#' @noRd
prettydict <- function(d) {
  ## CVXPY: prettydict(d) in power_tools.py
  ## Human-readable dump of a decomposition tree: parents sorted by max
  ## denominator descending, each followed by its two children (indented,
  ## sorted by max denominator ascending).
  max_denoms <- vapply(d$keys, get_max_denom, integer(1))
  key_order <- order(max_denoms, decreasing = TRUE)

  out <- character(0)
  for (idx in key_order) {
    out <- c(out, prettytuple(d$keys[[idx]]))
    ch <- d$children[[idx]]
    if (length(ch) == 2L) {
      pair <- ch
      ## Put the child with the smaller max denominator first
      if (get_max_denom(ch[[1L]]) > get_max_denom(ch[[2L]])) {
        pair <- list(ch[[2L]], ch[[1L]])
      }
      out <- c(out,
               paste0("  ", prettytuple(pair[[1L]])),
               paste0("  ", prettytuple(pair[[2L]])))
    }
  }

  if (length(out) == 0L) return("")
  paste0(paste(out, collapse = "\n"), "\n")
}

# ======================================================================
# Power approximation functions (rewritten with gmp)
# ======================================================================

#' Rational approximation for p > 1 powers
#'
#' For p > 1, approximates 1/p as a rational and returns the power and weights.
#' With approx=TRUE, returns bigq values. With approx=FALSE, returns plain numerics.
#'
#' @param p Numeric exponent (p > 1)
#' @param max_denom Integer max denominator for approximation
#' @param approx Logical -- if TRUE, use rational approximation (bigq result)
#' @returns List with `p` (the power) and `w` (bigq or numeric weight vector of length 2)
#' @noRd
pow_high <- function(p, max_denom = 1024L, approx = TRUE) {
  ## CVXPY: pow_high(p, max_denom, approx) in power_tools.py
  ## For p > 1: approximates 1/p as a rational with bounded denominator and
  ## returns the (possibly integer) reciprocal power plus the weight pair.
  if (p <= 1) cli_abort("pow_high requires p > 1, got {.val {p}}.")

  if (!approx) {
    ## Plain-numeric variant, no rational arithmetic
    return(list(p = p, w = c(1 / p, 1 - 1 / p)))
  }

  frac <- .limit_denominator(as.bigq(1L) / as.bigq(p), as.bigz(max_denom))
  recip <- as.bigq(1L) / frac
  weights <- .bigq_vec(frac, as.bigq(1L) - frac)

  ## If 1/frac is an integer, return it as a plain R integer
  if (denominator(recip) == 1L) {
    list(p = as.integer(numerator(recip)), w = weights)
  } else {
    list(p = recip, w = weights)
  }
}

#' Rational approximation for 0 < p < 1 powers
#'
#' @param p Numeric exponent (0 < p < 1)
#' @param max_denom Integer max denominator for approximation
#' @param approx Logical -- if TRUE, use rational approximation (bigq result)
#' @returns List with `p` (bigq or numeric) and `w` (bigq or numeric weight vector)
#' @noRd
pow_mid <- function(p, max_denom = 1024L, approx = TRUE) {
  ## CVXPY: pow_mid(p, max_denom, approx) in power_tools.py
  ## For 0 < p < 1: approximates p itself as a bounded-denominator rational.
  if (p <= 0 || p >= 1) cli_abort("pow_mid requires 0 < p < 1, got {.val {p}}.")

  if (!approx) {
    ## Plain-numeric variant, no rational arithmetic
    return(list(p = p, w = c(p, 1 - p)))
  }

  frac <- .limit_denominator(as.bigq(p), as.bigz(max_denom))
  list(p = frac, w = .bigq_vec(frac, as.bigq(1L) - frac))
}

#' Rational approximation for p < 0 powers
#'
#' @param p Numeric exponent (p < 0)
#' @param max_denom Integer max denominator for approximation
#' @param approx Logical -- if TRUE, use rational approximation (bigq result)
#' @returns List with `p` (bigq or numeric) and `w` (bigq or numeric weight vector)
#' @noRd
pow_neg <- function(p, max_denom = 1024L, approx = TRUE) {
  ## CVXPY: pow_neg(p, max_denom, approx) in power_tools.py
  ## For p < 0: approximates p/(p-1) (which lies in (0, 1)) as a rational,
  ## then maps it back to the returned power.
  if (p >= 0) cli_abort("pow_neg requires p < 0, got {.val {p}}.")

  if (!approx) {
    ## Plain-numeric variant, no rational arithmetic
    return(list(p = p, w = c(p / (p - 1), -1 / (p - 1))))
  }

  exact_p <- as.bigq(p)
  frac <- .limit_denominator(exact_p / (exact_p - as.bigq(1L)), as.bigz(max_denom))
  list(
    p = frac / (frac - as.bigq(1L)),
    w = .bigq_vec(frac, as.bigq(1L) - frac)
  )
}

# ======================================================================
# CVXR expression-dependent functions
# ======================================================================

#' SOC constraint factory for geometric mean
#'
#' Creates the SOC constraint encoding the geometric mean
#' relation \eqn{t \le \sqrt{x \cdot y}}.
#'
#' @param t_var Expression -- the epigraph variable
#' @param x Expression -- first argument
#' @param y Expression -- second argument
#' @returns An SOC constraint
#' @noRd
gm <- function(t_var, x, y) {
  ## CVXPY: gm(t, x, y) in power_tools.py
  ## Encodes t <= sqrt(x * y) as the SOC constraint
  ## ||(x - y, 2t)||_2 <= x + y, applied elementwise over the expression.
  n <- expr_size(t_var)
  sum_col <- reshape_expr(x + y, c(n, 1L))
  diff_row <- reshape_expr(x - y, c(1L, n))
  scaled_t_row <- reshape_expr(2L * t_var, c(1L, n))
  SOC(
    t = sum_col,
    X = vstack(diff_row, scaled_t_row),
    axis = 2L
  )
}

#' Full SOC constraint builder using dyadic decomposition tree
#'
#' Builds the set of SOC constraints needed to represent t <= x^p
#' (weighted geometric mean) using the dyadic decomposition of p.
#'
#' @param t_var Expression -- the epigraph variable
#' @param x_list List of Expression objects (same length as p)
#' @param p A bigq weight vector (nonneg, sums to 1)
#' @returns List of SOC constraints
#' @noRd
gm_constrs <- function(t_var, x_list, p) {
  ## CVXPY: gm_constrs(t, x_list, p) in power_tools.py
  ##
  ## Builds the SOC constraints encoding t <= prod(x_i^p_i) (a weighted
  ## geometric mean): the dyadic completion of p is decomposed into a DAG of
  ## pairwise geometric means, and each interior node becomes one gm() SOC
  ## constraint. Auxiliary variables are created lazily, one per distinct
  ## tuple in the DAG.
  if (!is_weight(p)) {
    cli_abort("Weight vector {.arg p} must be nonneg and sum to 1.")
  }

  w <- dyad_completion(p)
  tree <- decompose(w)

  ## Variable cache: string key -> Expression (lazy creation)
  var_cache <- new.env(hash = TRUE, parent = emptyenv())

  ## Returns the cached Expression for a tuple key, creating a fresh Variable
  ## with t_var's shape on first lookup
  .get_or_create <- function(key_str) {
    if (!exists(key_str, envir = var_cache, inherits = FALSE)) {
      assign(key_str, Variable(t_var@shape), envir = var_cache)
    }
    get(key_str, envir = var_cache, inherits = FALSE)
  }

  ## Assign root tuple -> t_var (the epigraph variable anchors the DAG root)
  root_key <- .bigq_to_key(w)
  assign(root_key, t_var, envir = var_cache)

  ## Extend x_list with t copies for dummy variables (dyadic completion padding)
  n_w <- length(w)
  n_x <- length(x_list)
  if (n_w > n_x) {
    x_list <- c(x_list, rep(list(t_var), n_w - n_x))
  }

  ## Assign basis vectors (standard unit vectors) -> corresponding x_list
  ## entries; zero-weight entries never appear in the tree, so they are skipped
  for (i in seq_len(n_w)) {
    if (w[i] > 0L) {
      basis <- as.bigq(rep(0L, n_w))
      basis[i] <- as.bigq(1L)
      key <- .bigq_to_key(basis)
      assign(key, x_list[[i]], envir = var_cache)
    }
  }

  ## Walk tree and create SOC constraints for interior nodes
  constraints <- list()
  for (idx in seq_along(tree$keys)) {
    elem <- tree$keys[[idx]]
    ch <- tree$children[[idx]]

    ## Skip basis vectors (they have a 1 entry) and leaf nodes
    if (!any(elem == 1L) && length(ch) == 2L) {
      d_elem <- .get_or_create(.bigq_to_key(elem))
      d_child1 <- .get_or_create(.bigq_to_key(ch[[1L]]))
      d_child2 <- .get_or_create(.bigq_to_key(ch[[2L]]))

      ## gm() yields one SOC constraint: elem <= sqrt(child1 * child2)
      constraints <- c(constraints, list(gm(d_elem, d_child1, d_child2)))
    }
  }

  ## Handle single-variable case: t == x (no geometric mean needed)
  if (length(x_list) == 1L) {
    constraints <- c(constraints, list(t_var == x_list[[1L]]))
  }

  constraints
}

#' PowCone3D constraint factory
#'
#' Creates a single PowCone3D constraint encoding the power cone relation.
#'
#' @param t Expression (epigraph variable)
#' @param x_list List of two Expression objects
#' @param alpha Numeric scalar in (0, 1)
#' @returns List containing a single PowCone3D constraint
#' @noRd
powcone_constrs <- function(t, x_list, alpha) {
  ## CVXPY: powcone_constrs(t, x_list, p) in power_tools.py
  ## Wraps the three expressions in a single PowCone3D constraint.
  constr <- PowCone3D(x_list[[1L]], x_list[[2L]], t, alpha)
  list(constr)
}

# Try the CVXR package in your browser
#
# Any scripts or data that you put into this service are public.
#
# CVXR documentation built on March 6, 2026, 9:10 a.m.