Skip to content

Commit

Permalink
Merge pull request #210 from ds4dm/vars_not_cols
Browse files Browse the repository at this point in the history
Remove LP notions (cols/rows) from observations
  • Loading branch information
dchetelat authored Aug 4, 2021
2 parents b8e3f90 + 3389ce6 commit e0d043a
Show file tree
Hide file tree
Showing 12 changed files with 196 additions and 144 deletions.
10 changes: 5 additions & 5 deletions libecole/include/ecole/observation/nodebipartite.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -12,10 +12,10 @@ namespace ecole::observation {
struct NodeBipartiteObs {
using value_type = double;

static inline std::size_t constexpr n_static_column_features = 5;
static inline std::size_t constexpr n_dynamic_column_features = 14;
static inline std::size_t constexpr n_column_features = n_static_column_features + n_dynamic_column_features;
enum struct ColumnFeatures : std::size_t {
static inline std::size_t constexpr n_static_variable_features = 5;
static inline std::size_t constexpr n_dynamic_variable_features = 14;
static inline std::size_t constexpr n_variable_features = n_static_variable_features + n_dynamic_variable_features;
enum struct VariableFeatures : std::size_t {
/** Static features */
objective = 0,
is_type_binary, // One hot encoded
Expand Down Expand Up @@ -54,7 +54,7 @@ struct NodeBipartiteObs {
scaled_age,
};

xt::xtensor<value_type, 2> column_features;
xt::xtensor<value_type, 2> variable_features;
xt::xtensor<value_type, 2> row_features;
utility::coo_matrix<value_type> edge_features;
};
Expand Down
16 changes: 16 additions & 0 deletions libecole/include/ecole/tweak/range.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
#pragma once

#include <utility>

#include <nonstd/span.hpp>
#include <range/v3/range_fwd.hpp>

/**
 * Tell the range library that `nonstd::span` is a view type.
 *
 * Opting `nonstd::span` into `ranges::enable_borrowed_range` allows rvalue
 * (temporary) spans to be used directly in range pipelines: a span does not
 * own its elements, so iterators obtained from a temporary span stay valid
 * after the span object itself is destroyed.
 *
 * See `Rvalue Ranges and Views in C++20 <https://tristanbrindle.com/posts/rvalue-ranges-and-views>`_
 * FIXME no longer needed when switching to C++20 ``std::span``.
 * */
namespace ranges {
// Specialized for every element type and extent (fixed or dynamic alike).
template <typename T, std::size_t Extent> inline constexpr bool enable_borrowed_range<nonstd::span<T, Extent>> = true;
} // namespace ranges
11 changes: 5 additions & 6 deletions libecole/src/dynamics/branching.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ std::optional<xt::xtensor<std::size_t, 1>> action_set(scip::Model const& model,
}
auto const branch_cands = pseudo ? model.pseudo_branch_cands() : model.lp_branch_cands();
auto branch_cols = xt::xtensor<std::size_t, 1>::from_shape({branch_cands.size()});
auto const var_to_idx = [](auto const var) { return SCIPcolGetLPPos(SCIPvarGetCol(var)); };
auto const var_to_idx = [](auto const var) { return SCIPvarGetProbindex(var); };
std::transform(branch_cands.begin(), branch_cands.end(), branch_cols.begin(), var_to_idx);

assert(branch_cols.size() > 0);
Expand All @@ -38,13 +38,12 @@ auto BranchingDynamics::reset_dynamics(scip::Model& model) -> std::tuple<bool, A
}

auto BranchingDynamics::step_dynamics(scip::Model& model, std::size_t const& var_idx) -> std::tuple<bool, ActionSet> {
auto const lp_cols = model.lp_columns();
if (var_idx >= lp_cols.size()) {
auto const vars = model.variables();
if (var_idx >= vars.size()) {
throw std::invalid_argument{
fmt::format("Branching candidate index {} larger than the number of columns ({}).", var_idx, lp_cols.size())};
fmt::format("Branching candidate index {} larger than the number of variables ({}).", var_idx, vars.size())};
}
auto* const var = SCIPcolGetVar(lp_cols[var_idx]);
scip::call(SCIPbranchVar, model.get_scip_ptr(), var, nullptr, nullptr, nullptr);
scip::call(SCIPbranchVar, model.get_scip_ptr(), vars[var_idx], nullptr, nullptr, nullptr);
model.solve_iter_branch(SCIP_BRANCHED);

if (model.solve_iter_is_done()) {
Expand Down
26 changes: 10 additions & 16 deletions libecole/src/observation/khalil-2016.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -551,15 +551,10 @@ void set_dynamic_features(
* The static features have been computed for all LP columns and stored in the order of `LPcolumns`.
* We need to find the one associated with the given variable.
*/
template <typename Tensor>
void set_precomputed_static_features(
Tensor&& out,
SCIP_VAR* const var,
xt::xtensor<value_type, 2> const& static_features) {

auto const col_idx = static_cast<std::ptrdiff_t>(SCIPcolGetIndex(SCIPvarGetCol(var)));
template <typename TensorOut, typename TensorIn>
void set_precomputed_static_features(TensorOut&& var_features, TensorIn const& var_static_features) {
using namespace xt::placeholders;
xt::view(out, xt::range(_, Khalil2016Obs::n_static_features)) = xt::row(static_features, col_idx);
xt::view(var_features, xt::range(_, Khalil2016Obs::n_static_features)) = var_static_features;
}

/******************************
Expand All @@ -568,20 +563,19 @@ void set_precomputed_static_features(

auto extract_all_features(scip::Model& model, xt::xtensor<value_type, 2> const& static_features) {
xt::xtensor<value_type, 2> observation{
{model.pseudo_branch_cands().size(), Khalil2016Obs::n_features},
{model.variables().size(), Khalil2016Obs::n_features},
std::nan(""),
};

auto* const scip = model.get_scip_ptr();
auto const active_rows_weights = stats_for_active_constraint_coefficients_weights(model);

auto const pseudo_branch_cands = model.pseudo_branch_cands();
auto const n_pseudo_branch_cands = pseudo_branch_cands.size();
for (std::size_t var_idx = 0; var_idx < n_pseudo_branch_cands; ++var_idx) {
auto* const var = pseudo_branch_cands[var_idx];
auto features = xt::row(observation, static_cast<std::ptrdiff_t>(var_idx));
set_precomputed_static_features(features, var, static_features);
set_dynamic_features(features, scip, var, active_rows_weights);
for (auto* var : model.pseudo_branch_cands()) {
auto const var_idx = SCIPvarGetProbindex(var);
auto var_features = xt::row(observation, var_idx);
auto var_static_features = xt::row(static_features, var_idx);
set_precomputed_static_features(var_features, var_static_features);
set_dynamic_features(var_features, scip, var, active_rows_weights);
}

return observation;
Expand Down
106 changes: 54 additions & 52 deletions libecole/src/observation/nodebipartite.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -20,10 +20,10 @@ namespace {
* Common helpers *
*********************/

using xmatrix = decltype(NodeBipartiteObs::column_features);
using xmatrix = decltype(NodeBipartiteObs::variable_features);
using value_type = xmatrix::value_type;

using ColumnFeatures = NodeBipartiteObs::ColumnFeatures;
using VariableFeatures = NodeBipartiteObs::VariableFeatures;
using RowFeatures = NodeBipartiteObs::RowFeatures;

value_type constexpr cste = 5.;
Expand All @@ -34,9 +34,9 @@ SCIP_Real obj_l2_norm(SCIP* const scip) noexcept {
return norm > 0 ? norm : 1.;
}

/******************************************
* Column features extraction functions *
******************************************/
/*******************************************
* Variable features extraction functions *
*******************************************/

std::optional<SCIP_Real> upper_bound(SCIP* const scip, SCIP_COL* const col) noexcept {
auto const ub_val = SCIPcolGetUb(col);
Expand Down Expand Up @@ -85,11 +85,11 @@ std::optional<SCIP_Real> avg_sol(SCIP* const scip, SCIP_VAR* const var) noexcept
return {};
}

std::optional<SCIP_Real> feas_frac(SCIP* const scip, SCIP_VAR* const var, SCIP_COL* const col) noexcept {
std::optional<SCIP_Real> feas_frac(SCIP* const scip, SCIP_VAR* const var) noexcept {
if (SCIPvarGetType(var) == SCIP_VARTYPE_CONTINUOUS) {
return {};
}
return SCIPfeasFrac(scip, SCIPcolGetPrimsol(col));
return SCIPfeasFrac(scip, SCIPvarGetLPSol(var));
}

/** Convert an enum to its underlying index. */
Expand All @@ -98,89 +98,89 @@ template <typename E> constexpr auto idx(E e) {
}

template <typename Features>
void set_static_features_for_col(Features&& out, SCIP_VAR* const var, SCIP_COL* const col, value_type obj_norm) {
out[idx(ColumnFeatures::objective)] = SCIPcolGetObj(col) / obj_norm;
void set_static_features_for_var(Features&& out, SCIP_VAR* const var, value_type obj_norm) {
out[idx(VariableFeatures::objective)] = SCIPvarGetObj(var) / obj_norm;
// One-hot encoding of variable type
out[idx(ColumnFeatures::is_type_binary)] = 0.;
out[idx(ColumnFeatures::is_type_integer)] = 0.;
out[idx(ColumnFeatures::is_type_implicit_integer)] = 0.;
out[idx(ColumnFeatures::is_type_continuous)] = 0.;
out[idx(VariableFeatures::is_type_binary)] = 0.;
out[idx(VariableFeatures::is_type_integer)] = 0.;
out[idx(VariableFeatures::is_type_implicit_integer)] = 0.;
out[idx(VariableFeatures::is_type_continuous)] = 0.;
switch (SCIPvarGetType(var)) {
case SCIP_VARTYPE_BINARY:
out[idx(ColumnFeatures::is_type_binary)] = 1.;
out[idx(VariableFeatures::is_type_binary)] = 1.;
break;
case SCIP_VARTYPE_INTEGER:
out[idx(ColumnFeatures::is_type_integer)] = 1.;
out[idx(VariableFeatures::is_type_integer)] = 1.;
break;
case SCIP_VARTYPE_IMPLINT:
out[idx(ColumnFeatures::is_type_implicit_integer)] = 1.;
out[idx(VariableFeatures::is_type_implicit_integer)] = 1.;
break;
case SCIP_VARTYPE_CONTINUOUS:
out[idx(ColumnFeatures::is_type_continuous)] = 1.;
out[idx(VariableFeatures::is_type_continuous)] = 1.;
break;
default:
assert(false); // All enum cases must be handled
}
}

template <typename Features>
void set_dynamic_features_for_col(
void set_dynamic_features_for_var(
Features&& out,
SCIP* const scip,
SCIP_VAR* const var,
SCIP_COL* const col,
value_type obj_norm,
value_type n_lps) {
out[idx(ColumnFeatures::has_lower_bound)] = static_cast<value_type>(lower_bound(scip, col).has_value());
out[idx(ColumnFeatures::has_upper_bound)] = static_cast<value_type>(upper_bound(scip, col).has_value());
out[idx(ColumnFeatures::normed_reduced_cost)] = SCIPgetColRedcost(scip, col) / obj_norm;
out[idx(ColumnFeatures::solution_value)] = SCIPcolGetPrimsol(col);
out[idx(ColumnFeatures::solution_frac)] = feas_frac(scip, var, col).value_or(0.);
out[idx(ColumnFeatures::is_solution_at_lower_bound)] = static_cast<value_type>(is_prim_sol_at_lb(scip, col));
out[idx(ColumnFeatures::is_solution_at_upper_bound)] = static_cast<value_type>(is_prim_sol_at_ub(scip, col));
out[idx(ColumnFeatures::scaled_age)] = static_cast<value_type>(SCIPcolGetAge(col)) / (n_lps + cste);
out[idx(ColumnFeatures::incumbent_value)] = best_sol_val(scip, var).value_or(nan);
out[idx(ColumnFeatures::average_incumbent_value)] = avg_sol(scip, var).value_or(nan);
out[idx(VariableFeatures::has_lower_bound)] = static_cast<value_type>(lower_bound(scip, col).has_value());
out[idx(VariableFeatures::has_upper_bound)] = static_cast<value_type>(upper_bound(scip, col).has_value());
out[idx(VariableFeatures::normed_reduced_cost)] = SCIPgetVarRedcost(scip, var) / obj_norm;
out[idx(VariableFeatures::solution_value)] = SCIPvarGetLPSol(var);
out[idx(VariableFeatures::solution_frac)] = feas_frac(scip, var).value_or(0.);
out[idx(VariableFeatures::is_solution_at_lower_bound)] = static_cast<value_type>(is_prim_sol_at_lb(scip, col));
out[idx(VariableFeatures::is_solution_at_upper_bound)] = static_cast<value_type>(is_prim_sol_at_ub(scip, col));
out[idx(VariableFeatures::scaled_age)] = static_cast<value_type>(SCIPcolGetAge(col)) / (n_lps + cste);
out[idx(VariableFeatures::incumbent_value)] = best_sol_val(scip, var).value_or(nan);
out[idx(VariableFeatures::average_incumbent_value)] = avg_sol(scip, var).value_or(nan);
// One-hot encoding of the basis status
out[idx(ColumnFeatures::is_basis_lower)] = 0.;
out[idx(ColumnFeatures::is_basis_basic)] = 0.;
out[idx(ColumnFeatures::is_basis_upper)] = 0.;
out[idx(ColumnFeatures::is_basis_zero)] = 0.;
out[idx(VariableFeatures::is_basis_lower)] = 0.;
out[idx(VariableFeatures::is_basis_basic)] = 0.;
out[idx(VariableFeatures::is_basis_upper)] = 0.;
out[idx(VariableFeatures::is_basis_zero)] = 0.;
switch (SCIPcolGetBasisStatus(col)) {
case SCIP_BASESTAT_LOWER:
out[idx(ColumnFeatures::is_basis_lower)] = 1.;
out[idx(VariableFeatures::is_basis_lower)] = 1.;
break;
case SCIP_BASESTAT_BASIC:
out[idx(ColumnFeatures::is_basis_basic)] = 1.;
out[idx(VariableFeatures::is_basis_basic)] = 1.;
break;
case SCIP_BASESTAT_UPPER:
out[idx(ColumnFeatures::is_basis_upper)] = 1.;
out[idx(VariableFeatures::is_basis_upper)] = 1.;
break;
case SCIP_BASESTAT_ZERO:
out[idx(ColumnFeatures::is_basis_zero)] = 1.;
out[idx(VariableFeatures::is_basis_zero)] = 1.;
break;
default:
assert(false); // All enum cases must be handled
}
}

void set_features_for_all_cols(xmatrix& out, scip::Model& model, bool const update_static) {
void set_features_for_all_vars(xmatrix& out, scip::Model& model, bool const update_static) {
auto* const scip = model.get_scip_ptr();

// Contant reused in every iterations
auto const n_lps = static_cast<value_type>(SCIPgetNLPs(scip));
auto const obj_norm = obj_l2_norm(scip);

auto const columns = model.lp_columns();
auto const n_columns = columns.size();
for (std::size_t col_idx = 0; col_idx < n_columns; ++col_idx) {
auto* const col = columns[col_idx];
auto* const var = SCIPcolGetVar(col);
auto features = xt::row(out, static_cast<std::ptrdiff_t>(col_idx));
auto const variables = model.variables();
auto const n_vars = variables.size();
for (std::size_t var_idx = 0; var_idx < n_vars; ++var_idx) {
auto* const var = variables[var_idx];
auto* const col = SCIPvarGetCol(var);
auto features = xt::row(out, static_cast<std::ptrdiff_t>(var_idx));
if (update_static) {
set_static_features_for_col(features, var, col, obj_norm);
set_static_features_for_var(features, var, obj_norm);
}
set_dynamic_features_for_col(features, scip, var, col, obj_norm, n_lps);
set_dynamic_features_for_var(features, scip, var, col, obj_norm, n_lps);
}
}

Expand Down Expand Up @@ -326,7 +326,7 @@ utility::coo_matrix<value_type> extract_edge_features(scip::Model& model) {
if (scip::get_unshifted_lhs(scip, row).has_value()) {
for (std::size_t k = 0; k < row_nnz; ++k) {
indices(0, j + k) = i;
indices(1, j + k) = static_cast<std::size_t>(SCIPcolGetLPPos(row_cols[k]));
indices(1, j + k) = static_cast<std::size_t>(SCIPcolGetVarProbindex(row_cols[k]));
values[j + k] = -row_vals[k];
}
j += row_nnz;
Expand All @@ -335,7 +335,7 @@ utility::coo_matrix<value_type> extract_edge_features(scip::Model& model) {
if (scip::get_unshifted_rhs(scip, row).has_value()) {
for (std::size_t k = 0; k < row_nnz; ++k) {
indices(0, j + k) = i;
indices(1, j + k) = static_cast<std::size_t>(SCIPcolGetLPPos(row_cols[k]));
indices(1, j + k) = static_cast<std::size_t>(SCIPcolGetVarProbindex(row_cols[k]));
values[j + k] = row_vals[k];
}
j += row_nnz;
Expand All @@ -344,8 +344,9 @@ utility::coo_matrix<value_type> extract_edge_features(scip::Model& model) {
}

auto const n_rows = n_ineq_rows(model);
auto const n_cols = static_cast<std::size_t>(SCIPgetNLPCols(scip));
return {values, indices, {n_rows, n_cols}};
// Edge matrix shape is (n_rows, n_vars): column indices now refer to problem variables, not LP columns.
auto const n_vars = static_cast<std::size_t>(SCIPgetNVars(scip));
return {values, indices, {n_rows, n_vars}};
}

auto is_on_root_node(scip::Model& model) -> bool {
Expand All @@ -355,17 +356,18 @@ auto is_on_root_node(scip::Model& model) -> bool {

auto extract_observation_fully(scip::Model& model) -> NodeBipartiteObs {
auto obs = NodeBipartiteObs{
xmatrix::from_shape({model.lp_columns().size(), NodeBipartiteObs::n_column_features}),
// Variable feature matrix is sized by the number of problem variables, not LP columns.
xmatrix::from_shape({model.variables().size(), NodeBipartiteObs::n_variable_features}),
xmatrix::from_shape({n_ineq_rows(model), NodeBipartiteObs::n_row_features}),
extract_edge_features(model),
};
set_features_for_all_cols(obs.column_features, model, true);
set_features_for_all_vars(obs.variable_features, model, true);
set_features_for_all_rows(obs.row_features, model, true);
return obs;
}

auto extract_observation_from_cache(scip::Model& model, NodeBipartiteObs obs) -> NodeBipartiteObs {
set_features_for_all_cols(obs.column_features, model, false);
set_features_for_all_vars(obs.variable_features, model, false);
set_features_for_all_rows(obs.row_features, model, false);
return obs;
}
Expand Down
8 changes: 4 additions & 4 deletions libecole/src/observation/pseudocosts.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -39,13 +39,13 @@ std::optional<xt::xtensor<double, 1>> Pseudocosts::extract(scip::Model& model, b
auto const [cands, lp_values] = scip_get_lp_branch_cands(scip);

/* Store pseudocosts in tensor */
auto const nb_lp_columns = static_cast<std::size_t>(SCIPgetNLPCols(scip));
xt::xtensor<double, 1> pseudocosts({nb_lp_columns}, std::nan(""));
auto const nb_vars = static_cast<std::size_t>(SCIPgetNVars(scip));
xt::xtensor<double, 1> pseudocosts({nb_vars}, std::nan(""));

for (auto const [var, lp_val] : views::zip(cands, lp_values)) {
auto const lp_index = static_cast<std::size_t>(SCIPcolGetLPPos(SCIPvarGetCol(var)));
auto const var_index = static_cast<std::size_t>(SCIPvarGetProbindex(var));
auto const score = SCIPgetVarPseudocostScore(scip, var, lp_val);
pseudocosts[lp_index] = static_cast<double>(score);
pseudocosts[var_index] = static_cast<double>(score);
}

return pseudocosts;
Expand Down
8 changes: 4 additions & 4 deletions libecole/src/observation/strongbranchingscores.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -68,12 +68,12 @@ std::optional<xt::xtensor<double, 1>> StrongBranchingScores::extract(scip::Model
model.set_param("branching/vanillafullstrong/idempotent", idempotent);

/* Store strong branching scores in tensor */
auto const num_lp_columns = static_cast<std::size_t>(SCIPgetNLPCols(scip));
auto strong_branching_scores = xt::xtensor<double, 1>({num_lp_columns}, std::nan(""));
auto const nb_vars = static_cast<std::size_t>(SCIPgetNVars(scip));
auto strong_branching_scores = xt::xtensor<double, 1>({nb_vars}, std::nan(""));

for (auto const [var, score] : views::zip(cands, cands_scores)) {
auto const lp_index = static_cast<std::size_t>(SCIPcolGetLPPos(SCIPvarGetCol(var)));
strong_branching_scores[lp_index] = static_cast<double>(score);
auto const var_index = static_cast<std::size_t>(SCIPvarGetProbindex(var));
strong_branching_scores[var_index] = static_cast<double>(score);
}

return strong_branching_scores;
Expand Down
Loading

0 comments on commit e0d043a

Please sign in to comment.