36 changes: 28 additions & 8 deletions .github/workflows/nightly.yaml
@@ -23,15 +23,35 @@ jobs:
export DATE=$(date +%F)
export SHA=$(gh api -q '.commit.sha' "repos/nvidia/cuopt/branches/${CUOPT_BRANCH}")

RUN_ID=$(gh workflow run build.yaml \
-f branch=${CUOPT_BRANCH} \
-f sha=${SHA} \
-f date=${DATE} \
-f build_type=nightly \
--json databaseId --jq '.databaseId')
gh workflow run build.yaml \
-f branch="${CUOPT_BRANCH}" \
-f sha="${SHA}" \
-f date="${DATE}" \
-f build_type=nightly

# Wait a short bit for the workflow to register (optional)
sleep 3

# Get the latest run ID for this workflow on this branch
RUN_ID=$(gh run list --workflow=build.yaml --branch="${CUOPT_BRANCH}" --json databaseId --limit 1 | jq -r '.[0].databaseId')

STATUS=$(gh run view $RUN_ID --json status,conclusion --jq '.status')
CONCLUSION=$(gh run view $RUN_ID --json status,conclusion --jq '.conclusion')

while [[ "$STATUS" != "completed" || "$CONCLUSION" == "null" ]]; do
echo "Status: $STATUS, Conclusion: $CONCLUSION — waiting 10 seconds..."
sleep 10
STATUS=$(gh run view $RUN_ID --json status,conclusion --jq '.status')
CONCLUSION=$(gh run view $RUN_ID --json status,conclusion --jq '.conclusion')
done

echo "Workflow run finished with conclusion: $CONCLUSION"

if [[ "$CONCLUSION" != "success" ]]; then
echo "Build did not succeed"
exit 1
fi

# Wait for workflow to complete
gh run watch $RUN_ID

trigger-test:
runs-on: ubuntu-latest
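The new step drops the unsupported `gh workflow run ... --json databaseId` invocation in favor of a plain dispatch, a `gh run list --limit 1` lookup, and a poll over `gh run view` until the run reaches a terminal state. Note that picking the newest run on the branch can in principle race with a concurrently dispatched run; only the waiting part is sketched here. Below is a rough C++ analogue of that poll loop, assuming a hypothetical `fetch_status` callable standing in for the `gh run view --json status,conclusion` call, with a timeout guard added that the YAML loop does not have:

```cpp
#include <chrono>
#include <string>
#include <thread>

// Mirrors the two fields polled above via `gh run view --json status,conclusion`.
struct run_state {
  std::string status;      // "queued", "in_progress", "completed", ...
  std::string conclusion;  // "success", "failure", ... (the workflow treats "null" as not yet set)
};

// Poll until the run completes or the timeout expires; returns true only on success.
template <typename Fetch>
bool wait_for_run(Fetch fetch_status, std::chrono::minutes timeout)
{
  const auto deadline = std::chrono::steady_clock::now() + timeout;
  while (std::chrono::steady_clock::now() < deadline) {
    const run_state s = fetch_status();  // hypothetical stand-in for the gh CLI call
    if (s.status == "completed" && s.conclusion != "null") {
      return s.conclusion == "success";
    }
    std::this_thread::sleep_for(std::chrono::seconds(10));  // same cadence as the workflow
  }
  return false;  // a timed-out run is treated like a non-success conclusion
}
```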
1 change: 1 addition & 0 deletions .github/workflows/test.yaml
@@ -31,6 +31,7 @@ jobs:
secrets: inherit
uses: rapidsai/shared-workflows/.github/workflows/conda-python-tests.yaml@branch-25.06
with:
run_codecov: false
build_type: ${{ inputs.build_type }}
branch: ${{ inputs.branch }}
date: ${{ inputs.date }}
25 changes: 9 additions & 16 deletions CONTRIBUTING.md
@@ -64,19 +64,10 @@ source and contribute to its development. Other operating systems may be compatible but are not
currently tested.

Building NVIDIA cuOpt with the provided conda environment is recommended for users who wish to enable all
library features. The following instructions are for building with a conda environment. Dependencies
for a minimal build of NVIDIA cuOpt without using conda are also listed below.
library features. The following instructions are for building with a conda environment.

### General requirements

Compilers:

These will be installed while creating the Conda environment

* `gcc` version 13.0+
* `nvcc` version 12.8+
* `cmake` version 3.30.4+

CUDA/GPU Runtime:

* CUDA 12.8
@@ -107,11 +98,13 @@ cd $CUOPT_HOME

#### Building with a conda environment

**Note:** Using a conda environment is the easiest way to satisfy the library's dependencies.
**Note:** Building from source without conda is very difficult. We highly recommend that users build cuOpt inside a conda environment.

- Create the conda development environment:

Please install conda if you don't have it already. You can install it from [https://docs.conda.io/en/latest/miniconda.html](https://docs.conda.io/en/latest/miniconda.html)
Please install conda if you don't have it already. You can install [miniforge](https://conda-forge.org/download/) or [miniconda](https://www.anaconda.com/docs/getting-started/miniconda/install#linux).

**Note:** We recommend using [mamba](https://mamba.readthedocs.io/en/latest/installation/mamba-installation.html) as the package manager for the conda environment. Mamba is faster and more efficient than conda, and it is the default package manager for miniforge. If you are using mamba, just replace `conda` with `mamba` in the following commands.

```bash
# create the conda environment (assuming in base `cuopt` directory)
@@ -164,8 +157,8 @@ To run the C++ tests, run

```bash
cd $CUOPT_HOME/datasets && get_test_data.sh
cd $CUOPT_HOME/datasets/linear_programming && download_pdlp_test_dataset.sh
cd $CUOPT_HOME/datasets/mip && download_miplib_test_dataset.sh
cd $CUOPT_HOME && datasets/linear_programming/download_pdlp_test_dataset.sh
datasets/mip/download_miplib_test_dataset.sh
export RAPIDS_DATASET_ROOT_DIR=$CUOPT_HOME/datasets/
ctest --test-dir ${CUOPT_HOME}/cpp/build # libcuopt
```
@@ -176,8 +169,8 @@ To run python tests, run
```bash

cd $CUOPT_HOME/datasets && get_test_data.sh
cd $CUOPT_HOME/datasets/linear_programming && download_pdlp_test_dataset.sh
cd $CUOPT_HOME/datasets/mip && download_miplib_test_dataset.sh
cd $CUOPT_HOME && datasets/linear_programming/download_pdlp_test_dataset.sh
datasets/mip/download_miplib_test_dataset.sh
export RAPIDS_DATASET_ROOT_DIR=$CUOPT_HOME/datasets/
cd $CUOPT_HOME/python
pytest -v ${CUOPT_HOME}/python/cuopt/cuopt/tests
2 changes: 2 additions & 0 deletions README.md
@@ -26,6 +26,8 @@ cuOpt supports the following APIs:
- Linear Programming (LP)
- Mixed Integer Linear Programming (MILP)
- Routing (TSP, VRP, and PDP)

This repo is also hosted as a [COIN-OR](http://github.com/coin-or/cuopt/) project.

## Installation

11 changes: 6 additions & 5 deletions cpp/include/cuopt/linear_programming/mip/solver_settings.hpp
@@ -70,11 +70,12 @@ class mip_solver_settings_t {
bool has_initial_solution() const;

struct tolerances_t {
f_t absolute_tolerance = 1.0e-4;
f_t relative_tolerance = 1.0e-6;
f_t integrality_tolerance = 1.0e-5;
f_t absolute_mip_gap = 1.0e-10;
f_t relative_mip_gap = 1.0e-4;
f_t presolve_absolute_tolerance = 1.0e-6;
f_t absolute_tolerance = 1.0e-4;
f_t relative_tolerance = 1.0e-6;
f_t integrality_tolerance = 1.0e-5;
f_t absolute_mip_gap = 1.0e-10;
f_t relative_mip_gap = 1.0e-4;
};

/**
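The only change here is the new `presolve_absolute_tolerance` field (default `1.0e-6`) alongside the existing MIP tolerances. The sketch below shows how this knob feeds the crossing-bound checks later in this PR; the struct merely mirrors the hunk above rather than the real header, and `bounds_cross` is an illustrative helper, not a cuOpt API:

```cpp
// Mirror of the tolerances shown above (illustration only; the actual type lives in
// cuopt/linear_programming/mip/solver_settings.hpp).
struct tolerances_t {
  double presolve_absolute_tolerance = 1.0e-6;  // new in this PR
  double absolute_tolerance          = 1.0e-4;
  double relative_tolerance          = 1.0e-6;
  double integrality_tolerance       = 1.0e-5;
  double absolute_mip_gap            = 1.0e-10;
  double relative_mip_gap            = 1.0e-4;
};

// A bound pair counts as genuinely crossing only if it crosses by more than the
// presolve tolerance; smaller violations are treated as numerical noise.
inline bool bounds_cross(double lb, double ub, const tolerances_t& tol)
{
  return lb > ub + tol.presolve_absolute_tolerance;
}
```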
6 changes: 3 additions & 3 deletions cpp/src/mip/diversity/diversity_manager.cu
@@ -246,15 +246,15 @@ bool diversity_manager_t<i_t, f_t>::run_presolve(f_t time_limit)
if (termination_criterion_t::NO_UPDATE != term_crit) {
ls.constraint_prop.bounds_update.set_updated_bounds(*problem_ptr);
trivial_presolve(*problem_ptr);
if (!problem_ptr->empty) { check_bounds_sanity(*problem_ptr); }
if (!problem_ptr->empty && !check_bounds_sanity(*problem_ptr)) { return false; }
}
if (!problem_ptr->empty) {
// do the resizing no-matter what, bounds presolve might not change the bounds but initial
// trivial presolve might have
ls.constraint_prop.bounds_update.resize(*problem_ptr);
ls.constraint_prop.conditional_bounds_update.update_constraint_bounds(
*problem_ptr, ls.constraint_prop.bounds_update);
check_bounds_sanity(*problem_ptr);
if (!check_bounds_sanity(*problem_ptr)) { return false; }
}
stats.presolve_time = presolve_timer.elapsed_time();
return true;
@@ -351,7 +351,7 @@ solution_t<i_t, f_t> diversity_manager_t<i_t, f_t>::run_solver()
ls.lp_optimal_exists = true;
if (lp_result.get_termination_status() == pdlp_termination_status_t::Optimal) {
// get lp user objective and pass it to set_new_user_bound
set_new_user_bound(problem_ptr->get_user_obj_from_solver_obj(lp_result.get_objective_value()));
set_new_user_bound(lp_result.get_objective_value());
} else if (lp_result.get_termination_status() == pdlp_termination_status_t::PrimalInfeasible) {
// PDLP's infeasibility detection isn't an exact method and might be subject to false positives.
// Issue a warning, and continue solving.
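With `check_bounds_sanity` now returning `bool`, `run_presolve` reports crossing bounds to its caller instead of tripping an assertion; the second change in this file passes the PDLP objective to `set_new_user_bound` directly rather than converting it with `get_user_obj_from_solver_obj`. A rough caller-side sketch of the error-propagation pattern, with stand-in names rather than the real `diversity_manager_t` interface:

```cpp
#include <iostream>

// Stand-in for the presolve step above; returns false when the bounds sanity check fails.
static bool run_presolve_stub(bool bounds_ok) { return bounds_ok; }

int main()
{
  // An infeasible or numerically inconsistent model now surfaces as a clean `false`
  // from presolve, so the driver can terminate gracefully instead of aborting.
  if (!run_presolve_stub(/*bounds_ok=*/false)) {
    std::cout << "presolve detected crossing bounds; reporting infeasible and stopping\n";
    return 0;
  }
  std::cout << "presolve passed; continuing with the MIP heuristics\n";
  return 0;
}
```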
37 changes: 25 additions & 12 deletions cpp/src/mip/presolve/bounds_presolve.cu
@@ -20,7 +20,9 @@

#include <thrust/count.h>
#include <thrust/extrema.h>
#include <thrust/functional.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/transform_reduce.h>
#include <thrust/tuple.h>
#include <utilities/copy_helpers.hpp>
#include <utilities/device_utils.cuh>
@@ -350,18 +352,29 @@ void bound_presolve_t<i_t, f_t>::calc_and_set_updated_constraint_bounds(problem_
{
calculate_activity_on_problem_bounds(pb);

thrust::transform(pb.handle_ptr->get_thrust_policy(),
upd.max_activity.begin(),
upd.max_activity.end(),
pb.constraint_upper_bounds.begin(),
pb.constraint_upper_bounds.begin(),
thrust::minimum<f_t>());
thrust::transform(pb.handle_ptr->get_thrust_policy(),
upd.min_activity.begin(),
upd.min_activity.end(),
pb.constraint_lower_bounds.begin(),
pb.constraint_lower_bounds.begin(),
thrust::maximum<f_t>());
thrust::for_each(pb.handle_ptr->get_thrust_policy(),
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(pb.n_constraints),
[pb = pb.view(),
min_act = make_span(upd.min_activity),
max_act = make_span(upd.max_activity),
cnst_lb = make_span(pb.constraint_lower_bounds),
cnst_ub = make_span(pb.constraint_upper_bounds)] __device__(i_t idx) {
auto min_a = min_act[idx];
auto max_a = max_act[idx];
auto c_lb = cnst_lb[idx];
auto c_ub = cnst_ub[idx];
auto new_c_lb = max(c_lb, min_a);
auto new_c_ub = min(c_ub, max_a);
i_t infeas = check_infeasibility<i_t, f_t>(
min_a, max_a, new_c_lb, new_c_ub, pb.tolerances.presolve_absolute_tolerance);
if (!infeas && (new_c_lb > new_c_ub)) {
new_c_lb = (new_c_lb + new_c_ub) / 2;
new_c_ub = new_c_lb;
}
cnst_lb[idx] = new_c_lb;
cnst_ub[idx] = new_c_ub;
});
}

#if MIP_INSTANTIATE_FLOAT
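The two `thrust::transform` calls (a plain element-wise `min`/`max` against the activity bounds) are folded into a single `thrust::for_each` so that the tightened bounds can also be checked for crossing against the new presolve tolerance, and near-crossing bounds can be collapsed to their midpoint. A host-side C++ sketch of the same per-constraint update, using plain vectors instead of device spans:

```cpp
#include <algorithm>
#include <cstddef>
#include <vector>

// Crossing by more than eps means the activity range and the constraint bounds
// cannot be reconciled, i.e. the row is infeasible (see check_infeasibility below).
static bool row_infeasible(double min_a, double max_a, double lb, double ub, double eps)
{
  return (min_a > ub + eps) || (max_a < lb - eps);
}

static void tighten_constraint_bounds(std::vector<double>& lb,
                                      std::vector<double>& ub,
                                      const std::vector<double>& min_act,
                                      const std::vector<double>& max_act,
                                      double eps)  // presolve_absolute_tolerance
{
  for (std::size_t i = 0; i < lb.size(); ++i) {
    double new_lb = std::max(lb[i], min_act[i]);  // the row activity can never drop below min_act
    double new_ub = std::min(ub[i], max_act[i]);  // ... or exceed max_act
    // If the tightened bounds cross only by numerical noise (the row is not truly
    // infeasible), collapse them to the midpoint so lb <= ub holds downstream.
    if (!row_infeasible(min_act[i], max_act[i], new_lb, new_ub, eps) && new_lb > new_ub) {
      new_lb = 0.5 * (new_lb + new_ub);
      new_ub = new_lb;
    }
    lb[i] = new_lb;
    ub[i] = new_ub;
  }
}
```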
6 changes: 6 additions & 0 deletions cpp/src/mip/presolve/bounds_update_helpers.cuh
@@ -142,6 +142,12 @@ __global__ void calc_activity_kernel(typename problem_t<i_t, f_t>::view_t pb,

// Update bounds

template <typename i_t, typename f_t>
inline __device__ bool check_infeasibility(f_t min_a, f_t max_a, f_t cnst_lb, f_t cnst_ub, f_t eps)
{
return (min_a > cnst_ub + eps) || (max_a < cnst_lb - eps);
}

template <typename i_t, typename f_t>
inline __device__ bool check_infeasibility(
f_t min_a, f_t max_a, f_t cnst_lb, f_t cnst_ub, f_t abs_tol, f_t rel_tol)
6 changes: 4 additions & 2 deletions cpp/src/mip/problem/problem.cu
@@ -388,23 +388,25 @@ void problem_t<i_t, f_t>::check_problem_representation(bool check_transposed,
"Sizes for vectors related to the constraints are not the same.");

// Check the validity of bounds
cuopt_assert(
cuopt_expects(
thrust::all_of(handle_ptr->get_thrust_policy(),
thrust::make_counting_iterator<i_t>(0),
thrust::make_counting_iterator<i_t>(n_variables),
[variable_lower_bounds = variable_lower_bounds.data(),
variable_upper_bounds = variable_upper_bounds.data()] __device__(i_t idx) {
return variable_lower_bounds[idx] <= variable_upper_bounds[idx];
}),
error_type_t::ValidationError,
"Variable bounds are invalid");
cuopt_assert(
cuopt_expects(
thrust::all_of(handle_ptr->get_thrust_policy(),
thrust::make_counting_iterator<i_t>(0),
thrust::make_counting_iterator<i_t>(n_constraints),
[constraint_lower_bounds = constraint_lower_bounds.data(),
constraint_upper_bounds = constraint_upper_bounds.data()] __device__(i_t idx) {
return constraint_lower_bounds[idx] <= constraint_upper_bounds[idx];
}),
error_type_t::ValidationError,
"Constraints bounds are invalid");

if (check_mip_related_data) {
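Switching these two checks from `cuopt_assert` to `cuopt_expects` (with `error_type_t::ValidationError`) turns a debug-only assertion on user-supplied bounds into a validation error raised in every build type. A rough illustration of that distinction with stand-in macros; `cuopt_assert`/`cuopt_expects` themselves are defined elsewhere in the library and are not reproduced here:

```cpp
#include <cassert>
#include <stdexcept>

// Debug-only check: compiled out when NDEBUG is defined (release builds).
#define MY_ASSERT(cond, msg) assert((cond) && (msg))

// Always-on check: raises an error the caller can handle, in every build type.
#define MY_EXPECTS(cond, msg)                      \
  do {                                             \
    if (!(cond)) throw std::invalid_argument(msg); \
  } while (0)

void check_bounds(double lb, double ub)
{
  // User-supplied data should be validated even in release builds, hence "expects".
  MY_EXPECTS(lb <= ub, "Variable bounds are invalid");
}
```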
32 changes: 15 additions & 17 deletions cpp/src/mip/problem/problem_helpers.cuh
@@ -248,42 +248,40 @@ static void check_csr_representation([[maybe_unused]] const rmm::device_uvector<
}

template <typename i_t, typename f_t>
static void check_var_bounds_sanity(const detail::problem_t<i_t, f_t>& problem)
static bool check_var_bounds_sanity(const detail::problem_t<i_t, f_t>& problem)
{
bool crossing_bounds_detected =
thrust::any_of(problem.handle_ptr->get_thrust_policy(),
thrust::counting_iterator(0),
thrust::counting_iterator((i_t)problem.variable_lower_bounds.size()),
[lb = make_span(problem.variable_lower_bounds),
ub = make_span(problem.variable_upper_bounds)] __device__(i_t index) {
return lb[index] > ub[index];
[tolerance = problem.tolerances.presolve_absolute_tolerance,
lb = make_span(problem.variable_lower_bounds),
ub = make_span(problem.variable_upper_bounds)] __device__(i_t index) {
return (lb[index] > ub[index] + tolerance);
});
cuopt_expects(!crossing_bounds_detected,
error_type_t::ValidationError,
"There shouldn't be any crossing bounds in variable bounds.");
return !crossing_bounds_detected;
}

template <typename i_t, typename f_t>
static void check_constraint_bounds_sanity(const detail::problem_t<i_t, f_t>& problem)
static bool check_constraint_bounds_sanity(const detail::problem_t<i_t, f_t>& problem)
{
bool crossing_bounds_detected =
thrust::any_of(problem.handle_ptr->get_thrust_policy(),
thrust::counting_iterator(0),
thrust::counting_iterator((i_t)problem.constraint_lower_bounds.size()),
[lb = make_span(problem.constraint_lower_bounds),
ub = make_span(problem.constraint_upper_bounds)] __device__(i_t index) {
return lb[index] > ub[index];
[tolerance = problem.tolerances.presolve_absolute_tolerance,
lb = make_span(problem.constraint_lower_bounds),
ub = make_span(problem.constraint_upper_bounds)] __device__(i_t index) {
return (lb[index] > ub[index] + tolerance);
});
cuopt_expects(!crossing_bounds_detected,
error_type_t::ValidationError,
"There shouldn't be any crossing bounds in constraints bounds.");
return !crossing_bounds_detected;
}

template <typename i_t, typename f_t>
static void check_bounds_sanity(const detail::problem_t<i_t, f_t>& problem)
static bool check_bounds_sanity(const detail::problem_t<i_t, f_t>& problem)
{
check_var_bounds_sanity<i_t, f_t>(problem);
check_constraint_bounds_sanity<i_t, f_t>(problem);
return check_var_bounds_sanity<i_t, f_t>(problem) &&
check_constraint_bounds_sanity<i_t, f_t>(problem);
}

} // namespace cuopt::linear_programming::detail
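The sanity helpers now return `bool` and tolerate crossings up to `presolve_absolute_tolerance`, instead of raising a validation error on any strict crossing. A tiny numeric example of what the tolerance changes (values are made up for illustration):

```cpp
#include <cstdio>

// Same comparison as in the device lambdas above, evaluated on the host.
static bool crossing(double lb, double ub, double tol) { return lb > ub + tol; }

int main()
{
  const double tol = 1.0e-6;  // presolve_absolute_tolerance default
  std::printf("%d\n", crossing(1.0, 0.5, tol));         // 1: genuinely crossing, sanity check fails
  std::printf("%d\n", crossing(1.0, 1.0 - 1e-9, tol));  // 0: crosses by < tol, tolerated
                                                        //    (the old strict lb > ub check flagged this)
  return 0;
}
```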