Update saliency code
Samir-Rashid committed Jun 12, 2024
1 parent 14d85d4 commit 786f7bb
Showing 5 changed files with 23 additions and 62 deletions.
4 changes: 0 additions & 4 deletions .devcontainer/devcontainer.json
@@ -1,11 +1,7 @@
// See this page for reference of options: https://containers.dev/implementors/json_reference
{
"name": "Existing Dockerfile",
-<<<<<<< Updated upstream
-"image": "ghcr.io/tritonuas/obcpp:x86",
-=======
"image": "ghcr.io/tritonuas/obcpp:jetson",
->>>>>>> Stashed changes
// enable when need to connect over USB to pixhawk
// also: need to run obcpp with sudo or add tuas user to dialout group with
// `sudo usermod -aG dialout tuas && newgrp && bash`
50 changes: 3 additions & 47 deletions docker/Dockerfile.jetson
@@ -1,12 +1,4 @@
-<<<<<<< Updated upstream
FROM dustynv/l4t-pytorch:r36.2.0
-=======
-# FROM tritonuas/jetson-base:r36.2.0
-FROM dustynv/l4t-pytorch:r36.2.0
-
-# this base image came from dusty-nv/jetson-containers
-# ./build.sh --name=tritonuas/jetson-base pytorch:2.1 torchvision opencv
->>>>>>> Stashed changes

ARG USERNAME=tuas USER_UID=1000 USER_GID=1000 DEBIAN_FRONTEND=noninteractive

@@ -58,14 +50,6 @@ RUN --mount=target=/var/lib/apt/lists,type=cache,sharing=locked \
libopenblas-dev \
ninja-build

-# RUN sudo dpkg --remove libopencv-dev
-# RUN sudo apt install -f
-# RUN --mount=target=/var/lib/apt/lists,type=cache,sharing=locked \
-# --mount=target=/var/cache/apt,type=cache,sharing=locked \
-# rm -f /etc/apt/apt.conf.d/docker-clean \
-# && apt-get update \
-# && apt-get install -y libopencv-dev
-
RUN pip3 install typing-extensions PyYAML cpplint

RUN echo $USERNAME ALL=\(root\) NOPASSWD:ALL > /etc/sudoers.d/$USERNAME \
@@ -76,12 +60,11 @@ RUN git clone --depth 1 https://github.com/mavlink/MAVSDK.git --branch v2.9.1 --
&& cd MAVSDK \
&& git submodule update --init --recursive \
&& cmake -DCMAKE_BUILD_TYPE=Release -Bbuild/default -H. \
-&& cmake --build build/default -j2 --target install
+&& cmake --build build/default -j`nproc` --target install

# pull and build torchvision
# refer to this page for version compatibility with pytorch (libtorch) https://github.com/pytorch/pytorch/wiki/PyTorch-Versions
ARG TORCHVISION_VERSION=0.17.0
-<<<<<<< Updated upstream
# Space separated list of CUDA architecture versions.
# The version numbers depend on the NVIDIA GPU model we're using and the installed CUDA version.
# For the Jetson Orin Nano with the "Ampere" architecture and CUDA 12.1 we can use version 8.6 (written as 86 in CUDA_ARCH_LIST).
@@ -97,7 +80,7 @@ RUN wget "https://github.com/pytorch/vision/archive/refs/tags/v${TORCHVISION_VER
&& mkdir build \
&& cd build \
&& cmake -DWITH_CUDA=1 -DTORCH_CUDA_ARCH_LIST="${CUDA_ARCH_LIST}" -DCUDA_HAS_FP16=1 -DCUDA_NO_HALF_OPERATORS=1 -DCUDA_NO_HALF_CONVERSIONS=1 -DCUDA_NO_HALF2_OPERATORS=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH="/usr/local/lib/python3.10/dist-packages/torch/share/cmake/Torch" .. \
-&& make -j4 \
+&& make -j`nproc` \
&& make install

RUN pip3 install gdown
@@ -111,45 +94,18 @@ RUN gdown 1VtBji-cWfetM5nXZwt55JuHPWPGahQOH -O ${ARENA_TAR_PATH}
RUN tar -xvzf ${ARENA_TAR_PATH}
WORKDIR ${ARENA_EXTRACTED_PATH}
RUN sh Arena_SDK_ARM64.conf
-=======
-#ARG TORCHVISION_INSTALL_DIR=/torchvision-tmp
-WORKDIR ${TORCHVISION_INSTALL_DIR}
-RUN wget "https://github.com/pytorch/vision/archive/refs/tags/v${TORCHVISION_VERSION}.zip" \
-&& unzip "v${TORCHVISION_VERSION}.zip" \
-&& cd vision-0.17.0 \
-&& mkdir build \
-&& cd build \
-&& cmake -DWITH_CUDA=1 -DCUDA_HAS_FP16=1 -DCUDA_NO_HALF_OPERATORS=1 -DCUDA_NO_HALF_CONVERSIONS=1 -DCUDA_NO_HALF2_OPERATORS=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH="/usr/local/lib/python3.10/dist-packages/torch/share/cmake/Torch" .. \
-&& make -j4 \
-&& make install
-
-# # Install g++10 and replace the older version. For some reason some c++ 20 features aren't working with g++9 even though
-# # we have CMake configured to use c++ 20 https://stackoverflow.com/questions/69031073/why-am-i-missing-c20-headers-and-how-do-i-fix-this
-# RUN apt-get update && apt-get install -y g++-10 gcc-10
-# RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 10
-# RUN update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-10 10
-# RUN update-alternatives --set gcc /usr/bin/gcc-10
-# RUN update-alternatives --set g++ /usr/bin/g++-10
->>>>>>> Stashed changes

WORKDIR /obcpp
COPY . .

RUN rm -rf /obcpp/build
WORKDIR /obcpp/build
ENV CMAKE_PREFIX_PATH="/usr/local/lib/python3.10/dist-packages/torch/share/cmake/Torch;/usr/local/share/cmake/TorchVision"
-<<<<<<< Updated upstream
RUN GITHUB_ACTIONS=true cmake -DCMAKE_PREFIX_PATH="/usr/local/lib/python3.10/dist-packages/torch/share/cmake/Torch;/usr/local/share/cmake/TorchVision" -DCMAKE_MODULE_PATH="/usr/local/share/cmake/TorchVision" -DCMAKE_BUILD_TYPE="Release" ..

RUN ninja obcpp
-=======
-RUN GITHUB_ACTIONS=true cmake -DCMAKE_PREFIX_PATH="/usr/local/lib/python3.10/dist-packages/torch/share/cmake/Torch;/usr/local/share/cmake/TorchVision" -DCMAKE_MODULE_PATH="/usr/local/share/cmake/TorchVision" -DCMAKE_BUILD_TYPE="Release" -DCMAKE_JOB_POOLS="j=2" ..
-
-# RUN make obcpp cuda_check load_torchvision_model VERBOSE=1
-RUN ninja obcpp
->>>>>>> Stashed changes

# login as non-root user
# USER $USERNAME

-CMD [ "/obcpp/build/bin/obcpp" ]
+CMD [ "/obcpp/build/bin/obcpp", "/obcpp/configs/config.json"]
24 changes: 17 additions & 7 deletions docker/Makefile
@@ -2,20 +2,23 @@
all:
echo "select one of the other targets"

build-x86-image:
docker buildx build --platform linux/amd64 --tag ghcr.io/tritonuas/obcpp:x86 --file Dockerfile.x86 ..

+# NOTE: the jetson Docker image is based on a custom base image we generate from
+# https://github.com/dusty-nv/jetson-containers/
+# - Clone that repo and run the following command:
+# ./build.sh --name=tritonuas/jetson-base pytorch:2.1 torchvision opencv
+# - Push that image to dockerhub as whatever tag the FROM line in our Dockerfile.jetson is
+# - Run the following build-jetson-image target
build-jetson-image:
-docker buildx build --platform linux/arm64 --tag tritonuas/obcpp:jetson --file Dockerfile.jetson ..
-
-push-jetson-image:
-docker push tritonuas/obcpp:jetson
+docker buildx build --platform linux/arm64 --tag ghcr.io/tritonuas/obcpp:jetson --file Dockerfile.jetson ..

build-arm-image:
-DOCKER_BUILDKIT=1 docker build --tag tritonuas/obcpp:arm --file Dockerfile.arm ..
+docker buildx build --platform linux/arm64 --tag ghcr.io/tritonuas/obcpp:arm --file Dockerfile.arm ..

+push-jetson-image:
+docker push ghcr.io/tritonuas/obcpp:jetson

run-jetson-pixhawk-compose:
docker compose -f jetson-pixhawk-compose.yml up -d
@@ -24,7 +27,14 @@ stop-jetson-pixhawk-compose:
docker compose -f jetson-pixhawk-compose.yml down

run-jetson-cuda-check:
-docker run -it --rm --runtime nvidia -i tritonuas/obcpp:nvidia /obcpp/build/bin/cuda_check
+docker run -it --rm --runtime nvidia -i ghcr.io/tritonuas/obcpp:jetson /obcpp/build/bin/cuda_check


+# NOTE: Use this target for development where you want to quickly edit source code and recompile.
+# This will spin up the jetson container, launch you into a bash shell and mount the
+# host's obcpp directory at "/obcpp" in the container. This means you can edit the source
+# files on the host (with VSCode over SSH or Vim) and recompile in the Docker environment.
+# Note that to re-run cmake inside the container you'll need the really long CMake command
+# in the Dockerfile.jetson.
jetson-develop:
-cd .. && docker run -it --net=host --runtime=nvidia --volume=./:/obcpp -i tritonuas/obcpp:jetson /bin/bash
+cd .. && docker run -it --net=host --runtime=nvidia --platform=linux/arm64 --volume=./:/obcpp --device=/dev/ttyACM0 tritonuas/obcpp:jetson /bin/bash
1 change: 0 additions & 1 deletion include/cv/saliency.hpp
@@ -36,7 +36,6 @@ class Saliency {
private:
std::string modelPath; // path to prediction model
torch::jit::script::Module module; // the loaded model
-// c10::Device device; //
};

#endif // INCLUDE_CV_SALIENCY_HPP_
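
For context, a minimal usage sketch of this class. This is hedged: the model path and image file below are illustrative placeholders, and only the constructor and salience() signatures come from this header.

// Hypothetical usage sketch; paths are placeholders, not repo files.
#include <vector>
#include <opencv2/opencv.hpp>
#include "cv/saliency.hpp"

int main() {
    Saliency saliency("/obcpp/models/saliency.pt");  // assumed model location
    cv::Mat image = cv::imread("frame.jpg");         // any input image
    std::vector<CroppedTarget> targets = saliency.salience(image);
    return 0;
}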
6 changes: 3 additions & 3 deletions src/cv/saliency.cpp
@@ -26,7 +26,6 @@ Saliency::Saliency(std::string modelPath) {
catch (const c10::Error& e) {
LOG_F(ERROR, "error loading the model %s", e.msg().c_str());
}
-
}

std::vector<CroppedTarget> Saliency::salience(cv::Mat image) {
@@ -37,7 +36,8 @@ std::vector<CroppedTarget> Saliency::salience(cv::Mat image) {
// swap axis
tensor = Saliency::transpose(tensor, { (2), (0), (1) });

-c10::Device device = torch::cuda::is_available() ? torch::kCUDA : torch::kCPU; // eventually add device as member of Saliency
+// eventually add device as member of Saliency
+c10::Device device = torch::cuda::is_available() ? torch::kCUDA : torch::kCPU;
auto tensor_cuda = tensor.to(device);

auto input_to_net = ToInput(tensor_cuda);
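
ToInput itself is not shown in this diff. As a hedged sketch (an assumption about its shape, not the repo's actual helper): torchvision detection models traced for C++ take a List[Tensor], so the image tensor would be wrapped in a one-element list.

// Hedged sketch of a ToInput-style helper (assumed, not from this commit).
#include <torch/script.h>

std::vector<torch::jit::IValue> ToInput(const torch::Tensor& tensor_image) {
    // Wrap the single image tensor in the list input the traced model expects.
    return {c10::List<torch::Tensor>({tensor_image})};
}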
@@ -48,7 +48,7 @@
* that we want are : a) boxes (FloatTensor[N, 4]): the predicted boxes, and
* b) scores (Tensor[N]): the scores of each detection.
*/
-
+// output is a tuple of (losses, detections)
auto output = module.forward(input_to_net);
c10::ivalue::Tuple& tuple = output.toTupleRef();
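
Given the comment above, a hedged sketch of how boxes and scores might be read out of the detections half of the tuple, assuming torchvision's detection output convention (a list with one dict per image, keyed "boxes" and "scores"); the variable names are illustrative:

// Hedged sketch, not part of this commit: unpack (losses, detections).
auto detections = tuple.elements()[1].toList();        // element 0 holds the losses
auto dets = detections.get(0).toGenericDict();         // predictions for the first image
torch::Tensor boxes = dets.at("boxes").toTensor();     // FloatTensor[N, 4]
torch::Tensor scores = dets.at("scores").toTensor();   // Tensor[N]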