
Commit

resolve linking problem
seemingwang committed Mar 29, 2021
1 parent 6535661 commit 63d7fdc
Showing 3 changed files with 315 additions and 1 deletion.
6 changes: 5 additions & 1 deletion paddle/fluid/pybind/CMakeLists.txt
@@ -5,8 +5,12 @@ include_directories(${PADDLE_SOURCE_DIR}/paddle/fluid/platform)
 set(PYBIND_DEPS pybind python proto_desc memory executor fleet_wrapper box_wrapper prune
   feed_fetch_method pass_builder parallel_executor profiler layer tracer engine scope_pool
   analysis_predictor imperative_profiler imperative_flag save_load_util dlpack_tensor device_context
-  gloo_wrapper infer_io_utils heter_wrapper generator op_version_registry ps_gpu_wrapper custom_operator ps_service graph_py_service)
+  gloo_wrapper infer_io_utils heter_wrapper generator op_version_registry ps_gpu_wrapper custom_operator)
 
+if (WITH_PSCORE)
+  set(PYBIND_DEPS ${PYBIND_DEPS} ps_service)
+  set(PYBIND_DEPS ${PYBIND_DEPS} graph_py_service)
+endif()
 if (WITH_GPU OR WITH_ROCM)
   set(PYBIND_DEPS ${PYBIND_DEPS} dynload_cuda)
   set(PYBIND_DEPS ${PYBIND_DEPS} cuda_device_guard)
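The hunk above makes ps_service and graph_py_service conditional on WITH_PSCORE, so a build configured without the parameter-server components no longer tries to link libraries that are never produced; this is presumably the linking problem named in the commit message. A minimal sketch of how either configuration might be selected (the WITH_PSCORE option appears in the diff itself; the exact cmake invocation is an assumption):

# sketch only: choose whether the parameter-server deps are linked into pybind
cmake .. -DWITH_PSCORE=ON     # PYBIND_DEPS gains ps_service and graph_py_service
cmake .. -DWITH_PSCORE=OFF    # the two targets are left out entirely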
141 changes: 141 additions & 0 deletions
@@ -0,0 +1,141 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import itertools
import numpy as np
from inference_pass_test import InferencePassTest
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.core import PassVersionChecker
from paddle.fluid.core import AnalysisConfig


class TRTAffineChannelTest(InferencePassTest):
    def setUp(self):
        self.bs = 2
        self.channel = 8
        self.height = 16
        self.width = 16
        self.data_layout = 'NCHW'
        self.precision = AnalysisConfig.Precision.Float32
        self.serialize = False
        self.enable_trt = True

    def build(self):
        # set min_graph_size to 2,
        # because affine channel doesn't support nhwc format
        self.trt_parameters = InferencePassTest.TensorRTParam(
            1 << 30, self.bs, 2, self.precision, self.serialize, False)
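        # Assumed meaning of the positional arguments above, per the usual
        # InferencePassTest.TensorRTParam ordering: workspace_size, max_batch_size,
        # min_subgraph_size, precision, use_static, use_calib_mode.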

        with fluid.program_guard(self.main_program, self.startup_program):
            if self.data_layout == 'NCHW':
                shape = [-1, self.channel, self.height, self.width]
            else:
                shape = [-1, self.height, self.width, self.channel]

            data = fluid.data(name='in', shape=shape, dtype='float32')
            # set scale, bias by constant
            scale = fluid.layers.create_parameter(
                shape=[self.channel],
                dtype='float32',
                default_initializer=fluid.initializer.Constant(2.))
            bias = fluid.layers.create_parameter(
                shape=[self.channel],
                dtype='float32',
                default_initializer=fluid.initializer.Constant(.5))
            affine_channel_out = fluid.layers.affine_channel(
                data, scale=scale, bias=bias, data_layout=self.data_layout)
            out = fluid.layers.batch_norm(affine_channel_out, is_test=True)

        shape[0] = self.bs
        self.feeds = {'in': np.random.random(shape).astype('float32'), }
        self.fetch_list = [out]

    def check_output(self):
        if core.is_compiled_with_cuda():
            use_gpu = True
            atol = 1e-5
            if self.trt_parameters.precision == AnalysisConfig.Precision.Half:
                atol = 1e-3
            self.check_output_with_option(use_gpu, atol, flatten=True)
            self.assertTrue(
                PassVersionChecker.IsCompatible('tensorrt_subgraph_pass'))

    def run_test(self):
        self.build()
        self.check_output()

    def run_test_all(self):
        precision_opt = [
            AnalysisConfig.Precision.Float32, AnalysisConfig.Precision.Half
        ]
        serialize_opt = [False, True]

        if self.data_layout == 'NCHW':
            min_shape = [
                self.bs, self.channel, self.height // 2, self.width // 2
            ]
            max_shape = [self.bs, self.channel, self.height * 2, self.width * 2]
            opt_shape = [self.bs, self.channel, self.height, self.width]

        if self.data_layout == 'NHWC':
            min_shape = [
                self.bs, self.height // 2, self.width // 2, self.channel
            ]
            max_shape = [self.bs, self.height * 2, self.width * 2, self.channel]
            opt_shape = [self.bs, self.height, self.width, self.channel]

        dynamic_shape_profile = InferencePassTest.DynamicShapeParam({
            'in': min_shape
        }, {'in': max_shape}, {'in': opt_shape}, False)
        dynamic_shape_opt = [None, dynamic_shape_profile]
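        # itertools.product below sweeps 2 precisions x 2 serialization
        # settings x 2 shape modes (static vs. dynamic), i.e. 8 runs in total.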

        for precision, serialize, dynamic_shape in itertools.product(
                precision_opt, serialize_opt, dynamic_shape_opt):
            self.precision = precision
            self.serialize = serialize
            self.dynamic_shape_params = dynamic_shape
            self.run_test()

    def test_base(self):
        self.run_test()

    def test_fp16(self):
        self.precision = AnalysisConfig.Precision.Half
        self.run_test()

    def test_serialize(self):
        self.serialize = True
        self.run_test()

    def test_dynamic(self):
        self.dynamic_shape_params = InferencePassTest.DynamicShapeParam({
            'in': [self.bs, self.channel, self.height // 2, self.width // 2]
        }, {'in': [self.bs, self.channel, self.height * 2, self.width * 2]
        }, {'in': [self.bs, self.channel, self.height, self.width]}, False)
        self.run_test()

    def test_nchw_all(self):
        self.run_test_all()

    def test_nhwc(self):
        self.data_layout = 'NHWC'
        self.run_test_all()


if __name__ == "__main__":
    unittest.main()
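Once a CUDA/TensorRT build of Paddle is installed, the new test class can be driven with the standard unittest runner; the file name below is an assumption, since this view does not show the test's path:

# hypothetical file name for the test shown above
python -m unittest -v test_trt_affine_channel_pass.TRTAffineChannelTest.test_base
python -m unittest -v test_trt_affine_channel_pass.TRTAffineChannelTest.test_nhwc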
169 changes: 169 additions & 0 deletions scripts/paddle
@@ -0,0 +1,169 @@
#!/bin/bash

function version(){
echo "PaddlePaddle , compiled with"
echo " with_avx: ON"
echo " with_gpu: OFF"
echo " with_mkl: ON"
echo " with_mkldnn: "
echo " with_python: ON"
}

function ver2num() {
  set -e
  # convert version to number.
  if [ -z "$1" ]; then # empty argument
    printf "%03d%03d%03d%03d%03d" 0
  else
    local VERN=$(echo $1 | sed 's#v##g' | sed 's#\.# #g' \
      | sed 's#a# 0 #g' | sed 's#b# 1 #g' | sed 's#rc# 2 #g')
    if [ `echo $VERN | wc -w` -eq 3 ] ; then
      printf "%03d%03d%03d%03d%03d" $VERN 999 999
    else
      printf "%03d%03d%03d%03d%03d" $VERN
    fi
  fi
  set +e
}
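# Worked examples of the conversion above (each field is zero-padded to three
# digits; "a"/"b"/"rc" map to pre-release digits 0/1/2, and plain x.y.z
# releases are padded with 999 999 so they compare greater than any
# pre-release of the same version):
#   ver2num v1.2.0rc1  ->  001002000002001
#   ver2num 1.2.0      ->  001002000999999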

function cpu_config() {
  # auto set KMP_AFFINITY and OMP_DYNAMIC from Hyper Threading Status
  # only when MKL enabled
  if [ "ON" == "OFF" ]; then
    return 0
  fi
  platform="`uname -s`"
  ht=0
  if [ $platform == "Linux" ]; then
    ht=`lscpu |grep "per core"|awk -F':' '{print $2}'|xargs`
  elif [ $platform == "Darwin" ]; then
    if [ `sysctl -n hw.physicalcpu` -eq `sysctl -n hw.logicalcpu` ]; then
      # HT is OFF
      ht=1
    fi
  else
    return 0
  fi
  if [ $ht -eq 1 ]; then # HT is OFF
    if [ -z "$KMP_AFFINITY" ]; then
      export KMP_AFFINITY="granularity=fine,compact,0,0"
    fi
    if [ -z "$OMP_DYNAMIC" ]; then
      export OMP_DYNAMIC="FALSE"
    fi
  else # HT is ON
    if [ -z "$KMP_AFFINITY" ]; then
      export KMP_AFFINITY="granularity=fine,compact,1,0"
    fi
    if [ -z "$OMP_DYNAMIC" ]; then
      export OMP_DYNAMIC="True"
    fi
  fi
}

function threads_config() {
  # auto set OMP_NUM_THREADS and MKL_NUM_THREADS
  # according to trainer_count and total processors
  # only when MKL enabled
  # auto set OPENBLAS_NUM_THREADS when do not use MKL
  platform="`uname -s`"
  processors=0
  if [ $platform == "Linux" ]; then
    processors=`grep "processor" /proc/cpuinfo|sort -u|wc -l`
  elif [ $platform == "Darwin" ]; then
    processors=`sysctl -n hw.logicalcpu`
  else
    return 0
  fi
  trainers=`grep -Eo 'trainer_count.[0-9]+' <<< "$@" |grep -Eo '[0-9]+'|xargs`
  if [ -z $trainers ]; then
    trainers=1
  fi
  threads=$((processors / trainers))
  if [ $threads -eq 0 ]; then
    threads=1
  fi
  if [ "ON" == "ON" ]; then
    if [ -z "$OMP_NUM_THREADS" ]; then
      export OMP_NUM_THREADS=$threads
    fi
    if [ -z "$MKL_NUM_THREADS" ]; then
      export MKL_NUM_THREADS=$threads
    fi
  else
    if [ -z "$OPENBLAS_NUM_THREADS" ]; then
      export OPENBLAS_NUM_THREADS=$threads
    fi
    if [ $threads -gt 1 ] && [ -z "$OPENBLAS_MAIN_FREE" ]; then
      export OPENBLAS_MAIN_FREE=1
    fi
  fi

}

PADDLE_CONF_HOME="$HOME/.config/paddle"
mkdir -p ${PADDLE_CONF_HOME}

if [ -z "${PADDLE_NO_STAT+x}" ]; then
SERVER_VER=`curl -m 5 -X POST --data content="{ \"version\": \"\" }"\
-b ${PADDLE_CONF_HOME}/paddle.cookie \
-c ${PADDLE_CONF_HOME}/paddle.cookie \
http://api.paddlepaddle.org/version 2>/dev/null`
if [ $? -eq 0 ] && [ "$(ver2num )" -lt $(ver2num $SERVER_VER) ]; then
echo "Paddle release a new version ${SERVER_VER}, you can get the install package in http://www.paddlepaddle.org"
fi
fi

PADDLE_BIN_PATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

if [ ! -z "${DEBUGGER}" ]; then
echo "Using debug command ${DEBUGGER}"
fi

CUDNN_LIB_PATH=""

if [ ! -z "${CUDNN_LIB_PATH}" ]; then
export LD_LIBRARY_PATH=${CUDNN_LIB_PATH}:${LD_LIBRARY_PATH}
fi

export PYTHONPATH=${PWD}:${PYTHONPATH}


# Check python lib installed or not.
pip --help > /dev/null
if [ $? -ne 0 ]; then
  echo "pip should be installed to run paddle."
  exit 1
fi

if [ "OFF" == "ON" ]; then
  PADDLE_NAME="paddlepaddle-gpu"
else
  PADDLE_NAME="paddlepaddle"
fi

INSTALLED_VERSION=`pip freeze 2>/dev/null | grep "^${PADDLE_NAME}==" | sed 's/.*==//g'`

if [ -z "${INSTALLED_VERSION}" ]; then
INSTALLED_VERSION="0.0.0" # not installed
fi
cat <<EOF | python -
from distutils.version import LooseVersion
import sys
if LooseVersion("${INSTALLED_VERSION}") < LooseVersion(""):
sys.exit(1)
else:
sys.exit(0)
EOF

cpu_config
# echo $KMP_AFFINITY $OMP_DYNAMIC

case "$1" in
"version")
version
;;
*)
version
;;
esac
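A rough sketch of how the generated wrapper is meant to be invoked, based only on the code above (the working directory and script path are assumptions):

./scripts/paddle version                   # print the build configuration; any other argument currently does the same
DEBUGGER=gdb ./scripts/paddle version      # in the code shown here, the script only announces the chosen debug command
PADDLE_NO_STAT=1 ./scripts/paddle version  # skip the update check against api.paddlepaddle.org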

1 comment on commit 63d7fdc


paddle-bot-old (bot) commented on 63d7fdc, Mar 29, 2021


🕵️ CI failures summary

🔍 PR: #31226 Commit ID: 63d7fdc contains failed CI.
