l_softmax_op.h (forked from HiKapok/tf.extra_losses)
// MIT License
// Copyright (c) 2018 Changan Wang
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#ifndef KERNEL_L_SOFTMAX_H_
#define KERNEL_L_SOFTMAX_H_
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/framework/op_kernel.h"
#include <cstdint>
#include <tuple>
#include <limits>
#include <iostream>
using tensorflow::TTypes;
using tensorflow::OpKernelContext;
using CPUDevice = Eigen::ThreadPoolDevice;
using GPUDevice = Eigen::GpuDevice;
#define _PI 3.14159265358979323846
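// Background on the constants above: in the L-Softmax formulation
// (Liu et al., "Large-Margin Softmax Loss for Convolutional Neural
// Networks", 2016) the target-class logit replaces cos(theta) with
//   psi(theta) = (-1)^k * cos(margin_order * theta) - 2k,
//   theta in [k*pi/margin_order, (k+1)*pi/margin_order], k = 0..margin_order-1,
// which is why _PI is needed to build the theta_seg segment boundaries.
// The base/gamma/power/lambda_min parameters presumably implement the usual
// annealing schedule from the reference implementation,
//   lambda = max(base * (1 + gamma * global_step)^(-power), lambda_min),
// blending psi(theta) with plain cos(theta) early in training. This reading
// follows the parameter names; the .cc files hold the authoritative math.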
// Forward functor: computes the large-margin softmax logits for one batch.
// Inputs are the flattened feature matrix (batch_size x num_dimensions),
// the class weight matrix, the training step used to anneal lambda, and the
// integer class labels. The float buffers are scratch space for the feature
// norms, weight norms, pairwise cos(theta), and the segment boundaries of
// cos(margin_order * theta) over [0, pi]; `losses` receives the modified
// logits and `output_lambda` the lambda value actually applied.
template <typename Device, typename T>
struct LargeMarginSoftmaxFunctor {
  void operator()(OpKernelContext* context, const Device& d,
                  typename TTypes<T>::ConstFlat features,
                  typename TTypes<T>::ConstFlat weights,
                  typename TTypes<int32_t>::ConstFlat global_step,
                  typename TTypes<int32_t>::ConstFlat labels,
                  const int32_t batch_size, const int32_t num_dimensions,
                  const int32_t output_dimensions,
                  const float base, const float gamma, const float power,
                  const float lambda_min, const int32_t margin_order,
                  const bool b_angular,
                  typename TTypes<float>::Flat feat_norm,
                  typename TTypes<float>::Flat weights_norm,
                  typename TTypes<float>::Flat cos_theta,
                  typename TTypes<float>::Flat theta_seg,
                  typename TTypes<float>::Flat output_lambda,
                  typename TTypes<T>::Flat losses);
};

// Gradient functor: given the incoming gradients `back_grads` and the lambda
// value recorded by the forward pass (`cur_lambda`), reuses the same scratch
// buffers for the norms and cos(theta) and writes the gradients with respect
// to the features and the weights.
template <typename Device, typename T>
struct LargeMarginSoftmaxGradFunctor {
  void operator()(OpKernelContext* context, const Device& d,
                  typename TTypes<T>::ConstFlat back_grads,
                  typename TTypes<T>::ConstFlat features,
                  typename TTypes<T>::ConstFlat weights,
                  typename TTypes<float>::ConstFlat cur_lambda,
                  typename TTypes<int32_t>::ConstFlat labels,
                  const int32_t batch_size, const int32_t num_dimensions,
                  const int32_t output_dimensions, const int32_t margin_order,
                  const bool b_angular,
                  typename TTypes<float>::Flat feat_norm,
                  typename TTypes<float>::Flat weights_norm,
                  typename TTypes<float>::Flat cos_theta,
                  typename TTypes<float>::Flat theta_seg,
                  typename TTypes<T>::Flat grad_features,
                  typename TTypes<T>::Flat grad_weights);
};

#if GOOGLE_CUDA == 1
// Partial specializations for the GPU device; their operator() definitions
// are provided by the CUDA compilation unit.
template <typename T>
struct LargeMarginSoftmaxFunctor<GPUDevice, T> {
  void operator()(OpKernelContext* context, const GPUDevice& d,
                  typename TTypes<T>::ConstFlat features,
                  typename TTypes<T>::ConstFlat weights,
                  typename TTypes<int32_t>::ConstFlat global_step,
                  typename TTypes<int32_t>::ConstFlat labels,
                  const int32_t batch_size, const int32_t num_dimensions,
                  const int32_t output_dimensions,
                  const float base, const float gamma, const float power,
                  const float lambda_min, const int32_t margin_order,
                  const bool b_angular,
                  typename TTypes<float>::Flat feat_norm,
                  typename TTypes<float>::Flat weights_norm,
                  typename TTypes<float>::Flat cos_theta,
                  typename TTypes<float>::Flat theta_seg,
                  typename TTypes<float>::Flat output_lambda,
                  typename TTypes<T>::Flat losses);
};

template <typename T>
struct LargeMarginSoftmaxGradFunctor<GPUDevice, T> {
  void operator()(OpKernelContext* context, const GPUDevice& d,
                  typename TTypes<T>::ConstFlat back_grads,
                  typename TTypes<T>::ConstFlat features,
                  typename TTypes<T>::ConstFlat weights,
                  typename TTypes<float>::ConstFlat cur_lambda,
                  typename TTypes<int32_t>::ConstFlat labels,
                  const int32_t batch_size, const int32_t num_dimensions,
                  const int32_t output_dimensions, const int32_t margin_order,
                  const bool b_angular,
                  typename TTypes<float>::Flat feat_norm,
                  typename TTypes<float>::Flat weights_norm,
                  typename TTypes<float>::Flat cos_theta,
                  typename TTypes<float>::Flat theta_seg,
                  typename TTypes<T>::Flat grad_features,
                  typename TTypes<T>::Flat grad_weights);
};
#endif  // GOOGLE_CUDA
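
// A minimal usage sketch of how LargeMarginSoftmaxFunctor could be driven
// from an OpKernel's Compute(). The input order, attribute names, output
// layout, and scratch-buffer shapes here are illustrative assumptions; the
// repository's .cc file defines the actual kernel and registration.
template <typename Device, typename T>
class LargeMarginSoftmaxExampleOp : public tensorflow::OpKernel {
 public:
  explicit LargeMarginSoftmaxExampleOp(tensorflow::OpKernelConstruction* context)
      : tensorflow::OpKernel(context) {
    // Hypothetical attributes mirroring the functor's scalar parameters.
    OP_REQUIRES_OK(context, context->GetAttr("base", &base_));
    OP_REQUIRES_OK(context, context->GetAttr("gamma", &gamma_));
    OP_REQUIRES_OK(context, context->GetAttr("power", &power_));
    OP_REQUIRES_OK(context, context->GetAttr("lambda_min", &lambda_min_));
    OP_REQUIRES_OK(context, context->GetAttr("margin_order", &margin_order_));
    OP_REQUIRES_OK(context, context->GetAttr("b_angular", &b_angular_));
  }

  void Compute(OpKernelContext* context) override {
    // Assumed input order: features, weights, global_step, labels.
    const tensorflow::Tensor& features = context->input(0);
    const tensorflow::Tensor& weights = context->input(1);
    const tensorflow::Tensor& global_step = context->input(2);
    const tensorflow::Tensor& labels = context->input(3);

    const int32_t batch_size = static_cast<int32_t>(features.dim_size(0));
    const int32_t num_dimensions = static_cast<int32_t>(features.dim_size(1));
    const int32_t output_dimensions = static_cast<int32_t>(weights.dim_size(1));

    // Outputs: per-class logits and the annealed lambda actually used.
    tensorflow::Tensor* losses = nullptr;
    OP_REQUIRES_OK(context, context->allocate_output(
        0, tensorflow::TensorShape({batch_size, output_dimensions}), &losses));
    tensorflow::Tensor* output_lambda = nullptr;
    OP_REQUIRES_OK(context, context->allocate_output(
        1, tensorflow::TensorShape({}), &output_lambda));

    // Scratch buffers handed to the functor (shapes are assumptions).
    tensorflow::Tensor feat_norm, weights_norm, cos_theta, theta_seg;
    OP_REQUIRES_OK(context, context->allocate_temp(
        tensorflow::DT_FLOAT, tensorflow::TensorShape({batch_size}),
        &feat_norm));
    OP_REQUIRES_OK(context, context->allocate_temp(
        tensorflow::DT_FLOAT, tensorflow::TensorShape({output_dimensions}),
        &weights_norm));
    OP_REQUIRES_OK(context, context->allocate_temp(
        tensorflow::DT_FLOAT,
        tensorflow::TensorShape({batch_size, output_dimensions}), &cos_theta));
    OP_REQUIRES_OK(context, context->allocate_temp(
        tensorflow::DT_FLOAT, tensorflow::TensorShape({margin_order_ + 1}),
        &theta_seg));

    // Dispatch to the CPU or GPU implementation via the functor template.
    LargeMarginSoftmaxFunctor<Device, T>()(
        context, context->eigen_device<Device>(),
        features.flat<T>(), weights.flat<T>(),
        global_step.flat<int32_t>(), labels.flat<int32_t>(),
        batch_size, num_dimensions, output_dimensions,
        base_, gamma_, power_, lambda_min_, margin_order_, b_angular_,
        feat_norm.flat<float>(), weights_norm.flat<float>(),
        cos_theta.flat<float>(), theta_seg.flat<float>(),
        output_lambda->flat<float>(), losses->flat<T>());
  }

 private:
  float base_ = 0.f, gamma_ = 0.f, power_ = 0.f, lambda_min_ = 0.f;
  int32_t margin_order_ = 4;
  bool b_angular_ = false;
};
// A real kernel would then be registered in the .cc file along the lines of
//   REGISTER_KERNEL_BUILDER(Name("LargeMarginSoftmax").Device(DEVICE_CPU)
//                               .TypeConstraint<float>("T"),
//                           LargeMarginSoftmaxExampleOp<CPUDevice, float>);
// where "LargeMarginSoftmax" stands in for whatever op name the repo defines.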
#endif // KERNEL_L_SOFTMAX_H_