int8 inference on x86
nihui committed Jul 30, 2018
1 parent 6eb6abf commit 4be27a0
Showing 5 changed files with 670 additions and 26 deletions.
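The new kernels take int8 activation and weight blobs and accumulate int8 x int8 products into an int32 top blob. Quantizing floats to int8 and dequantizing the int32 sums happen outside these files; the sketch below shows that surrounding flow in miniature, with illustrative scale values that are assumptions rather than code from this commit.

#include <cmath>
#include <cstdint>

int main()
{
    const float scale_in = 127.f / 4.f; // assumed activation scale (illustrative)
    const float scale_w = 127.f / 1.f;  // assumed weight scale (illustrative)

    float in = 1.5f, w = 0.25f;

    // quantize to int8 (round to nearest; real code also saturates to [-127, 127])
    signed char q_in = (signed char)std::lround(in * scale_in);
    signed char q_w = (signed char)std::lround(w * scale_w);

    // what the conv*_int8_sse kernels do per tap: widen to int before multiplying
    int acc = (int)q_in * (int)q_w;

    // dequantize the int32 accumulator back to float
    float out = acc / (scale_in * scale_w); // ~= 1.5f * 0.25f = 0.375f
    (void)out;
    return 0;
}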
198 changes: 198 additions & 0 deletions src/layer/x86/convolution_1x1_int8.h
@@ -0,0 +1,198 @@
// SenseNets is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2018 SenseNets Technology Ltd. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

static void conv1x1s1_int8_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt)
{
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const signed char *kernel = _kernel;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out0 = top_blob.channel(p);

        out0.fill(0);

        int q = 0;

        // unroll over 8 input channels at a time
        for (; q + 7 < inch; q += 8)
        {
            int* outptr0 = out0;

            const signed char *kernel0 = kernel + p * inch + q;

            const signed char *r0 = bottom_blob.channel(q);
            const signed char *r1 = bottom_blob.channel(q + 1);
            const signed char *r2 = bottom_blob.channel(q + 2);
            const signed char *r3 = bottom_blob.channel(q + 3);
            const signed char *r4 = bottom_blob.channel(q + 4);
            const signed char *r5 = bottom_blob.channel(q + 5);
            const signed char *r6 = bottom_blob.channel(q + 6);
            const signed char *r7 = bottom_blob.channel(q + 7);

            int size = outw * outh;
            int remain = size;

            for (; remain > 0; remain--)
            {
                // TODO: vectorize with SSE intrinsics
                int sum0 = (int)*r0 * (int)kernel0[0] + (int)*r1 * (int)kernel0[1] +
                           (int)*r2 * (int)kernel0[2] + (int)*r3 * (int)kernel0[3] +
                           (int)*r4 * (int)kernel0[4] + (int)*r5 * (int)kernel0[5] +
                           (int)*r6 * (int)kernel0[6] + (int)*r7 * (int)kernel0[7];

                *outptr0 += sum0;

                r0++;
                r1++;
                r2++;
                r3++;
                r4++;
                r5++;
                r6++;
                r7++;
                outptr0++;
            }
        }

        // leftover input channels
        for (; q < inch; q++)
        {
            int* outptr0 = out0;

            const signed char *r0 = bottom_blob.channel(q);

            const signed char *kernel0 = kernel + p * inch + q;
            const signed char k0 = kernel0[0];

            int size = outw * outh;
            int remain = size;

            for (; remain > 0; remain--)
            {
                int sum0 = (int)(*r0) * (int)k0;

                *outptr0 += sum0;

                r0++;
                outptr0++;
            }
        }
    }
}

static void conv1x1s2_int8_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // hop from the end of one sampled row to the start of the next:
    // the inner loop consumes 2*outw columns, and stride 2 skips every other row
    const int tailstep = w - 2 * outw + w;
    const signed char *kernel = _kernel;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out0 = top_blob.channel(p);

        out0.fill(0);

        int q = 0;

        // unroll over 8 input channels at a time
        for (; q + 7 < inch; q += 8)
        {
            int* outptr0 = out0;

            const signed char *kernel0 = kernel + p * inch + q;

            const signed char *r0 = bottom_blob.channel(q);
            const signed char *r1 = bottom_blob.channel(q + 1);
            const signed char *r2 = bottom_blob.channel(q + 2);
            const signed char *r3 = bottom_blob.channel(q + 3);
            const signed char *r4 = bottom_blob.channel(q + 4);
            const signed char *r5 = bottom_blob.channel(q + 5);
            const signed char *r6 = bottom_blob.channel(q + 6);
            const signed char *r7 = bottom_blob.channel(q + 7);

            for (int i = 0; i < outh; i++)
            {
                int remain = outw;

                for (; remain > 0; remain--)
                {
                    // TODO: vectorize with SSE intrinsics
                    int sum0 = (int)*r0 * (int)kernel0[0] + (int)*r1 * (int)kernel0[1] +
                               (int)*r2 * (int)kernel0[2] + (int)*r3 * (int)kernel0[3] +
                               (int)*r4 * (int)kernel0[4] + (int)*r5 * (int)kernel0[5] +
                               (int)*r6 * (int)kernel0[6] + (int)*r7 * (int)kernel0[7];

                    *outptr0 += sum0;

                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    r3 += 2;
                    r4 += 2;
                    r5 += 2;
                    r6 += 2;
                    r7 += 2;
                    outptr0++;
                }

                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
                r3 += tailstep;
                r4 += tailstep;
                r5 += tailstep;
                r6 += tailstep;
                r7 += tailstep;
            }
        }

        // leftover input channels
        for (; q < inch; q++)
        {
            int* outptr0 = out0;

            const signed char *r0 = bottom_blob.channel(q);

            const signed char *kernel0 = kernel + p * inch + q;

            for (int i = 0; i < outh; i++)
            {
                int remain = outw;

                for (; remain > 0; remain--)
                {
                    // TODO: vectorize with SSE intrinsics
                    int sum0 = (int)*r0 * (int)kernel0[0];

                    *outptr0 += sum0;

                    r0 += 2;
                    outptr0++;
                }

                r0 += tailstep;
            }
        }
    }
}
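A note on the tailstep in conv1x1s2_int8_sse above: with stride 2 the inner loop advances each input pointer by 2*outw per output row, and the next sampled input row begins 2*w elements past the current row start, so the leftover hop is 2*w - 2*outw, written as w - 2*outw + w. A small self-check of that bookkeeping, assuming no padding so that outw = w/2:

#include <cassert>

int main()
{
    const int w = 10;       // example input width
    const int outw = w / 2; // stride-2 output width, assuming no padding
    const int tailstep = w - 2 * outw + w;

    int r0 = 0;             // offset of an input row pointer
    r0 += 2 * outw;         // inner loop: 2 per output, outw outputs
    r0 += tailstep;         // the hop applied after each output row
    assert(r0 == 2 * w);    // lands exactly on the next sampled row
    return 0;
}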
149 changes: 149 additions & 0 deletions src/layer/x86/convolution_3x3_int8.h
@@ -0,0 +1,149 @@
// SenseNets is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2018 SenseNets Technology Ltd. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

static void conv3x3s1_int8_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const signed char *kernel = _kernel;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out0 = top_blob.channel(p);

        out0.fill(0);

        const signed char *kernel0 = kernel + p * inch * 9;

        for (int q = 0; q < inch; q++)
        {
            int *outptr0 = out0;

            const signed char *img0 = bottom_blob.channel(q);

            const signed char *r0 = img0;
            const signed char *r1 = img0 + w;
            const signed char *r2 = img0 + w * 2;

            for (int i = 0; i < outh; i++)
            {
                int remain = outw;

                for (; remain > 0; remain--)
                {
                    int sum0 = 0;

                    sum0 += (int)r0[0] * (int)kernel0[0];
                    sum0 += (int)r0[1] * (int)kernel0[1];
                    sum0 += (int)r0[2] * (int)kernel0[2];
                    sum0 += (int)r1[0] * (int)kernel0[3];
                    sum0 += (int)r1[1] * (int)kernel0[4];
                    sum0 += (int)r1[2] * (int)kernel0[5];
                    sum0 += (int)r2[0] * (int)kernel0[6];
                    sum0 += (int)r2[1] * (int)kernel0[7];
                    sum0 += (int)r2[2] * (int)kernel0[8];

                    *outptr0 += sum0;

                    r0++;
                    r1++;
                    r2++;
                    outptr0++;
                }

                // the 3x3 window overhangs 2 columns, so +2 lands on the next row
                r0 += 2;
                r1 += 2;
                r2 += 2;
            }

            kernel0 += 9;
        }
    }
}

static void conv3x3s2_int8_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // hop from the end of one sampled row to the start of the next (stride 2 skips a row)
    const int tailstep = w - 2 * outw + w;

    const signed char *kernel = _kernel;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out0 = top_blob.channel(p);

        out0.fill(0);

        const signed char *kernel0 = kernel + p * inch * 9;

        for (int q = 0; q < inch; q++)
        {
            int *outptr0 = out0;

            const signed char *img0 = bottom_blob.channel(q);

            const signed char *r0 = img0;
            const signed char *r1 = img0 + w;
            const signed char *r2 = img0 + w * 2;

            for (int i = 0; i < outh; i++)
            {
                int remain = outw;

                for (; remain > 0; remain--)
                {
                    int sum0 = 0;

                    sum0 += (int)r0[0] * (int)kernel0[0];
                    sum0 += (int)r0[1] * (int)kernel0[1];
                    sum0 += (int)r0[2] * (int)kernel0[2];
                    sum0 += (int)r1[0] * (int)kernel0[3];
                    sum0 += (int)r1[1] * (int)kernel0[4];
                    sum0 += (int)r1[2] * (int)kernel0[5];
                    sum0 += (int)r2[0] * (int)kernel0[6];
                    sum0 += (int)r2[1] * (int)kernel0[7];
                    sum0 += (int)r2[2] * (int)kernel0[8];

                    *outptr0 += sum0;

                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    outptr0++;
                }

                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
            }

            kernel0 += 9;
        }
    }
}
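Similar bookkeeping holds for the 3x3 stride-1 kernel: its inner loop advances each row pointer by outw, and since these kernels compute a convolution without padding (ncnn's Convolution layer pads the input before dispatching here, so outw = w - 2 is a safe assumption for this sketch), the trailing += 2 skips the two columns the window overhangs and lands on the next input row. A small self-check:

#include <cassert>

int main()
{
    const int w = 8, h = 6; // example padded input size
    const int outw = w - 2; // 3x3, stride 1, no further padding
    const int outh = h - 2;

    int r0 = 0;             // offset of the top row pointer
    for (int i = 0; i < outh; i++)
    {
        r0 += outw;         // inner loop advances 1 per output
        r0 += 2;            // skip the 2 overhang columns
    }
    assert(r0 == outh * w); // exactly one input row consumed per output row
    return 0;
}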