/*
 * @Description: parallel computation (multithreaded)
 * @version: 1.0
 * @Author: Quikziii
 * @email: [email protected]
 * @Date: 2023-03-25
 * @LastEditors: Quikziii
 * @LastEditTime: 2023-04-29
 * @message:
 */
#ifndef _MULTI_H
#define _MULTI_H
#include <pthread.h>   /* for pthread_t, pthread_create, pthread_join */
#define THREADSIZE 8   /* number of worker threads */
struct Matrix;         /* defined elsewhere in the project */
struct ConvolutionArgs
{
    struct Matrix *input;    /* feature map assigned to this thread */
    struct Matrix **kernel;  /* convolution kernels */
    struct Matrix *output;   /* result for this feature map */
    int paddingAmt;          /* zero-padding size */
    int stride;              /* kernel step size */
};
/*
struct Args
{
    struct Matrix *input;
    struct Matrix *output;
};
*/
struct MaxArgs
{
    struct Matrix *input;    /* feature map assigned to this thread */
    struct Matrix *output;   /* pooled result */
    int poolingSize;         /* pooling window size */
    int paddingAmt;          /* zero-padding size */
    int stride;              /* pooling window step size */
};
// shared worker state (defined here, so include multi.h from one translation unit only)
pthread_t th[THREADSIZE];                     // worker threads (THREADSIZE = 8)
struct ConvolutionArgs CONV_Args[THREADSIZE]; // per-thread convolution arguments
//struct Args matrixArgs[THREADSIZE];         // per-thread arguments for ReLU and ToZero
struct MaxArgs Max_Args[THREADSIZE];          // per-thread max-pooling arguments
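/*
 * Dispatch sketch (an assumption about how the implementation presumably uses
 * the globals above; not taken from this header): each Multi_* dispatcher fills
 * one args slot per worker, starts THREADSIZE pthreads on the matching Thread_*
 * routine, and joins them all before returning, roughly:
 *
 *     for (int i = 0; i < THREADSIZE; i++) {
 *         CONV_Args[i].input      = input[i];   // one batch element per worker
 *         CONV_Args[i].kernel     = kernel;     // all workers share the kernels
 *         CONV_Args[i].output     = output[i];
 *         CONV_Args[i].paddingAmt = paddingAmt;
 *         CONV_Args[i].stride     = stride;
 *         pthread_create(&th[i], NULL, Thread_Convolution, &CONV_Args[i]);
 *     }
 *     for (int i = 0; i < THREADSIZE; i++)
 *         pthread_join(th[i], NULL);
 */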
/**
 * @name: Thread_Convolution
 * @msg: performs the convolution assigned to one worker thread
 * @param {struct ConvolutionArgs} args - (input) pointer to the convolution arguments for this thread
 * @return {void*}
 */
void* Thread_Convolution(void *args);
/**
 * @name: Multi_Convolution
 * @msg: convolve a batch of feature maps with the kernels using multiple threads
 * @param {struct Matrix} **input - (input) the batch of feature maps to convolve
 * @param {struct Matrix} **kernel - (input) the kernels used for the convolution
 * @param {struct Matrix} **output - (output) the batch of matrices that receives the results
 * @param {int} paddingAmt - (input) the amount of zero padding
 * @param {int} stride - (input) the step the kernel moves per slide
 * @return {*}
 */
void Multi_Convolution(struct Matrix **input, struct Matrix **kernel, struct Matrix **output, int paddingAmt, int stride);
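/*
 * Caller-side example (a minimal sketch; the batch layout, the kernel count,
 * and the helper Matrix_Create are assumptions, not part of this header):
 *
 *     struct Matrix *input[THREADSIZE];   // batch of feature maps
 *     struct Matrix *kernel[4];           // convolution kernels (count is illustrative)
 *     struct Matrix *output[THREADSIZE];  // one result per batch element
 *     // ... allocate and fill the matrices, e.g. input[i] = Matrix_Create(h, w); ...
 *     Multi_Convolution(input, kernel, output, 1, 1);  // padding 1, stride 1
 */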
/**
 * @name: Thread_ReLU
 * @msg: applies ReLU inside one worker thread
 * @param {struct Args} args - (input) pointer to the ReLU arguments for this thread
 * @return {void*}
 */
//void* Thread_ReLU(void *args);
/**
 * @name: Multi_ReLU
 * @msg: ReLU activation (values below 0 are set to 0) applied with multiple threads
 * @param {struct Matrix} **input - (input) the batch of feature maps to apply ReLU to
 * @param {struct Matrix} **output - (output) the batch of feature maps after ReLU
 * @return {*}
 */
//void Multi_ReLU (struct Matrix **input,struct Matrix **output);
/**
 * @name: Thread_MaxPooLing
 * @msg: performs max pooling inside one worker thread
 * @param {struct MaxArgs} args - (input) pointer to the max-pooling arguments for this thread
 * @return {void*}
 */
void* Thread_MaxPooLing(void *args);
/**
 * @name: Multi_MaxPooLing
 * @msg: apply max pooling to a batch of feature maps using multiple threads
 * @param {struct Matrix} **input - (input) the batch of feature maps to pool
 * @param {struct Matrix} **output - (output) the batch of feature maps after max pooling
 * @param {int} poolingSize - (input) the size of the pooling window
 * @param {int} paddingAmt - (input) the amount of zero padding
 * @param {int} stride - (input) the step the pooling window moves per slide
 * @return {*}
 */
void Multi_MaxPooLing(struct Matrix **input, struct Matrix **output, int poolingSize, int paddingAmt, int stride);
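/*
 * Caller-side example (sketch, continuing the convolution example above):
 * 2x2 max pooling with stride 2 and no padding over the convolution outputs.
 *
 *     struct Matrix *pooled[THREADSIZE];
 *     // ... allocate each pooled[i] to the post-pooling size ...
 *     Multi_MaxPooLing(output, pooled, 2, 0, 2);  // poolingSize 2, padding 0, stride 2
 */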
/**
 * @name: Thread_ToZero
 * @msg: sets gradients to zero inside one worker thread
 * @param {struct Args} args - (input) pointer to the zeroing arguments for this thread
 * @return {void*}
 */
//void* Thread_ToZero(void *args);
/**
 * @name: Multi_ToZero
 * @msg: clear the previous gradients to zero using multiple threads
 * @param {struct Matrix} **input - (input) the batch of gradients to zero out
 * @return {*}
 */
//void Multi_ToZero (struct Matrix **input);
/**
 * @name: Thread_Convolution_Variable
 * @msg: computes the convolution-layer gradient inside one worker thread
 * @param {void*} args - (input) the convolution-gradient arguments for this thread (argument struct defined in the implementation)
 * @return {void*}
 */
void* Thread_Convolution_Variable(void *args);
/**
 * @name: Multi_GradientConvolution
 * @msg: calculate the convolution-layer gradient and apply gradient descent to the kernels using multiple threads
 * @param {struct Matrix} **lastTermGradient - (input) the batch of gradients arriving from the layer after the convolution
 * @param {struct Matrix} **kernel - (output) the convolution kernels of this layer, updated by the gradient step
 * @param {struct Matrix} **gradient - (output) this layer's gradient
 * @param {struct Matrix} **variable - (input) the batch of matrices that was fed into the convolution
 * @param {struct Matrix} **gradientKernel - (input) this layer's kernel gradient
 * @param {int} stride - (input) the step the kernel moves per slide
 * @param {int} paddingAmt - (input) the amount of zero padding
 * @return {*}
 */
void Multi_GradientConvolution(struct Matrix **lastTermGradient, struct Matrix **kernel, struct Matrix **gradient,
                               struct Matrix **variable, struct Matrix **gradientKernel, int stride, int paddingAmt);
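/*
 * Backward-pass sketch (an assumption about typical use; the variable names are
 * illustrative): given the batch of gradients coming back from the next layer
 * (lastTermGradient) and the inputs that were fed to this convolution (variable),
 * a single call writes this layer's gradient into `gradient` and applies the
 * kernel update via `gradientKernel`:
 *
 *     Multi_GradientConvolution(lastTermGradient, kernel, gradient,
 *                               variable, gradientKernel, 1, 1);  // stride 1, padding 1
 */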
#endif