您的位置:首页 > 理论基础 > 计算机网络

运行结果和单线程一致的【辣鸡】神经网络。

2017-10-26 19:16 323 查看
希望一会儿做完可视化,能让我发现到底哪里出问题了

居然比单线程的程序慢了100倍!

Main.cpp

#include "net.h"

void core();

// Entry point: runs the training demo and reports total wall-clock time.
// (Removed the unused `double program = clock();` local and the dead
// commented-out doit*() calls.)
int main()
{
    auto t1 = std::chrono::system_clock::now();
    core();
    auto t2 = std::chrono::system_clock::now();
    cout<<"程序一共运行时间: " << std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count()<<"ms"<<endl;
    return 0;
}

void core()
{
//0 10 20 30 -> 137 数1点个数
//0 1 2 3 -> 137 识别二进制
std::vector<int>in({0,40,80,120});
std::vector<int>out({160});
net_t<162> net("data.txt");
srand(0);

std::vector<double>input0({0, 0, 0, 0}); //0
std::vector<double>input1({0, 0, 0, 1}); //1
std::vector<double>input2({0, 0, 1, 0}); //1
std::vector<double>input3({0, 0, 1, 1}); //0
std::vector<double>input4({0, 1, 0, 0}); //1
std::vector<double>input5({0, 1, 0, 1}); //0
std::vector<double>input6({0, 1, 1, 0}); //0
std::vector<double>input7({0, 1, 1, 1}); //1
std::vector<double>input8({1, 0, 0, 0}); //1
std::vector<double>input9({1, 0, 0, 1}); //0
std::vector<double>input10({1, 0, 1, 0}); //0
std::vector<double>input11({1, 0, 1, 1}); //1
std::vector<double>input12({1, 1, 0, 0}); //0
std::vector<double>input13({1, 1, 0, 1}); //1
std::vector<double>input14({1, 1, 1, 0}); //1
std::vector<double>input15({1, 1, 1, 1}); //0

std::vector<double>output0({0.05});
std::vector<double>output1({0.1});
std::vector<double>output2({0.15});
std::vector<double>output3({0.2});
std::vector<double>output4({0.25});
std::vector<double>output5({0.3});
std::vector<double>output6({0.35});
std::vector<double>output7({0.4});
std::vector<double>output8({0.45});
std::vector<double>output9({0.5});
std::vector<double>output10({0.55});
std::vector<double>output11({0.6});
std::vector<double>output12({0.65});
std::vector<double>output13({0.7});
std::vector<double>output14({0.75});
std::vector<double>output15({0.8});

net.activation_way = "ReLU";
net.setIO(input1, output1, &in, &out);
ANN::rate = 0.01;

//net.outputNetwork();
//net.outputNetwork();
//net.train(input5, output5);
//net.train(input6, output6);
//net.train(input7, output7);
//net.testOutput(input8);
//net.testOutput(input15);
//return;

double error=0;
for (int i = 1;i<=20000;++i){
error = 0;
error += net.train(input0, output0);
error += net.train(input1, output1);
error += net.train(input2, output2);
error += net.train(input3, output3);
error += net.train(input4, output4);
error += net.train(input5, output5);
error += net.train(input6, output6);
error += net.train(input7, output7);
error += net.train(input8, output8);
error += net.train(input9, output9);
error += net.train(input10, output10);
error += net.train(input11, output11);
error += net.train(input12, output12);
error += net.train(input13, output13);
error += net.train(input14, output14);
error += net.train(input15, output15);
error/=16;
cout<<error<<" "<<i<<"\r";
}
prln(error);
//prln(net.userful_neuron_size);
net.testOutput(input0);
net.testOutput(input1);
net.testOutput(input2);
net.testOutput(input3);
net.testOutput(input4);
net.testOutput(input5);
net.testOutput(input6);
net.testOutput(input7);
net.testOutput(input8);
net.testOutput(input9);
net.testOutput(input10);
net.testOutput(input11);
net.testOutput(input12);
net.testOutput(input13);
net.testOutput(input14);
net.testOutput(input15);

}

net.h

#ifndef __net_h__
#define __net_h__

#include <dbus-cxx.h>
//#include <unistd.h>

#include <bits/stdc++.h>
//#include <ctime>
//#include <chrono>
//#include <iomanip>

//#include <json/json.h>
//#include "recordlog.h"
//#include <memory>
//#include "threadsafe_queue.h" //线程安全点queue
#include "thread_pool.h"
//#include <functional>

using std::cin;
using std::endl;
using std::cout;

// Debug print helpers: print `expr = value`.
#define pr(x) cout<<#x<<" = "<<x<<" "
#define prln(x) cout<<#x<<" = "<<x<<endl

// Default to a single worker thread until the program is known to work.
#define MAX_THREADS 1

//#define NODE (*this)
// The NODE_* macros require a (neuron_t&) reference named NODE in scope,
// e.g. `neuron_t &NODE = *this;`.
#define NODE_ARRAY (NODE.neuron_array)
#define NODE_GAIN (NODE.gain)
#define NODE_THETA (NODE.theta) // the node's theta, i.e. its bias/threshold
#define NODE_VALUE (NODE_GAIN + NODE_THETA) // the node's total input energy (gain + theta)
#define NODE_OUTPUT (NODE.output) // the node's actual output
#define NODE_ACWAY (NODE.activation_way)
#define NODE_PE (NODE.partial_derivative) // d(error)/d(output) for this node
#define D_NODE (ANN::derivative(NODE_VALUE, NODE_ACWAY)) // derivative of the activation at the node's total input (gain + theta)
#define NODE_BACK_ARRAY (NODE.back_array)

//#define NEXT_NODE (nextnode -> first)
// The NEXT_* macros require a (neuron_t&) named NEXT_NODE bound to the successor node.
#define NEXT_NODE_OUTPUT (NEXT_NODE.output) // the successor's actual output
#define NEXT_NODE_GAIN (NEXT_NODE.gain)
#define NEXT_NODE_PE (NEXT_NODE.partial_derivative) // d(error)/d(output) for the successor
#define NEXT_NODE_THETA (NEXT_NODE.theta) // the successor's theta (bias/threshold)
#define NEXT_NODE_VALUE (NEXT_NODE_GAIN + NEXT_NODE_THETA) // the successor's total input energy (gain + theta)
#define NEXT_ACWAY (NEXT_NODE.activation_way)
#define D_NEXT_NODE (ANN::derivative(NEXT_NODE_VALUE, NEXT_ACWAY)) // derivative of the activation at the successor's total input
#define NODE_TO_NEXTNODE_WEIGHT (nextnode.second)
#define NEXT_NODE_COUNT (NEXT_NODE.count)
#define NEXT_NODE_IN_DEGREE (NEXT_NODE.in_degree)

//#define BACK_NODE (nextnode -> first)
// The BACK_* macros require a (neuron_t&) named BACK_NODE bound to the predecessor node.
#define BACK_NODE_OUTPUT (BACK_NODE.output)
#define BACK_NODE_GAIN (BACK_NODE.gain)
#define BACK_NODE_PE (BACK_NODE.partial_derivative)
#define BACK_NODE_COUNT (BACK_NODE.count)
#define BACK_NODE_OUT_DEGREE (BACK_NODE.out_degree)
//TODO

class neuron_t;
typedef std::vector<int> vector_map_t;
typedef std::pair<neuron_t*, double> PID; // (pointer to target neuron, edge weight)
typedef std::vector<PID> neuron_array_t;

namespace ANN
{
extern double rate; // global learning rate

// activation functions
extern double sigmoid(double x);
extern double line(double x);
extern double ReLU(double x);

// derivative of the activation function named by `activation_way`, at x
extern double derivative(double x, const std::string &activation_way);

// random integer in [L, R]
extern long long randomInt(long long L, long long R);

// random double in [l, r]
extern double randomDouble(double l, double r);

// for a neuron whose summed input is `sum` with bias `theta`:
// computes f(sum + theta), where f is selected by `activation_way`
extern double activationFunction(double sum, double theta, const std::string &activation_way);
}

// A single neuron in the network graph.
class neuron_t
{
public:
double gain; // energy accumulated from predecessors during the current pass
double output; // actual output after the activation function
int number; // neuron id (0-based)
double theta;
neuron_array_t neuron_array; // outgoing edges: (successor, weight)
neuron_array_t back_array; // incoming edges; the stored weight is a placeholder and never used
double partial_derivative; // d(error)/d(output) for this neuron
bool is_input;
bool is_output;
std::string activation_way;
int count; // remaining unfinished predecessors (forward) / successors (backward); the neuron may run when it reaches 0
bool isInThreadPool; // whether this neuron is already queued in the thread pool
int in_degree;
int out_degree;
std::mutex mutex; // guards the counters/flags above when deciding whether to enqueue

// Forward / backward computation for this single neuron.
// (The enqueue-while-updating strategy was dropped: it was no faster and
// harder to maintain.)
void propagate();
void back();
};

// Task signature used by the thread pool: (neuron to process, pool to enqueue into).
typedef std::function<void(neuron_t*, thread_pool*)> FNT;
namespace ANN{
// Runs node->propagate(), then enqueues successors that became ready.
void __propagate(neuron_t* node, thread_pool* pool);

const FNT propagate_neuron = __propagate;

// Runs node->back(), then enqueues predecessors that became ready.
void __back(neuron_t* node, thread_pool* pool);

const FNT back_neuron = __back;
}

template<int neuron_size>
class net_t
{
public:
neuron_t neurons[neuron_size];
vector_map_t vector_map[neuron_size]; //普通点地图,用来保存一开始读入点整个地图
std::string activation_way; //激活函数的选择,默认ReLU

std::vector<double> input_weight;

std::vector<int> output_number;
std::vector<int> input_number;
int tmp[neuron_size]; //临时数组,生成过n的全排列,和拓扑排序中记录入度。
int height[neuron_size]; //辅助构图的高度数组
int topology[neuron_size]; //拓扑序
thread_pool pool;

~net_t()
{
//nothing to do
}

net_t (std::string file_name):pool(MAX_THREADS){
//初始化新的网络
FILE *file = fopen(file_name.c_str(), "r");
if (file == NULL)
{
printf("文件不存在!请输入正常点数据!");
assert(0);
}
printf("读入点文件名为:[%s]\n", file_name.c_str());
int n;
fscanf(file, "%d", &n);
this -> activation_way = "ReLU";
for (int i = 0; i < neuron_size; ++ i){
vector_map[i].clear();
tmp[i] = i;
neurons[i].number = i;
neurons[i].is_input = false;
neurons[i].is_output = false;
}
this -> output_number.clear();
this -> input_number.clear();
printf("神经元个数为 %d\n", neuron_size);
while (n--){
int s, t;
fscanf(file, "%d%d", &s, &t);
vector_map[s].push_back(t);
vector_map[t].push_back(s);
}
for (int i = 0; i < neuron_size; ++ i) //去重,可能有a->b b->这样点读入,所以要去一次重
{
std::sort(vector_map[i].begin(), vector_map[i].end());
vector_map[i].erase(unique(vector_map[i].begin(), vector_map[i].end()), vector_map[i].end());
}
fclose(file);
}

//初始化输入节点
void initInputNeuron(std::vector<int> &input_num){
int sz = input_num.size(); //输入节点总数
input_weight.resize(sz); //输入节点到第一个节点点权重
for (int i = 0; i < sz; ++ i){
if (activation_way == "ReLU") input_weight[i] = 1;
else input_weight[i] = ANN::randomDouble(-1, 1);
neurons[input_num[i]].is_input = true;
}
}

//利用高度地图来构建地图
//TODO!!!!
//本次最大问题:某些点不到output,所以back操作点时候出了问题。
void buildNetwork()
{
auto build_map = [=](int from, int to){
if (activation_way == "ReLU"){
neurons[from].neuron_array.push_back(std::make_pair(&neurons[to], ANN::randomDouble(0.1,1)));
}
else{
neurons[from].neuron_array.push_back(std::make_pair(&neurons[to], ANN::randomDouble(-1,1)));
}
neurons[to].back_array.push_back(std::make_pair(&neurons[from], 999983)); //反向边,权重随便定了一个数字
};
for (int i = 0; i < neuron_size; ++ i){
for (auto curnode : vector_map[i]){
if (height[i] > height[curnode]){
build_map(i, curnode);
}
continue;
//TODO 考虑是否要增加网络复杂度
if (height[i] == height[curnode] && i < curnode)
{
build_map(i, curnode);
}
}
}
for (int i = 0; i < neuron_size; ++ i){
if (activation_way == "ReLU") neurons[i].theta = 0;
else neurons[i].theta = ANN::randomDouble(-1, 1);
neurons[i].in_degree = neurons[i].out_degree = 0;
}

auto cmp1 = [](auto a, auto b)->bool{return a<b;};
auto cmp2 = [](auto a, auto b)->bool{return a>b;};

auto bfs = [&](const std::vector<int> &st,const auto &cmp, const bool &flag)
{
static bool inqueue[neuron_size];
memset(inqueue, 0, sizeof(inqueue));
std::queue<int>q;
for (auto x : st)
{
q.push(x);
inqueue[x] = true; //标记走过点
}
while (!q.empty())
{
int now = q.front();
q.pop();
int debug=0;

for (auto x : vector_map[now])
{
//if (x==120) debug=1;
if (debug) cout<<"-------------\n"<<endl;
if (debug) cout<<now<<" "<<x<<" "<<height[now]<<" "<<height[x]<<" "<<inqueue[now]<<endl;
if (cmp (height[now] , height[x]))
{
//cout<<"@@@"<<endl;
if (!flag)
{
if (debug)cout<<"@@"<<endl;
++neurons[x].in_degree;
}
else
{
if (debug)cout<<"##"<<endl;
++neurons[x].out_degree;
}
//cout<< now << " "<< x << " "<<neurons[x].in_degree <<" "<< neurons[x].out_degree<<endl;
if (inqueue[x] == false)
{
inqueue[x] = true;
q.push(x);
}
}
debug=0;

}
}
//cout<<"===="<<endl;
};
bfs(input_number, cmp2, false);
bfs(output_number, cmp1, true);
}

void setIO(std::vector<double> &input,
std::vector<double> &output,
std::vector<int> *input_num = NULL,
std::vector<int> *output_num = NULL){
if (input.size() == 0){
//throws something TODO
return;
}
if (output.size() == 0){
//throws something TODO
return;
}

if (input_num && output_num)
{
output_number = *output_num;
input_number = *input_num;
if (input_num -> size() != input.size() || output_num -> size() != output.size())
{
//throws something TODO
return;
}
}
else
{
std::random_shuffle(tmp, tmp + neuron_size);
printf("output nodes are: ");
for (int i = 0; i < output.size(); ++ i){
output_number.push_back(tmp[i]);
printf("%d ",tmp[i]);
}
printf("\n");
printf("input nodes are:");
for (int i = output.size(); i < input.size() + output.size(); ++ i){
input_number.push_back(tmp[i]);
printf("%d ",tmp[i]);
}
printf("\n");
}
initInputNeuron(*input_num);

for (int i = 0; i < neuron_size; ++ i)
{
neurons[i].activation_way = activation_way;
neurons[i].out_degree = 0;
neurons[i].in_degree = 0;
}

for (int i = 0; i < output.size(); ++ i){
neurons[output_number[i]].is_output = true;
}

//下面代码是实现,给每个节点定义高度,从而实现构建DAG图
std::queue<int>q[output.size() + input.size()];
memset(height, -1, sizeof(height));
int cnt=0;
//下面点代码,把所有点输入,输出节点塞进队列中
for (auto curnode : output_number){
q[cnt++].push(curnode);
height[curnode] = 0;
}
for (auto curnode : input_number){
q[cnt++].push(curnode);
height[curnode] = neuron_size;
}
//然后同时从两个方向做BFS,直到BFS停止工作.
bool flag = true;
while (flag){
int cnt = 0;
flag = false;
for (auto curnode : output_number){
flag |= bfs(q[cnt++], 1);
}
for (auto curnode : input_number){
flag |= bfs(q[cnt++], -1);
}
}
buildNetwork();
}

bool bfs(std::queue<int> &q, int delta){
if (q.empty()){
return false;
}
int h = height[q.front()];
while (!q.empty() && height[q.front()] == h){
int curnode = q.front();
q.pop();
for (auto nextnode : vector_map[curnode]){
if (height[nextnode] != -1){
continue;
}
height[nextnode] = h + delta;
q.push(nextnode);
}
}
return true;
}

void propagate(std::vector<double> &input){
if (activation_way == "ReLU") //对于ReLU算法,暂且采用此方法,降低死亡率。
{
for (int i = 0; i < input_number.size(); ++i)
{
input_weight[i] = 1;
}
}
//初始化所有节点
for (int i = 0; i < neuron_size; ++ i){
neurons[i].gain = 0;
neurons[i].output = 0;
neurons[i].isInThreadPool = false; //默认肯定不在线程池中
neurons[i].count = neurons[i].in_degree;
}

for (int i = 0; i != input.size(); ++ i){
neuron_t &NODE = neurons[input_number[i]];
NODE.mutex.lock();
NODE_GAIN = NODE_GAIN + input_weight[i] * input[i];
NODE.isInThreadPool = true;
auto f = ANN::propagate_neuron;
pool.async<neuron_t*, thread_pool*>(f, &NODE, &pool);
NODE.mutex.unlock();
}
//在这里等待所有线程完毕
pool.wait();
}

void back(std::vector<double> &input, std::vector<double> &output){
for (int i = 0; i < neuron_size; ++ i){
neurons[i].partial_derivative = 0;
neurons[i].count = neurons[i].out_degree;
neurons[i].isInThreadPool = false;
}

for (int i = 0; i != output.size(); ++ i)
{
neuron_t &NODE = neurons[output_number[i]];
NODE_PE = NODE_OUTPUT - output[i];
//cout<< NODE_OUTPUT<<" "<<output[i]<<" "<<NODE_PE<<endl;
NODE_THETA -= NODE_PE * D_NODE * ANN::rate;
for (auto &backnode : NODE_BACK_ARRAY)
{
neuron_t &BACK_NODE = *(backnode.first);
//cout<< BACK_NODE.number << endl;
BACK_NODE.mutex.lock();
if (!--BACK_NODE_COUNT)
{
assert(BACK_NODE.isInThreadPool == false);
BACK_NODE.isInThreadPool = true;
auto f = ANN::back_neuron;
//cout<<"@"<<" "<<BACK_NODE.number<<endl;
pool.async<neuron_t*, thread_pool*>(f, &BACK_NODE, &pool);
}
BACK_NODE.mutex.unlock();
}
}
//在这里等待所有线程完毕
pool.wait();

for (int i = 0; i < input.size(); ++ i){
neuron_t &NODE = neurons[input_number[i]];
double tmp = input[i] * NODE_PE * D_NODE;
input_weight[i] -= tmp * ANN::rate;
}
}

double train(std::vector<double> &input, std::vector<double> &output){
propagate(input);

back(input, output);
double error=0;
for (int i = 0; i < output.size(); ++ i){
error += 0.5*pow((neurons[output_number[i]].output - output[i]), 2);
}
return error;
}

void outputNetwork(){
printf("---------------input nodes------------:\n");
for (int i = 0; i < input_number.size(); ++ i)
{
printf("[%d] weight:(%.7lf) \n", input_number[i], input_weight[i]);
}
printf("---------------other nodes------------\n");
printf("other nodes\n");
for (int i = 0; i < neuron_size; ++ i){
neuron_t &NODE = neurons[i];
printf("[%d] gain(%.7lf) theta(%.7lf) par_derivative(%.7lf) output(%.70lf) d(%.7lf) in_degree(%d) out_degree(%d) count(%d)\n",
NODE.number,
(double)NODE_GAIN,
NODE_THETA,
NODE_PE,
NODE_OUTPUT,
D_NODE,
NODE.out_degree,
NODE.in_degree,
(int)NODE.count);
for (auto nextnode : NODE.neuron_array){
printf(" -> %d (%.7lf)\n", nextnode.first -> number, nextnode.second);
}
}
printf("=============End====================\n");
}

void testOutput(std::vector<double> &input)
{
propagate(input);
cout<<"output: ";
for (auto curnode : output_number)
{
printf("%.7lf ", (double)neurons[curnode].output);
}
cout<<endl;
}

std::vector<double> getTest(std::vector<double> &input)
{
std::vector<double> q;
for (auto curnode : output_number)
{
q.push_back(neurons[curnode].gain);
}
return move(q);
}
};

#endif


net.cpp

#include "net.h"
#include <dbus-cxx.h>
#include <unistd.h>

#include <bits/stdc++.h>
#include <ctime>
#include <chrono>
#include <iomanip>

#include <json/json.h>
#include "recordlog.h"
#include <memory>
#include "threadsafe_queue.h" //线程安全点queue
#include "thread_pool.h"
#include <functional>

// Definition of the global learning rate (declared extern in net.h).
double ANN::rate;

// Logistic sigmoid activation: maps any real x into (0, 1).
double ANN::sigmoid(double x)
{
    const double e = exp(-x);
    return 1.0 / (1.0 + e);
}

// Identity ("linear") activation: returns its argument unchanged.
double ANN::line(double x)
{
return x;
}

// Rectified linear unit: 0 for non-positive inputs, x otherwise.
// (The comparison is kept as x <= 0 so NaN propagates exactly as before.)
double ANN::ReLU(double x)
{
    return (x <= 0) ? 0 : x;
}

// Derivative of the activation named by `activation_way`, evaluated at the
// pre-activation value x.  Aborts on an unknown name.
// NOTE(review): for ReLU the derivative at exactly x == 0 is taken as 1
// while ANN::ReLU itself outputs 0 there — kept as-is so training
// behaviour is unchanged.
double ANN::derivative(double x, const std::string &activation_way)
{
    if (activation_way == "sigmoid"){
        const double s = sigmoid(x); // evaluate the sigmoid once, not twice
        return s * (1 - s);
    }
    if (activation_way == "ReLU"){
        if (x<0) return 0;
        return 1;
    }
    if (activation_way == "line"){
        return 1;
    }
    cout<<"no activationFunction!"<<endl;
    assert(0);
    return 0;
}

// Random integer in [L, R].
// NOTE(review): multiplying four rand() results deliberately wraps in
// unsigned arithmetic, and the final % introduces modulo bias — adequate
// for seeding weights (reproducible with srand(0)), but not uniform;
// <random> would be the principled replacement.
long long ANN::randomInt(long long L, long long R)
{
long long tmp = (unsigned long long)rand()
*(unsigned long long)rand()
*(unsigned long long)rand()
*(unsigned long long)rand() % (R - L + 1);
return L + tmp;
}

// Random double in [l, r] with 5 decimal digits of granularity,
// built on top of randomInt (so it inherits its bias and determinism).
double ANN::randomDouble(double l, double r)
{
return randomInt(l*100000, r * 100000)/100000.0;
}

// Applies the activation selected by `activation_way` to the neuron's
// total input (sum + theta).  Aborts on an unknown name.
double ANN::activationFunction(double sum, double theta, const std::string &activation_way)
{
    const double value = sum + theta; // pre-activation energy
    if (activation_way == "sigmoid"){
        return sigmoid(value);
    }
    if (activation_way == "ReLU"){
        return ReLU(value);
    }
    if (activation_way == "line"){
        return line(value);
    }
    cout<<"no activationWay !" << endl;
    assert(0);
    return 0;
}

// Forward step for one neuron: apply the activation to the accumulated
// energy, then add this neuron's weighted output into each successor's
// gain and decrement that successor's pending-predecessor counter.
// NOTE(review): holds this node's lock while taking each successor's lock;
// appears deadlock-free because edges strictly follow the height DAG, but
// verify if the graph construction ever changes.
void neuron_t::propagate(){
neuron_t& NODE = *this;
NODE.mutex.lock();
this -> output = ANN::activationFunction(NODE_GAIN, NODE_THETA, NODE_ACWAY);
for (auto &nextnode : NODE_ARRAY){
neuron_t &NEXT_NODE = *(nextnode.first);
NEXT_NODE.mutex.lock();
NEXT_NODE_GAIN = NEXT_NODE_GAIN + NODE_OUTPUT * NODE_TO_NEXTNODE_WEIGHT;
-- NEXT_NODE_COUNT; // one more of the successor's predecessors is done
NEXT_NODE.mutex.unlock();
}
NODE.mutex.unlock();
}

// Backward step for one neuron: accumulate d(error)/d(output) from all
// successors, update the outgoing edge weights and this node's theta,
// then decrement each predecessor's pending-successor counter.
void neuron_t::back(){
neuron_t &NODE = *this;
NODE.mutex.lock();
// Chain rule: sum each successor's error derivative through the edge
// weight and the successor's activation derivative.
for (auto &nextnode : NODE_ARRAY){
neuron_t &NEXT_NODE = *(nextnode.first);
NODE_PE += NEXT_NODE_PE * NODE_TO_NEXTNODE_WEIGHT * D_NEXT_NODE;
}
// Gradient-descent update of the outgoing weights.
for (auto &nextnode : NODE_ARRAY){
neuron_t &NEXT_NODE = *(nextnode.first);
NODE_TO_NEXTNODE_WEIGHT -= NODE_OUTPUT * D_NEXT_NODE * NEXT_NODE_PE * ANN::rate;
}
NODE_THETA -= NODE_PE * D_NODE * ANN::rate;
// One more of each predecessor's successors is done.
for (auto &backnode : NODE_BACK_ARRAY)
{
neuron_t &BACK_NODE = *(backnode.first);
BACK_NODE.mutex.lock();
-- BACK_NODE_COUNT;
BACK_NODE.mutex.unlock();
}
NODE.mutex.unlock();
}

// Thread-pool task: run the forward computation for `node`, then enqueue
// every successor that has no pending predecessors left (count == 0) and
// is not already queued.
void ANN::__propagate(neuron_t* node, thread_pool* pool)
{
//printf("节点%d 开始进入propagate计算\n", node-> number);
node -> propagate();
neuron_t &NODE = *node;
NODE.mutex.lock();
for (auto &x : NODE_ARRAY)
{
neuron_t &NEXT_NODE = *(x.first);
NEXT_NODE.mutex.lock();
if (!NEXT_NODE_COUNT && !NEXT_NODE.isInThreadPool)
{
NEXT_NODE.isInThreadPool = true;
FNT f = __propagate;
pool -> async<neuron_t*, thread_pool*>(f, &NEXT_NODE, pool);
}
NEXT_NODE.mutex.unlock();
}
NODE.mutex.unlock();
}

// Thread-pool task: run the backward computation for `node`, then enqueue
// every predecessor that has no pending successors left (count == 0) and
// is not already queued.
void ANN::__back(neuron_t* node, thread_pool* pool)
{
//printf("节点%d开始进入back计算\n", node -> number);
node -> back();
neuron_t &NODE = *node;
NODE.mutex.lock();
//printf("节点%d获得back锁\n", node -> number);
for (auto &x : NODE_BACK_ARRAY)
{
neuron_t &BACK_NODE = *(x.first);
BACK_NODE.mutex.lock();
if (!BACK_NODE_COUNT && !BACK_NODE.isInThreadPool)
{
BACK_NODE.isInThreadPool = true;
FNT f = __back;
pool -> async<neuron_t*, thread_pool*>(f, &BACK_NODE, pool);
}
BACK_NODE.mutex.unlock();
}
NODE.mutex.unlock();
}



thread_pool.cpp

#include "thread_pool.h"

/**
 * Constructs a thread pool with `num_threads` threads and an empty work queue.
 */
thread_pool::thread_pool(unsigned int num_threads) : num_threads(num_threads){
    // Initialise the running-task counter BEFORE any worker starts: the
    // original assigned it after init_threads(), racing with workers that
    // already read/modify it inside thread_func().
    in_thread_number = 0;
    task_mutex.lock();
    init_threads();
    task_mutex.unlock();
}

/**
 * Destructs the pool: signals the workers to drain the remaining queued
 * work and exit, then joins every thread.
 */
thread_pool::~thread_pool(){
    task_mutex.lock();
    join = true;
    task_mutex.unlock();
    for (auto &worker : threads)
        worker.join();
    threads.clear();
}

/**
* Creates threads for the thread pool.
*/
void thread_pool::init_threads(){
for(int i = 0;i < num_threads;i++){
std::function<void(void)> f = std::bind(&thread_pool::thread_func, this);
threads.push_back(std::move(std::thread(f)));
}
}

/**
 * Manages thread execution. This is the function that threads actually run.
 * It pulls a task out of the queue and executes it.
 *
 * NOTE(review): this is a busy-wait scheduler — with an empty queue every
 * worker spins on lock/unlock/yield, burning CPU (a likely contributor to
 * the observed slowdown); a condition variable would block instead.
 * NOTE(review): `tot` is incremented here by every worker concurrently, so
 * it must be an atomic (or be removed) to avoid a data race.
 */
void thread_pool::thread_func(){
for(;;){
++tot;
// Lock the queue.
task_mutex.lock();
/*
bool flag = task_mutex.try_lock();
if (!flag)
{
std::this_thread::yield();
continue;
}
*/

// If there's nothing to do and we're not ready to join, just
// yield.
if(tasks.empty() && !join){
task_mutex.unlock();
std::this_thread::yield();
continue;
}
// If there's tasks waiting, do one.
else if(!tasks.empty()){
// Get a task.
auto f = std::move(tasks.front());
++ in_thread_number;
tasks.pop_front();

// Unlock the queue.
task_mutex.unlock();

// Execute the async function.
f.get();
-- in_thread_number;
}
// If there's no tasks and we're ready to join, then exit the
// function (effectively joining).
else if(join){
task_mutex.unlock();
return;
}
}
}


thread_pool.h
#ifndef THREAD_POOL_H
#define THREAD_POOL_H

#include <atomic>
#include <deque>
#include <functional>
#include <future>
#include <list>
#include <bits/stdc++.h>
#include <condition_variable>

// A minimal fixed-size thread pool with a shared FIFO work queue.
class thread_pool{
public:
    thread_pool(unsigned int);
    ~thread_pool();

    // Debug counter of scheduler-loop iterations across all workers.
    // Atomic because every worker increments it concurrently (was a plain
    // int, i.e. a data race).
    std::atomic<int> tot{0};

    // Spins (with yield) until the queue is empty and no task is running.
    void wait()
    {
        while(true)
        {
            if (task_mutex.try_lock())
            {
                if (tasks.empty() && !in_thread_number)
                {
                    task_mutex.unlock();
                    break;
                }
                task_mutex.unlock();
                std::this_thread::yield();
            }
            else std::this_thread::yield();
        }
    }

    // Queues `f(args...)` for execution on the pool and returns a future
    // the caller may wait on.  The promise is now held by shared_ptr: the
    // original allocated it with raw `new` and deleted it only inside the
    // returned (deferred) future, so every caller that discarded the
    // return value — as the network code does for every task — leaked one
    // promise per task.
    template<typename... Args>
    std::future<void> async(std::function<void(Args...)> f, Args... args){
        typedef std::function<void(Args...)> F;
        auto p = std::make_shared<std::promise<void>>();

        // Runs the user function, then signals completion.
        auto task_wrapper = [p](F&& f, Args... args){
            f(args...);
            p->set_value();
        };

        // What the caller's future runs: block until the task completed.
        auto ret_wrapper = [p](){
            p->get_future().get();
        };

        task_mutex.lock();

        // Package the task wrapper so a worker can execute it later.
        auto task = std::async(std::launch::deferred,
                               task_wrapper,
                               std::move(f),
                               args...);

        // Push the task onto the work queue.
        tasks.emplace_back(std::move(task));

        task_mutex.unlock();

        // Hand the caller a deferred future that waits for completion.
        return std::async(std::launch::deferred,
                          ret_wrapper);
    }

protected:
    void thread_func();

    void init_threads();

private:
    bool join = false;                  // set by the destructor to stop workers
    unsigned int num_threads;

    std::mutex task_mutex;              // guards `tasks` and `join`
    std::deque<std::future<void>> tasks;

    std::list<std::thread> threads;

    // Number of tasks currently executing.  Brace-initialised here so no
    // worker can ever observe an uninitialised atomic (the constructor
    // previously assigned it only after starting the threads).
    std::atomic<int> in_thread_number{0};
};

#endif // THREAD_POOL_HPP


Makefile

# Compiler flags.  The Qt defines and warning silencers were carried over
# from the Qt project this Makefile was derived from.
CXXFLAGS += -m64 \
-pipe \
-O2 \
-D_REENTRANT \
-W \
-fPIC \
-DQT_NO_DEBUG \
-DQT_NO_KEYWORDS \
-DQT_OPENGL_LIB \
-DQT_WIDGETS_LIB \
-DQT_GUI_LIB \
-DQT_XML_LIB \
-DQT_CORE_LIB \
-Wno-sign-compare \
-Wno-unused-result

# Header search paths: Qt5 modules, dbus-cxx, dbus-1 and sigc++.
INCPATH += -I. \
-isystem /usr/include/x86_64-linux-gnu/qt5 \
-isystem /usr/include/x86_64-linux-gnu/qt5/QtOpenGL \
-isystem /usr/include/x86_64-linux-gnu/qt5/QtWidgets \
-isystem /usr/include/x86_64-linux-gnu/qt5/QtGui \
-isystem /usr/include/x86_64-linux-gnu/qt5/QtXml \
-isystem /usr/include/x86_64-linux-gnu/qt5/QtCore \
-I/usr/lib/x86_64-linux-gnu/qt5/mkspecs/linux-g++-64 \
-I/usr/local/include/dbus-cxx-0.9/ \
-I/usr/include/dbus-1.0/ \
-I/usr/lib/x86_64-linux-gnu/dbus-1.0/include \
-I/usr/include/sigc++-2.0 \
-I/usr/lib/x86_64-linux-gnu/sigc++-2.0/include \
-I.

# Link libraries (Qt, pthread for the thread pool, jsoncpp, dbus, sigc++).
LIBS += -lQGLViewer-qt5 \
-lGL \
-lQt5OpenGL \
-lQt5Widgets \
-lQt5Gui \
-lQt5Xml \
-lQt5Core \
-lpthread \
-ljsoncpp \
-lQGLViewer-qt5 \
-ldbus-1 \
-lsigc-2.0 \
-lrt \
-ldbus-cxx \
-lpopt

# Object files that make up the final binary.
objects = net.o thread_pool.o main.o

# Compiler driver with flags and include paths baked in.
CXX = g++ $(CXXFLAGS) -std=c++14 $(INCPATH)

# Link rule plus one compile rule per translation unit.
main : $(objects)
$(CXX) -o main $(objects) $(LIBS)
main.o : net.h main.cpp
$(CXX) -c main.cpp
net.o : net.cpp thread_pool.h net.h
$(CXX) -c net.cpp
thread_pool.o : thread_pool.h thread_pool.cpp
$(CXX) -c thread_pool.cpp

# Remove build artifacts.
clean :
rm *.o



data.txt

972
0 1
0 8
0 101
0 114
0 65
0 44
1 120
1 0
1 148
1 114
1 79
1 101
2 109
2 8
2 144
2 52
2 65
2 83
3 77
3 7
3 22
3 100
3 26
3 96
4 61
4 103
4 142
4 117
4 137
4 25
5 150
5 40
5 90
5 66
5 105
5 10
6 125
6 80
6 37
6 120
6 161
6 148
7 77
7 3
7 58
7 100
7 11
7 21
8 44
8 65
8 83
8 0
8 2
8 114
9 49
9 153
9 118
9 151
9 124
9 24
10 111
10 55
10 112
10 40
10 66
10 5
11 115
11 139
11 21
11 7
11 19
11 58
12 50
12 110
12 91
12 78
12 99
12 15
13 76
13 39
13 62
13 89
13 38
13 63
14 97
14 94
14 156
14 90
14 141
14 150
15 96
15 110
15 71
15 99
15 22
15 12
16 23
16 158
16 72
16 79
16 101
16 84
17 84
17 60
17 80
17 161
17 29
17 155
18 68
18 122
18 20
18 136
18 61
18 121
19 115
19 27
19 116
19 139
19 112
19 11
20 122
20 18
20 106
20 52
20 137
20 61
21 125
21 11
21 148
21 100
21 139
21 7
22 100
22 37
22 96
22 3
22 15
22 71
23 158
23 44
23 16
23 101
23 142
23 103
24 49
24 42
24 151
24 113
24 9
24 140
25 138
25 117
25 133
25 30
25 4
25 103
26 86
26 77
26 143
26 3
26 47
26 96
27 19
27 65
27 144
27 116
27 139
27 114
28 135
28 123
28 78
28 152
28 91
28 145
29 60
29 73
29 155
29 74
29 17
29 35
30 25
30 117
30 133
30 113
30 41
30 149
31 158
31 119
31 138
31 98
31 72
31 43
32 141
32 85
32 107
32 82
32 45
32 156
33 36
33 145
33 134
33 91
33 95
33 50
34 93
34 56
34 88
34 64
34 46
34 102
35 36
35 73
35 59
35 29
35 95
35 74
36 50
36 95
36 35
36 33
36 74
36 29
37 6
37 22
37 100
37 71
37 125
37 80
38 62
38 129
38 13
38 89
38 126
38 81
39 76
39 160
39 13
39 159
39 108
39 89
40 115
40 5
40 10
40 105
40 112
40 58
41 68
41 149
41 92
41 30
41 121
41 117
42 24
42 119
42 113
42 133
42 43
42 151
43 119
43 98
43 59
43 151
43 73
43 42
44 8
44 23
44 142
44 83
44 101
44 0
45 85
45 156
45 123
45 57
45 32
45 135
46 93
46 126
46 132
46 88
46 34
46 62
47 86
47 97
47 143
47 57
47 26
47 67
48 70
48 111
48 55
48 154
48 54
48 130
49 129
49 9
49 140
49 24
49 124
49 113
50 36
50 12
50 91
50 99
50 33
50 74
51 85
51 123
51 152
51 160
51 108
51 107
52 137
52 83
52 20
52 2
52 106
52 109
53 94
53 147
53 127
53 66
53 75
53 150
54 70
54 147
54 55
54 146
54 48
54 87
55 111
55 48
55 10
55 147
55 54
55 66
56 87
56 34
56 88
56 104
56 64
56 146
57 156
57 47
57 45
57 67
57 97
57 135
58 115
58 77
58 105
58 7
58 40
58 11
59 35
59 151
59 43
59 95
59 153
59 73
60 74
60 80
60 29
60 17
60 99
60 71
61 137
61 68
61 4
61 20
61 18
61 117
62 38
62 93
62 13
62 126
62 46
62 63
63 93
63 76
63 69
63 13
63 102
63 62
64 127
64 34
64 146
64 102
64 56
64 157
65 8
65 27
65 144
65 114
65 2
65 0
66 147
66 55
66 53
66 150
66 10
66 5
67 86
67 135
67 78
67 57
67 110
67 47
68 117
68 18
68 41
68 61
68 121
68 4
69 157
69 102
69 82
69 63
69 107
69 76
70 48
70 54
70 131
70 130
70 87
70 154
71 99
71 15
71 80
71 37
71 60
71 22
72 84
72 31
72 16
72 155
72 158
72 98
73 98
73 155
73 35
73 29
73 43
73 59
74 60
74 36
74 29
74 99
74 50
74 35
75 157
75 94
75 82
75 127
75 53
75 141
76 160
76 39
76 63
76 13
76 107
76 69
77 7
77 58
77 26
77 3
77 105
77 143
78 135
78 67
78 28
78 12
78 91
78 110
79 101
79 120
79 16
79 161
79 1
79 84
80 6
80 60
80 17
80 71
80 161
80 37
81 149
81 140
81 92
81 129
81 126
81 38
82 157
82 75
82 69
82 32
82 107
82 141
83 8
83 137
83 52
83 142
83 44
83 2
84 72
84 161
84 17
84 155
84 16
84 79
85 51
85 32
85 45
85 107
85 123
85 160
86 96
86 26
86 110
86 47
86 67
86 143
87 56
87 70
87 104
87 146
87 131
87 54
88 56
88 34
88 46
88 104
88 136
88 132
89 159
89 13
89 124
89 129
89 39
89 38
90 97
90 14
90 143
90 150
90 105
90 5
91 12
91 50
91 78
91 33
91 28
91 145
92 149
92 41
92 81
92 126
92 121
92 132
93 34
93 102
93 63
93 62
93 46
93 13
94 53
94 75
94 141
94 150
94 14
94 127
95 36
95 153
95 134
95 59
95 33
95 35
96 86
96 22
96 15
96 3
96 110
96 26
97 14
97 47
97 90
97 143
97 57
97 156
98 73
98 31
98 155
98 43
98 119
98 72
99 74
99 71
99 15
99 50
99 60
99 12
100 22
100 37
100 3
100 125
100 7
100 21
101 79
101 0
101 23
101 16
101 44
101 1
102 93
102 157
102 69
102 64
102 63
102 34
103 142
103 4
103 158
103 138
103 23
103 25
104 131
104 136
104 56
104 87
104 88
104 122
105 58
105 143
105 90
105 40
105 5
105 77
106 122
106 20
106 131
106 130
106 52
106 109
107 32
107 160
107 69
107 85
107 82
107 76
108 159
108 51
108 128
108 39
108 160
108 152
109 2
109 130
109 154
109 144
109 106
109 52
110 86
110 12
110 15
110 78
110 96
110 67
111 10
111 48
111 112
111 55
111 116
111 154
112 115
112 111
112 116
112 10
112 19
112 40
113 42
113 133
113 30
113 24
113 140
113 149
114 1
114 139
114 0
114 148
114 65
114 27
115 11
115 40
115 112
115 58
115 19
115 105
116 112
116 27
116 19
116 144
116 154
116 111
117 68
117 25
117 30
117 4
117 41
117 61
118 153
118 9
118 134
118 128
118 124
118 159
119 31
119 42
119 43
119 133
119 98
119 138
120 1
120 6
120 79
120 148
120 161
120 125
121 132
121 68
121 92
121 136
121 41
121 18
122 106
122 20
122 18
122 131
122 136
122 104
123 51
123 135
123 28
123 45
123 152
123 85
124 159
124 49
124 89
124 118
124 9
124 129
125 21
125 6
125 100
125 148
125 37
125 120
126 46
126 92
126 62
126 132
126 81
126 38
127 53
127 146
127 75
127 64
127 157
127 147
128 108
128 118
128 134
128 152
128 145
128 159
129 49
129 38
129 81
129 89
129 140
129 124
130 109
130 70
130 154
130 106
130 131
130 48
131 70
131 122
131 104
131 106
131 87
131 130
132 121
132 46
132 126
132 92
132 136
132 88
133 25
133 138
133 42
133 30
133 113
133 119
134 153
134 95
134 118
134 33
134 128
134 145
135 28
135 67
135 123
135 78
135 45
135 57
136 104
136 18
136 121
136 88
136 132
136 122
137 61
137 52
137 83
137 20
137 4
137 142
138 31
138 25
138 133
138 103
138 119
138 158
139 11
139 114
139 148
139 19
139 21
139 27
140 49
140 149
140 81
140 113
140 129
140 24
141 32
141 94
141 156
141 82
141 75
141 14
142 44
142 103
142 83
142 4
142 23
142 137
143 97
143 47
143 26
143 90
143 105
143 77
144 27
144 65
144 109
144 2
144 116
144 154
145 152
145 33
145 91
145 128
145 28
145 134
146 127
146 54
146 64
146 87
146 147
146 56
147 66
147 54
147 53
147 55
147 146
147 127
148 1
148 120
148 114
148 139
148 125
148 21
149 81
149 41
149 140
149 92
149 113
149 30
150 5
150 94
150 90
150 66
150 14
150 53
151 24
151 59
151 43
151 9
151 153
151 42
152 145
152 51
152 28
152 123
152 128
152 108
153 134
153 95
153 9
153 118
153 151
153 59
154 109
154 130
154 116
154 144
154 48
154 111
155 73
155 98
155 29
155 84
155 17
155 72
156 57
156 14
156 45
156 141
156 97
156 32
157 75
157 82
157 102
157 69
157 127
157 64
158 31
158 23
158 16
158 103
158 72
158 138
159 108
159 124
159 89
159 39
159 128
159 118
160 76
160 51
160 39
160 107
160 108
160 85
161 84
161 6
161 17
161 79
161 80
161 120
内容来自用户分享和网络整理,不保证内容的准确性,如有侵权内容,可联系管理员处理 点击这里给我发消息
标签: 
相关文章推荐