
Intel DAAL AI Acceleration: Neural Networks

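The example below is Intel DAAL's neural_net_dense_batch.py: it builds a small fully-connected topology, trains it in batch mode with an SGD solver, and then scores a test set with the resulting prediction model.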
# file: neural_net_dense_batch.py
#===============================================================================
# Copyright 2014-2018 Intel Corporation.
#
# This software and the related documents are Intel copyrighted  materials,  and
# your use of  them is  governed by the  express license  under which  they were
# provided to you (License).  Unless the License provides otherwise, you may not
# use, modify, copy, publish, distribute,  disclose or transmit this software or
# the related documents without Intel's prior written permission.
#
# This software and the related documents  are provided as  is,  with no express
# or implied  warranties,  other  than those  that are  expressly stated  in the
# License.
#===============================================================================

#
# !  Content:
# !    Python example of neural network training and scoring
# !*****************************************************************************

#
## <a name="DAAL-EXAMPLE-PY-NEURAL_NET_DENSE_BATCH"></a>
## \example neural_net_dense_batch.py
#

import os
import sys

import numpy as np

from daal.algorithms.neural_networks import initializers
from daal.algorithms.neural_networks import layers
from daal.algorithms import optimization_solver
from daal.algorithms.neural_networks import training, prediction
from daal.data_management import NumericTable, HomogenNumericTable

utils_folder = os.path.realpath(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
if utils_folder not in sys.path:
    sys.path.insert(0, utils_folder)
from utils import printTensors, readTensorFromCSV

# Input data set parameters
trainDatasetFile = os.path.join("..", "data", "batch", "neural_network_train.csv")
trainGroundTruthFile = os.path.join("..", "data", "batch", "neural_network_train_ground_truth.csv")
testDatasetFile = os.path.join("..", "data", "batch", "neural_network_test.csv")
testGroundTruthFile = os.path.join("..", "data", "batch", "neural_network_test_ground_truth.csv")

# Indices of the layers in the network topology, assigned in insertion order
fc1 = 0
fc2 = 1
sm1 = 2

batchSize = 10

def configureNet():
    # Create the layers of the neural network
    # Fully-connected layer with 5 outputs; initialize its weights and biases
    fullyConnectedLayer1 = layers.fullyconnected.Batch(5)
    fullyConnectedLayer1.parameter.weightsInitializer = initializers.uniform.Batch(-0.001, 0.001)
    fullyConnectedLayer1.parameter.biasesInitializer = initializers.uniform.Batch(0, 0.5)

    # Fully-connected layer with 2 outputs; initialize its weights and biases
    fullyConnectedLayer2 = layers.fullyconnected.Batch(2)
    fullyConnectedLayer2.parameter.weightsInitializer = initializers.uniform.Batch(0.5, 1)
    fullyConnectedLayer2.parameter.biasesInitializer = initializers.uniform.Batch(0.5, 1)

    # Softmax cross-entropy loss layer
    softmaxCrossEntropyLayer = layers.loss.softmax_cross.Batch()

    # Create the configuration of the neural network with these layers
    topology = training.Topology()

    # Add the layers to the topology and connect them in a chain
    topology.push_back(fullyConnectedLayer1)
    topology.push_back(fullyConnectedLayer2)
    topology.push_back(softmaxCrossEntropyLayer)
    topology.get(fc1).addNext(fc2)
    topology.get(fc2).addNext(sm1)
    return topology

def trainModel():
    # Read the training data set from a .csv file into tensors
    trainingData = readTensorFromCSV(trainDatasetFile)
    trainingGroundTruth = readTensorFromCSV(trainGroundTruthFile, True)

    # Create an SGD optimization solver for the neural network training
    sgdAlgorithm = optimization_solver.sgd.Batch(fptype=np.float32)

    # Set the learning rate for the optimization solver
    learningRate = 0.001
    sgdAlgorithm.parameter.learningRateSequence = HomogenNumericTable(1, 1, NumericTable.doAllocate, learningRate)
    # Set the batch size and the iteration count for one pass over the training set
    sgdAlgorithm.parameter.batchSize = batchSize
    sgdAlgorithm.parameter.nIterations = int(trainingData.getDimensionSize(0) / sgdAlgorithm.parameter.batchSize)

    # Create an algorithm to train the neural network
    net = training.Batch(sgdAlgorithm)

    # The network is initialized with the dimensions of one batch of input data
    sampleSize = trainingData.getDimensions()
    sampleSize[0] = batchSize

    # Configure the neural network
    topology = configureNet()
    net.initialize(sampleSize, topology)

    # Pass the training data set and dependent values to the algorithm
    net.input.setInput(training.data, trainingData)
    net.input.setInput(training.groundTruth, trainingGroundTruth)

    # Run the training and retrieve the training model
    trainingModel = net.compute().get(training.model)

    # Return the single-precision prediction model built from the trained weights
    return trainingModel.getPredictionModel_Float32()

def testModel(predictionModel):
    # Read the testing data set from a .csv file into a tensor
    predictionData = readTensorFromCSV(testDatasetFile)

    # Create an algorithm to compute the neural network predictions
    net = prediction.Batch()

    # Score the whole test set in a single batch
    net.parameter.batchSize = predictionData.getDimensionSize(0)

    # Set the input objects for the prediction network
    net.input.setModelInput(prediction.model, predictionModel)
    net.input.setTensorInput(prediction.data, predictionData)

    # Run the prediction and return its results
    return net.compute()

def printResults(predictionResult):
    # Read testing ground truth from a .csv file and create a tensor to store the data
    predictionGroundTruth = readTensorFromCSV(testGroundTruthFile)

    printTensors(predictionGroundTruth, predictionResult.getResult(prediction.prediction),
                 "Ground truth", "Neural network predictions: each class probability",
                 "Neural network classification results (first 20 observations):", 20)

if __name__ == "__main__":
    # Train the network, score the test set, and print the results
    predictionModel = trainModel()
    predictionResult = testModel(predictionModel)
    printResults(predictionResult)
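Running the script trains the two-layer network on neural_network_train.csv and prints the ground truth next to the predicted class probabilities for the first 20 test observations.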

  Currently supported layers (a minimal usage sketch follows this list):

  • Common Parameters
  • Fully Connected Forward Layer
  • Fully Connected Backward Layer
  • Absolute Value Forward Layer
  • Absolute Value Backward Layer
  • Logistic Forward Layer
  • Logistic Backward Layer
  • pReLU Forward Layer
  • pReLU Backward Layer
  • ReLU Forward Layer
  • ReLU Backward Layer
  • SmoothReLU Forward Layer
  • SmoothReLU Backward Layer
  • Hyperbolic Tangent Forward Layer
  • Hyperbolic Tangent Backward Layer
  • Batch Normalization Forward Layer
  • Batch Normalization Backward Layer
  • Local-Response Normalization Forward Layer
  • Local-Response Normalization Backward Layer
  • Local-Contrast Normalization Forward Layer
  • Local-Contrast Normalization Backward Layer
  • Dropout Forward Layer
  • Dropout Backward Layer
  • 1D Max Pooling Forward Layer
  • 1D Max Pooling Backward Layer
  • 2D Max Pooling Forward Layer
  • 2D Max Pooling Backward Layer
  • 3D Max Pooling Forward Layer
  • 3D Max Pooling Backward Layer
  • 1D Average Pooling Forward Layer
  • 1D Average Pooling Backward Layer
  • 2D Average Pooling Forward Layer
  • 2D Average Pooling Backward Layer
  • 3D Average Pooling Forward Layer
  • 3D Average Pooling Backward Layer
  • 2D Stochastic Pooling Forward Layer
  • 2D Stochastic Pooling Backward Layer
  • 2D Spatial Pyramid Pooling Forward Layer
  • 2D Spatial Pyramid Pooling Backward Layer
  • 2D Convolution Forward Layer
  • 2D Convolution Backward Layer
  • 2D Transposed Convolution Forward Layer
  • 2D Transposed Convolution Backward Layer
  • 2D Locally-connected Forward Layer
  • 2D Locally-connected Backward Layer
  • Reshape Forward Layer
  • Reshape Backward Layer
  • Concat Forward Layer
  • Concat Backward Layer
  • Split Forward Layer
  • Split Backward Layer
  • Softmax Forward Layer
  • Softmax Backward Layer
  • Loss Forward Layer
  • Loss Backward Layer
  • Loss Softmax Cross-entropy Forward Layer
  • Loss Softmax Cross-entropy Backward Layer
  • Loss Logistic Cross-entropy Forward Layer
  • Loss Logistic Cross-entropy Backward Layer
  • Exponential Linear Unit Forward Layer
  • Exponential Linear Unit Backward Layer
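
Each of these layers plugs into the same Topology API used in configureNet() above. As a minimal sketch (assuming the pydaal submodules layers.relu and layers.dropout mirror the layer names in this list, the way layers.fullyconnected and layers.loss.softmax_cross do in the example; verify against your DAAL version), a deeper network with an activation and dropout could be configured like this:

def configureDeeperNet():
    # Chain: fully connected -> ReLU -> dropout -> fully connected -> loss
    topology = training.Topology()

    # Indices follow insertion order, as with fc1/fc2/sm1 above
    topology.push_back(layers.fullyconnected.Batch(50))    # 0: hidden layer, 50 outputs
    topology.push_back(layers.relu.Batch())                # 1: ReLU activation (assumed submodule name)
    topology.push_back(layers.dropout.Batch())             # 2: dropout (assumed submodule name)
    topology.push_back(layers.fullyconnected.Batch(2))     # 3: output layer, 2 classes
    topology.push_back(layers.loss.softmax_cross.Batch())  # 4: softmax cross-entropy loss

    # Connect the layers into a linear chain
    for i in range(4):
        topology.get(i).addNext(i + 1)
    return topology

Layers that take hyperparameters (pooling window sizes, convolution kernel counts, and so on) receive them through their parameter member, just as the fully-connected layers above receive their weight and bias initializers.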