Add ps optimizer info builder.

ZPaC 2020-07-11 15:06:42 +08:00
parent 2e45aa6c19
commit 8273c2a39c
5 changed files with 339 additions and 1 deletion

View File: mindspore/ccsrc/parallel/CMakeLists.txt

@@ -1,5 +1,5 @@
file(GLOB_RECURSE _PARALLEL_SRC_FILES RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "*.cc")
-list(REMOVE_ITEM _PARALLEL_SRC_FILES "ps/util.cc" "ps/scheduler.cc" "ps/optimizer_info.cc")
+list(REMOVE_ITEM _PARALLEL_SRC_FILES "ps/util.cc" "ps/scheduler.cc" "ps/optimizer_info.cc" "ps/optimizer_info_builder.cc")
if (ENABLE_DUMP_PROTO)
list(REMOVE_ITEM _PARALLEL_SRC_FILES "parallel/strategy_checkpoint/parallel_strategy_checkpoint.cc")
endif ()

View File: mindspore/ccsrc/parallel/ps/common.h

@@ -0,0 +1,87 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_MINDSPORE_CCSRC_PARALLEL_PS_COMMON_H_
#define MINDSPORE_MINDSPORE_CCSRC_PARALLEL_PS_COMMON_H_
#include <iostream>
#include <vector>
#include <memory>
#include "ps/ps.h"
namespace mindspore {
namespace parallel {
namespace ps {
constexpr char kEnvCommType[] = "MS_COMM_TYPE";
constexpr char kEnvInterface[] = "MS_INTERFACE";
constexpr char kEnvPServerNum[] = "MS_SERVER_NUM";
constexpr char kEnvWorkerNum[] = "MS_WORKER_NUM";
constexpr char kEnvSchedulerHost[] = "MS_SCHED_HOST";
constexpr char kEnvSchedulerPort[] = "MS_SCHED_PORT";
constexpr char kEnvRole[] = "MS_ROLE";
constexpr char kEnvRoleOfPServer[] = "MS_PSERVER";
constexpr char kEnvRoleOfWorker[] = "MS_WORKER";
constexpr char kEnvRoleOfScheduler[] = "MS_SCHED";
constexpr char kDmlcCommType[] = "DMLC_PS_VAN_TYPE";
constexpr char kDmlcInterface[] = "DMLC_INTERFACE";
constexpr char kDmlcPServerNum[] = "DMLC_NUM_SERVER";
constexpr char kDmlcWorkerNum[] = "DMLC_NUM_WORKER";
constexpr char kDmlcRole[] = "DMLC_ROLE";
constexpr char kDmlcSchedulerHost[] = "DMLC_PS_ROOT_URI";
constexpr char kDmlcSchedulerPort[] = "DMLC_PS_ROOT_PORT";
constexpr char kCommTypeOfIBVerbs[] = "ibverbs";
constexpr char kCommTypeOfTCP[] = "zmq";
constexpr char kRoleOfPServer[] = "server";
constexpr char kRoleOfWorker[] = "worker";
constexpr char kRoleOfScheduler[] = "scheduler";
constexpr char kLearningRate[] = "learning_rate";
constexpr char kMomentum[] = "momentum";
constexpr char kApplyMomentum[] = "ApplyMomentum";
constexpr char kSparseAdam[] = "Adam";
constexpr char kSparseFtrl[] = "Ftrl";
constexpr int kInitWeightsCmd = 10;
constexpr int kInitWeightToOptimIdCmd = 11;
constexpr int kInitOptimInputsShapeCmd = 12;
constexpr int kInitEmbeddingsCmd = 20;
constexpr int kEmbeddingLookupCmd = 30;
constexpr size_t kInvalidKey = UINT64_MAX;
using Key = ::ps::Key;
using Keys = ::ps::SArray<Key>;
using Values = ::ps::SArray<float>;
using ValuesPtr = std::shared_ptr<Values>;
using Weight = ::ps::SArray<float>;
using Grad = ::ps::SArray<float>;
using LookupIds = ::ps::SArray<float>;
using Lengths = ::ps::SArray<int>;
using WeightPtr = std::shared_ptr<Weight>;
using GradPtr = std::shared_ptr<Grad>;
// using EmbeddingTable = std::unordered_map<int, WeightPtr>;
// using EmbeddingTable = ::ps::SArray<float>;
// using EmbeddingTablePtr = std::shared_ptr<EmbeddingTable>;
using InputsShape = std::vector<std::shared_ptr<std::vector<size_t>>>;
using InputsShapePtr = std::shared_ptr<std::vector<std::shared_ptr<std::vector<size_t>>>>;
} // namespace ps
} // namespace parallel
} // namespace mindspore
#endif // MINDSPORE_MINDSPORE_CCSRC_PARALLEL_PS_COMMON_H_
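For orientation, a minimal sketch (not from this commit) of the flat layout the optimizer info builders added below consume: each optimizer input is concatenated into one float buffer ("values"), with the length of each segment recorded in "lens". std::vector stands in for ::ps::SArray so the example compiles on its own, and the segment contents are made up.
#include <vector>
int main() {
  // Hypothetical worker-side packing for the momentum optimizer:
  // learning_rate, then gradient, then momentum, in that order.
  std::vector<float> learning_rate = {0.01f};
  std::vector<float> gradient = {0.1f, 0.2f, 0.3f, 0.4f};
  std::vector<float> momentum = {0.9f};
  std::vector<float> values;  // flat payload sent to the parameter server
  std::vector<int> lens;      // length of each segment, in elements
  for (const std::vector<float> *seg : {&learning_rate, &gradient, &momentum}) {
    values.insert(values.end(), seg->begin(), seg->end());
    lens.push_back(static_cast<int>(seg->size()));
  }
  // MomentumOptimInfoBuilder::BuildInputs (below) recovers the segments as
  // values.data(), values.data() + lens[0], and values.data() + lens[0] + lens[1].
  return 0;
}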

View File: mindspore/ccsrc/parallel/ps/optimizer_info_builder.cc

@@ -0,0 +1,184 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "parallel/ps/optimizer_info_builder.h"
#include <functional>
#include <vector>
#include <memory>
namespace mindspore {
namespace parallel {
namespace ps {
OptimizerInfo *OptimizerInfoBuilder::Build(const std::shared_ptr<PServerKernel> &pserver_kernel,
const WeightPtr &weight, const Keys &keys, const Values &values,
const Lengths &lens, const InputsShapePtr &inputs_shape, size_t worker_num) {
OptimizerInfo *optim_info = BuildInputs(weight, keys, values, lens, inputs_shape, worker_num);
std::vector<size_t> ws_sizes = pserver_kernel->workspace_sizes();
BuildWorkspaces(optim_info, ws_sizes, worker_num);
BuildOutputs(optim_info, worker_num);
return optim_info;
}
void OptimizerInfoBuilder::BuildWorkspaces(OptimizerInfo *info, const std::vector<size_t> &ws_sizes,
size_t worker_num) {
for (size_t i = 0; i < ws_sizes.size(); i++) {
size_t size = ws_sizes[i];
AddressPtr workspace = std::make_shared<kernel::Address>();
workspace->addr = new float[size];
workspace->size = size;
info->AddWorkspace(workspace);
}
}
OptimizerInfo *MomentumOptimInfoBuilder::BuildInputs(const WeightPtr &weight, const Keys &keys, const Values &values,
const Lengths &lens, const InputsShapePtr &inputs_shape,
size_t worker_num) {
AddressPtr weight_addr = std::make_shared<kernel::Address>();
weight_addr->addr = weight->data();
weight_addr->size = weight->size();
void *data_ptr = values.data();
AddressPtr accumulate = std::make_shared<kernel::Address>();
accumulate->addr = new float[weight->size()];
accumulate->size = weight->size();
AddressPtr learning_rate = std::make_shared<kernel::Address>();
learning_rate->addr = data_ptr;
learning_rate->size = lens[0];
AddressPtr gradient = std::make_shared<kernel::Address>();
gradient->addr = reinterpret_cast<float *>(learning_rate->addr) + lens[0];
gradient->size = lens[1];
AddressPtr momentum = std::make_shared<kernel::Address>();
momentum->addr = reinterpret_cast<float *>(gradient->addr) + lens[1];
momentum->size = lens[2];
return new MomentumOptimInfo(weight_addr, accumulate, learning_rate, gradient, momentum);
}
OptimizerInfo *SparseAdamOptimInfoBuilder::BuildInputs(const WeightPtr &weight, const Keys &keys, const Values &values,
const Lengths &lens, const InputsShapePtr &inputs_shape,
size_t worker_num) {
AddressPtr weight_addr = std::make_shared<kernel::Address>();
weight_addr->addr = weight->data();
weight_addr->size = weight->size();
AddressPtr m = std::make_shared<kernel::Address>();
m->addr = new float[weight->size()];
m->size = weight->size() * sizeof(float);
AddressPtr v = std::make_shared<kernel::Address>();
v->addr = new float[weight->size()];
v->size = weight->size() * sizeof(float);
void *data_ptr = values.data();
void *copy_data_ptr = new float[values.size()];
auto ret = memcpy_s(copy_data_ptr, values.size() * sizeof(float), data_ptr, values.size() * sizeof(float));
if (ret != 0) {
MS_LOG(EXCEPTION) << "memcpy_s error, errorno(" << ret << ")";
}
AddressPtr beta1_power = std::make_shared<kernel::Address>();
beta1_power->addr = copy_data_ptr;
beta1_power->size = lens[0] * sizeof(float);
AddressPtr beta2_power = std::make_shared<kernel::Address>();
beta2_power->addr = reinterpret_cast<float *>(beta1_power->addr) + lens[0];
beta2_power->size = lens[1] * sizeof(float);
AddressPtr learning_rate = std::make_shared<kernel::Address>();
learning_rate->addr = reinterpret_cast<float *>(beta2_power->addr) + lens[1];
learning_rate->size = lens[2] * sizeof(float);
AddressPtr beta1 = std::make_shared<kernel::Address>();
beta1->addr = reinterpret_cast<float *>(learning_rate->addr) + lens[2];
beta1->size = lens[3] * sizeof(float);
AddressPtr beta2 = std::make_shared<kernel::Address>();
beta2->addr = reinterpret_cast<float *>(beta1->addr) + lens[3];
beta2->size = lens[4] * sizeof(float);
AddressPtr epsilon = std::make_shared<kernel::Address>();
epsilon->addr = reinterpret_cast<float *>(beta2->addr) + lens[4];
epsilon->size = lens[5] * sizeof(float);
const std::shared_ptr<std::vector<size_t>> &grad_shape = (*inputs_shape)[9];
size_t total_grad_size =
std::accumulate((*grad_shape).begin(), (*grad_shape).end(), sizeof(float), std::multiplies<size_t>());
AddressPtr grad = std::make_shared<kernel::Address>();
grad->addr = new float[total_grad_size * worker_num];
auto ret2 = memcpy_s(grad->addr, lens[6] * sizeof(float), reinterpret_cast<float *>(epsilon->addr) + lens[5],
lens[6] * sizeof(float));
if (ret2 != 0) {
MS_LOG(EXCEPTION) << "memcpy_s error, errorno(" << ret2 << ")";
}
grad->size = lens[6] * sizeof(float);
const std::shared_ptr<std::vector<size_t>> &indices_shape = (*inputs_shape)[10];
size_t total_indice_size =
std::accumulate((*indices_shape).begin(), (*indices_shape).end(), sizeof(float), std::multiplies<size_t>());
AddressPtr indices = std::make_shared<kernel::Address>();
indices->addr = new float[total_indice_size * worker_num];
auto ret3 = memcpy_s(indices->addr, lens[7] * sizeof(float),
reinterpret_cast<float *>(epsilon->addr) + lens[5] + lens[6], lens[7] * sizeof(float));
if (ret3 != 0) {
MS_LOG(EXCEPTION) << "memcpy_s error, errorno(" << ret3 << ")";
}
indices->size = lens[7] * sizeof(float);
return new SparseAdamOptimInfo(weight_addr, m, v, beta1_power, beta2_power, learning_rate, beta1, beta2, epsilon,
grad, indices, total_grad_size, total_indice_size);
}
OptimizerInfo *SparseFtrlOptimInfoBuilder::BuildInputs(const WeightPtr &weight, const Keys &keys, const Values &values,
const Lengths &lens, const InputsShapePtr &inputs_shape,
size_t worker_num) {
AddressPtr weight_addr = std::make_shared<kernel::Address>();
weight_addr->addr = weight->data();
weight_addr->size = weight->size();
AddressPtr accum = std::make_shared<kernel::Address>();
accum->addr = new float[weight->size()];
accum->size = weight->size() * sizeof(float);
for (size_t i = 0; i < weight->size(); i++) {
float *tmp = reinterpret_cast<float *>(accum->addr);
tmp[i] = 1.0;
}
AddressPtr linear = std::make_shared<kernel::Address>();
linear->addr = new float[weight->size()];
memset_s(linear->addr, weight->size() * sizeof(float), 0x00, weight->size() * sizeof(float));  // zero-fill the linear buffer
linear->size = weight->size() * sizeof(float);
const std::shared_ptr<std::vector<size_t>> &grad_shape = (*inputs_shape)[3];
size_t total_grad_size = std::accumulate((*grad_shape).begin(), (*grad_shape).end(), 1, std::multiplies<size_t>());
AddressPtr grad = std::make_shared<kernel::Address>();
grad->addr = new float[total_grad_size * worker_num];
auto ret = memcpy_s(grad->addr, lens[0] * sizeof(float), values.data(), lens[0] * sizeof(float));
if (ret != 0) {
MS_LOG(EXCEPTION) << "memcpy_s error, errorno(" << ret << ")";
}
grad->size = lens[0] * sizeof(float);
const std::shared_ptr<std::vector<size_t>> &indices_shape = (*inputs_shape)[4];
size_t total_indice_size =
std::accumulate((*indices_shape).begin(), (*indices_shape).end(), 1, std::multiplies<size_t>());
AddressPtr indices = std::make_shared<kernel::Address>();
indices->addr = new float[total_indice_size * worker_num];
auto ret2 = memcpy_s(indices->addr, lens[1] * sizeof(float), reinterpret_cast<float *>(values.data()) + lens[0],
lens[1] * sizeof(float));
if (ret2 != 0) {
MS_LOG(EXCEPTION) << "memcpy_s error, errorno(" << ret2 << ")";
}
indices->size = lens[1] * sizeof(float);
return new SparseFtrlOptimInfo(weight_addr, accum, linear, grad, indices, total_grad_size, total_indice_size);
}
} // namespace ps
} // namespace parallel
} // namespace mindspore
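Build() is the only entry point a caller needs: it runs BuildInputs, then BuildWorkspaces with the kernel's workspace sizes, then BuildOutputs, and returns a raw OptimizerInfo pointer. A minimal sketch of a possible server-side call site follows; the helper name and the ownership policy are assumptions, not part of this commit.
// Hypothetical helper: drive a concrete builder and take ownership of the result.
std::unique_ptr<OptimizerInfo> BuildMomentumOptimInfo(
    const std::shared_ptr<PServerKernel> &pserver_kernel, const WeightPtr &weight,
    const Keys &keys, const Values &values, const Lengths &lens,
    const InputsShapePtr &inputs_shape, size_t worker_num) {
  MomentumOptimInfoBuilder builder;
  // Build() allocates the info with new and returns a raw pointer, so wrap it immediately.
  OptimizerInfo *info =
      builder.Build(pserver_kernel, weight, keys, values, lens, inputs_shape, worker_num);
  return std::unique_ptr<OptimizerInfo>(info);
}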

View File: mindspore/ccsrc/parallel/ps/optimizer_info_builder.h

@@ -0,0 +1,66 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_MINDSPORE_CCSRC_PARALLEL_PS_OPTIMIZER_INFO_BUILDER_H_
#define MINDSPORE_MINDSPORE_CCSRC_PARALLEL_PS_OPTIMIZER_INFO_BUILDER_H_
#include <vector>
#include <memory>
#include "kernel/kernel.h"
#include "kernel/ps/pserver_kernel.h"
#include "parallel/ps/optimizer_info.h"
namespace mindspore {
namespace parallel {
namespace ps {
using mindspore::kernel::KernelMod;
using mindspore::kernel::ps::PServerKernel;
class OptimizerInfoBuilder {
public:
OptimizerInfoBuilder() = default;
virtual ~OptimizerInfoBuilder() = default;
OptimizerInfo *Build(const std::shared_ptr<PServerKernel> &pserver_kernel, const WeightPtr &weight, const Keys &keys,
const Values &values, const Lengths &lens, const InputsShapePtr &inputs_shape,
size_t worker_num);
virtual OptimizerInfo *BuildInputs(const WeightPtr &weight, const Keys &keys, const Values &values,
const Lengths &lens, const InputsShapePtr &inputs_shape, size_t worker_num) = 0;
virtual void BuildWorkspaces(OptimizerInfo *info, const std::vector<size_t> &ws_sizes, size_t worker_num);
virtual void BuildOutputs(OptimizerInfo *info, size_t worker_num) {}
};
class MomentumOptimInfoBuilder : public OptimizerInfoBuilder {
public:
OptimizerInfo *BuildInputs(const WeightPtr &weight, const Keys &keys, const Values &values, const Lengths &lens,
const InputsShapePtr &inputs_shape, size_t worker_num) override;
};
class SparseAdamOptimInfoBuilder : public OptimizerInfoBuilder {
public:
OptimizerInfo *BuildInputs(const WeightPtr &weight, const Keys &keys, const Values &values, const Lengths &lens,
const InputsShapePtr &inputs_shape, size_t worker_num) override;
};
class SparseFtrlOptimInfoBuilder : public OptimizerInfoBuilder {
public:
OptimizerInfo *BuildInputs(const WeightPtr &weight, const Keys &keys, const Values &values, const Lengths &lens,
const InputsShapePtr &inputs_shape, size_t worker_num) override;
};
} // namespace ps
} // namespace parallel
} // namespace mindspore
#endif // MINDSPORE_MINDSPORE_CCSRC_PARALLEL_PS_OPTIMIZER_INFO_BUILDER_H_
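The three concrete builders line up with the optimizer name constants kApplyMomentum, kSparseAdam, and kSparseFtrl from parallel/ps/common.h. This commit does not show how the parameter server chooses among them; the sketch below is one assumed way to dispatch by optimizer name, not part of the commit.
// Assumed dispatch helper: map an optimizer name to the matching builder,
// or return nullptr for an unknown name.
std::shared_ptr<OptimizerInfoBuilder> SelectOptimInfoBuilder(const std::string &optim_name) {
  if (optim_name == kApplyMomentum) {
    return std::make_shared<MomentumOptimInfoBuilder>();
  }
  if (optim_name == kSparseAdam) {
    return std::make_shared<SparseAdamOptimInfoBuilder>();
  }
  if (optim_name == kSparseFtrl) {
    return std::make_shared<SparseFtrlOptimInfoBuilder>();
  }
  return nullptr;
}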

View File: tests/ut/cpp/CMakeLists.txt

@@ -118,6 +118,7 @@ list(REMOVE_ITEM MINDSPORE_SRC_LIST "../../../mindspore/ccsrc/parallel/strategy_
list(REMOVE_ITEM MINDSPORE_SRC_LIST "../../../mindspore/ccsrc/parallel/ps/util.cc")
list(REMOVE_ITEM MINDSPORE_SRC_LIST "../../../mindspore/ccsrc/parallel/ps/scheduler.cc")
list(REMOVE_ITEM MINDSPORE_SRC_LIST "../../../mindspore/ccsrc/parallel/ps/optimizer_info.cc")
+list(REMOVE_ITEM MINDSPORE_SRC_LIST "../../../mindspore/ccsrc/parallel/ps/optimizer_info_builder.cc")
list(REMOVE_ITEM MINDSPORE_SRC_LIST "../../../mindspore/ccsrc/utils/anf_ir.pb.cc")
list(REMOVE_ITEM MINDSPORE_SRC_LIST "../../../mindspore/ccsrc/utils/node_strategy.pb.cc")
list(REMOVE_ITEM MINDSPORE_SRC_LIST "../../../mindspore/ccsrc/utils/load_onnx/anf_model_parser.cc")