diff --git a/Dockerfile b/Dockerfile
index 7856d3bbc492af4cad2d6b9f49001c90eadbea43..0f13acabc3e5e1f6a46c5712ca6ad199266dd5ed 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -57,7 +57,7 @@ RUN localedef -i en_US -f UTF-8 en_US.UTF-8
 # specify sphinx version as 1.5.6 and remove -U option for [pip install -U
 # sphinx-rtd-theme] since -U option will cause sphinx being updated to newest
 # version(1.7.1 for now), which causes building documentation failed.
-RUN pip install --upgrade pip && \
+RUN pip install --upgrade pip==9.0.3 && \
     pip install -U wheel && \
     pip install -U docopt PyYAML sphinx==1.5.6 && \
     pip install sphinx-rtd-theme==0.1.9 recommonmark
diff --git a/doc/fluid/dev/index_cn.rst b/doc/fluid/dev/index_cn.rst
index b123b756e2251c38f319e1aefa2cb04fd7a36b03..ad798003f560e7fb0e6db6083fdd152fd3417584 100644
--- a/doc/fluid/dev/index_cn.rst
+++ b/doc/fluid/dev/index_cn.rst
@@ -4,6 +4,7 @@
 .. toctree::
    :maxdepth: 1
 
+   api_doc_std_cn.md
    new_op_cn.md
    new_op_kernel.md
    use_eigen_cn.md
diff --git a/doc/fluid/dev/index_en.rst b/doc/fluid/dev/index_en.rst
index 98988fc22dcedecdbcd67fb3bf761377bf046337..80c899a82fa452c5cd8f38dad89c15d3041b09e3 100644
--- a/doc/fluid/dev/index_en.rst
+++ b/doc/fluid/dev/index_en.rst
@@ -4,6 +4,7 @@ Development
 .. toctree::
    :maxdepth: 1
 
+   api_doc_std_en.md
    new_op_en.md
    new_op_kernel.md
    use_eigen_en.md
diff --git a/paddle/fluid/framework/details/multi_devices_graph_builder.cc b/paddle/fluid/framework/details/multi_devices_graph_builder.cc
index e0dd9e6068174a4b0348d503f4082bee6ff68dac..5a95cbc53625888bac539f91af391ff0babec17b 100644
--- a/paddle/fluid/framework/details/multi_devices_graph_builder.cc
+++ b/paddle/fluid/framework/details/multi_devices_graph_builder.cc
@@ -55,21 +55,21 @@ MultiDevSSAGraphBuilder::MultiDevSSAGraphBuilder(
   }
 }
 
-void MultiDevSSAGraphBuilder::CreateOpHandleIOs(SSAGraph *result, OpDesc *op,
+void MultiDevSSAGraphBuilder::CreateOpHandleIOs(SSAGraph *result,
+                                                const OpDesc &op,
                                                 const platform::Place &p,
                                                 const size_t &i) const {
   auto *op_handle = result->ops_.back().get();
-  op_handle->dev_ctxes_[p] = const_cast<platform::DeviceContext *>(
-      platform::DeviceContextPool::Instance().Get(p));
+  op_handle->dev_ctxes_[p] = platform::DeviceContextPool::Instance().Get(p);
 
-  auto var_names = op->InputArgumentNames();
+  auto var_names = op.InputArgumentNames();
 
   for (auto &each_var_name : var_names) {
     VarHandle *var = CreateOrGetLatestVarHandle(result, each_var_name, p, i);
     op_handle->AddInput(var);
   }
 
-  var_names = op->OutputArgumentNames();
+  var_names = op.OutputArgumentNames();
 
   for (auto &each_var_name : var_names) {
     CreateOpOutput(result, op_handle, each_var_name, p, i);
@@ -107,7 +107,7 @@ std::unique_ptr<SSAGraph> MultiDevSSAGraphBuilder::Build(
       result.ops_.emplace_back(new SendOpHandle(*op, s, p));
       // Create inputs for output on original place and no ssa output
       // is created for send op.
-      CreateOpHandleIOs(&result, op, p, 0);
+      CreateOpHandleIOs(&result, *op, p, 0);
       continue;
     }
 
@@ -117,7 +117,7 @@ std::unique_ptr<SSAGraph> MultiDevSSAGraphBuilder::Build(
       result.ops_.emplace_back(new ComputationOpHandle(*op, s, p));
       auto *op_handle = result.ops_.back().get();
 
-      CreateOpHandleIOs(&result, op, p, i);
+      CreateOpHandleIOs(&result, *op, p, i);
 
       auto var_names = op->OutputArgumentNames();
 
diff --git a/paddle/fluid/framework/details/multi_devices_graph_builder.h b/paddle/fluid/framework/details/multi_devices_graph_builder.h
index de34caab1be85eecb741a5003f026eb982e178ea..f1518d75b421006db6311c3b0f602e47000ab381 100644
--- a/paddle/fluid/framework/details/multi_devices_graph_builder.h
+++ b/paddle/fluid/framework/details/multi_devices_graph_builder.h
@@ -45,8 +45,8 @@ class MultiDevSSAGraphBuilder : public SSAGraphBuilder {
   std::unique_ptr<SSAGraph> Build(const ProgramDesc &program) const override;
 
  private:
-  void CreateOpHandleIOs(SSAGraph *result, OpDesc *op, const platform::Place &p,
-                         const size_t &i) const;
+  void CreateOpHandleIOs(SSAGraph *result, const OpDesc &op,
+                         const platform::Place &p, const size_t &i) const;
 
  private:
   std::string loss_var_name_;
diff --git a/python/paddle/fluid/metrics.py b/python/paddle/fluid/metrics.py
index 99a81c1d4244b919a53dfec36fc5a6659c10adae..c618b02a768f2ca3e2b2914d8ee0134836d5c0d2 100644
--- a/python/paddle/fluid/metrics.py
+++ b/python/paddle/fluid/metrics.py
@@ -169,7 +169,7 @@ class Accuracy(MetricBase):
         return self.value / self.weight
 
 
-class ChunkEvalutor(MetricBase):
+class ChunkEvaluator(MetricBase):
     """
     Accumulate counter numbers output by chunk_eval from mini-batches and
     compute the precision recall and F1-score using the accumulated counter
@@ -177,7 +177,7 @@ class ChunkEvalutor(MetricBase):
     """
 
     def __init__(self, name=None):
-        super(ChunkEvalutor, self).__init__(name)
+        super(ChunkEvaluator, self).__init__(name)
         self.num_infer_chunks = 0
         self.num_label_chunks = 0
         self.num_correct_chunks = 0
diff --git a/python/paddle/fluid/parallel_executor.py b/python/paddle/fluid/parallel_executor.py
index 5ce2aa1fc4d0b275b502af0f97e4a0f83e85de5b..8d9f8c34899d70871c71fac2af2ca5d612ec1d08 100644
--- a/python/paddle/fluid/parallel_executor.py
+++ b/python/paddle/fluid/parallel_executor.py
@@ -61,8 +61,8 @@ class ParallelExecutor(object):
                                              main_program=test_program,
                                              share_vars_from=train_exe)
 
-            train_loss, = train_exe.run([loss.name], feed_dict=feed_dict)
-            test_loss, = test_exe.run([loss.name], feed_dict=feed_dict)
+            train_loss, = train_exe.run([loss.name], feed=feed_dict)
+            test_loss, = test_exe.run([loss.name], feed=feed_dict)
     """
 
         self._places = []
@@ -123,22 +123,23 @@
             allow_op_delay)
         self.scope = scope
 
-    def run(self, fetch_list, feed_dict={}):
+    def run(self, fetch_list, feed={}, feed_dict={}):
         """
         :param fetch_list: A list of variable names that will be fetched.
-        :param feed_dict: A dict mapping for feed variable name to LoDTensor
+        :param feed: A dict mapping for feed variable name to LoDTensor
           or numpy array.
         :return: fetched value list.
""" - if not isinstance(feed_dict, dict): - raise TypeError("feed_dict should be a dict") + feed = feed_dict + if not isinstance(feed, dict): + raise TypeError("feed should be a dict") feed_tensor_dict = {} - for i, feed_name in enumerate(feed_dict): - feed_tensor = feed_dict[feed_name] + for i, feed_name in enumerate(feed): + feed_tensor = feed[feed_name] if not isinstance(feed_tensor, core.LoDTensor): feed_tensor = core.LoDTensor() - feed_tensor.set(feed_dict[feed_name], self._act_places[0]) + feed_tensor.set(feed[feed_name], self._act_places[0]) feed_tensor_dict[feed_name] = feed_tensor fetch_var_name = '@FETCHED_VAR_NAME@'