From 88e430d154d74190966335d2d33cfbd4fb4cae9d Mon Sep 17 00:00:00 2001
From: Travis CI
Date: Fri, 12 Jan 2018 07:53:47 +0000
Subject: [PATCH] Deploy to GitHub Pages: 3423022e84fdb9eef76d051ed35524422f7a02b4

---
 develop/doc/operators.json | 48 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 48 insertions(+)

diff --git a/develop/doc/operators.json b/develop/doc/operators.json
index 5c0053c43d2..0f5f1616262 100644
--- a/develop/doc/operators.json
+++ b/develop/doc/operators.json
@@ -27,6 +27,54 @@
     "intermediate" : 0
   } ],
   "attrs" : [ ]
+},{
+  "type" : "print",
+  "comment" : "\n Creates a print op that will print when a tensor is accessed.\n\n Wraps the tensor passed in so that whenever the tensor is accessed,\n the message `message` is printed, along with the current value of the\n tensor `input`.",
+  "inputs" : [
+    {
+      "name" : "input",
+      "comment" : "the tensor that will be displayed.",
+      "duplicable" : 0,
+      "intermediate" : 0
+    } ],
+  "outputs" : [ ],
+  "attrs" : [
+    {
+      "name" : "first_n",
+      "type" : "int",
+      "comment" : "Only log `first_n` number of times.",
+      "generated" : 0
+    }, {
+      "name" : "message",
+      "type" : "string",
+      "comment" : "A string message to print as a prefix.",
+      "generated" : 0
+    }, {
+      "name" : "summarize",
+      "type" : "int",
+      "comment" : "Print this number of elements in the tensor.",
+      "generated" : 0
+    }, {
+      "name" : "print_tensor_name",
+      "type" : "bool",
+      "comment" : "Whether to print the tensor name.",
+      "generated" : 0
+    }, {
+      "name" : "print_tensor_type",
+      "type" : "bool",
+      "comment" : "Whether to print the tensor's dtype.",
+      "generated" : 0
+    }, {
+      "name" : "print_tensor_shape",
+      "type" : "bool",
+      "comment" : "Whether to print the tensor's shape.",
+      "generated" : 0
+    }, {
+      "name" : "print_tensor_lod",
+      "type" : "bool",
+      "comment" : "Whether to print the tensor's lod.",
+      "generated" : 0
+    } ]
 },{
   "type" : "adagrad",
   "comment" : "\n\nAdaptive Gradient Algorithm (Adagrad).\n\nThe update is done as follows:\n\n$$moment\\_out = moment + grad * grad \\\\\nparam\\_out = param - \\frac{learning\\_rate * grad}{\\sqrt{moment\\_out} + \\epsilon}\n$$\n\nThe original paper (http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf)\ndoes not have the epsilon attribute. It is added here in our implementation,\nas also proposed in http://cs231n.github.io/neural-networks-3/#ada,\nfor numerical stability to avoid division by zero.\n\n",
--
GitLab
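
Note on the print entry above: it describes a pass-through debugging op whose
attributes gate what gets logged. The following plain-Python/NumPy sketch
mimics those documented attributes (`first_n`, `message`, `summarize`, and the
`print_tensor_*` flags); the `PrintOp` class and its argument handling are
invented here for illustration and are not the operator's actual C++
implementation. `print_tensor_lod` is skipped because a plain NumPy array
carries no LoD information.

    import numpy as np

    class PrintOp:
        # Plain-Python sketch of the documented behavior; not Paddle's C++ op.
        def __init__(self, first_n=-1, message="", summarize=-1,
                     print_tensor_name=True, print_tensor_type=True,
                     print_tensor_shape=True):
            self.first_n = first_n
            self.message = message
            self.summarize = summarize
            self.print_tensor_name = print_tensor_name
            self.print_tensor_type = print_tensor_type
            self.print_tensor_shape = print_tensor_shape
            self.count = 0

        def __call__(self, name, tensor):
            self.count += 1
            # Only log `first_n` number of times; a negative value means "always".
            if 0 <= self.first_n < self.count:
                return tensor
            parts = [self.message]  # the string prefix
            if self.print_tensor_name:
                parts.append("name: %s" % name)
            if self.print_tensor_type:
                parts.append("dtype: %s" % tensor.dtype)
            if self.print_tensor_shape:
                parts.append("shape: %s" % (tensor.shape,))
            flat = tensor.ravel()
            # Print at most `summarize` elements (negative means all of them).
            n = flat.size if self.summarize < 0 else min(self.summarize, flat.size)
            parts.append("data: %s" % flat[:n])
            print("  ".join(parts))
            return tensor  # pass the value through unchanged

    # Usage: wrap an access to a value so it is logged as a side effect.
    debug = PrintOp(first_n=2, message="after fc:", summarize=3)
    x = debug("fc_out", np.array([[0.1, 0.2, 0.3, 0.4]]))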
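
Note on the adagrad entry: its update rule is two element-wise steps, which
the following minimal NumPy sketch spells out. The function name, default
values, and toy loss are illustrative assumptions, not part of the generated
JSON or of Paddle's implementation.

    import numpy as np

    def adagrad_update(param, moment, grad, learning_rate=0.01, epsilon=1e-6):
        # Accumulate the squared gradient:
        #   moment_out = moment + grad * grad
        moment_out = moment + grad * grad
        # Scale the step by the inverse root of the accumulator; epsilon
        # guards against division by zero, as the op comment notes:
        #   param_out = param - learning_rate * grad / (sqrt(moment_out) + epsilon)
        param_out = param - learning_rate * grad / (np.sqrt(moment_out) + epsilon)
        return param_out, moment_out

    # Example: a few steps on the toy loss 0.5 * ||w||^2, whose gradient is w.
    w = np.array([1.0, -2.0])
    m = np.zeros_like(w)
    for _ in range(3):
        g = w
        w, m = adagrad_update(w, m, g, learning_rate=0.1)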