Commit 9a3baf4f authored by mindspore-ci-bot, committed by Gitee

!4314 [MD] remove defaults used in binding code, and remove bilinear resize op

Merge pull request !4314 from nhussain/clean_up_bindings
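
Every hunk below follows the same pattern: keyword names and default values (py::arg(...) = ...) and binding docstrings are stripped from the C++ pybind11 registrations, so each C++ constructor is now called positionally with a full argument list, and defaults live only in the Python wrapper layer. A minimal sketch of the resulting convention, assuming the OneHot wrapper shape used by c_transforms (illustrative, not part of this diff):

    import mindspore._c_dataengine as cde

    class OneHot(cde.OneHotOp):
        # User-facing defaults (if any) belong here, on the Python signature.
        def __init__(self, num_classes):
            self.num_classes = num_classes
            # The binding no longer declares py::arg("num_classes"), so the
            # argument must always be supplied, positionally.
            super().__init__(num_classes)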
@@ -32,45 +32,37 @@ namespace mindspore {
namespace dataset {
PYBIND_REGISTER(ConcatenateOp, 1, ([](const py::module *m) {
-  (void)py::class_<ConcatenateOp, TensorOp, std::shared_ptr<ConcatenateOp>>(
-    *m, "ConcatenateOp", "Tensor operation concatenate tensors.")
-    .def(py::init<int8_t, std::shared_ptr<Tensor>, std::shared_ptr<Tensor>>(), py::arg("axis"),
-         py::arg("prepend").none(true), py::arg("append").none(true));
+  (void)py::class_<ConcatenateOp, TensorOp, std::shared_ptr<ConcatenateOp>>(*m, "ConcatenateOp")
+    .def(py::init<int8_t, std::shared_ptr<Tensor>, std::shared_ptr<Tensor>>());
}));
-PYBIND_REGISTER(DuplicateOp, 1, ([](const py::module *m) {
-  (void)py::class_<DuplicateOp, TensorOp, std::shared_ptr<DuplicateOp>>(*m, "DuplicateOp",
-                                                                        "Duplicate tensor.")
-    .def(py::init<>());
-}));
+PYBIND_REGISTER(
+  DuplicateOp, 1, ([](const py::module *m) {
+    (void)py::class_<DuplicateOp, TensorOp, std::shared_ptr<DuplicateOp>>(*m, "DuplicateOp").def(py::init<>());
+  }));
-PYBIND_REGISTER(FillOp, 1, ([](const py::module *m) {
-  (void)py::class_<FillOp, TensorOp, std::shared_ptr<FillOp>>(
-    *m, "FillOp", "Tensor operation to return tensor filled with same value as input fill value.")
-    .def(py::init<std::shared_ptr<Tensor>>());
-}));
+PYBIND_REGISTER(
+  FillOp, 1, ([](const py::module *m) {
+    (void)py::class_<FillOp, TensorOp, std::shared_ptr<FillOp>>(*m, "FillOp").def(py::init<std::shared_ptr<Tensor>>());
+  }));
PYBIND_REGISTER(MaskOp, 1, ([](const py::module *m) {
-  (void)py::class_<MaskOp, TensorOp, std::shared_ptr<MaskOp>>(
-    *m, "MaskOp", "Tensor mask operation using relational comparator")
+  (void)py::class_<MaskOp, TensorOp, std::shared_ptr<MaskOp>>(*m, "MaskOp")
.def(py::init<RelationalOp, std::shared_ptr<Tensor>, DataType>());
}));
-PYBIND_REGISTER(OneHotOp, 1, ([](const py::module *m) {
-  (void)py::class_<OneHotOp, TensorOp, std::shared_ptr<OneHotOp>>(
-    *m, "OneHotOp", "Tensor operation to apply one hot encoding. Takes number of classes.")
-    .def(py::init<int32_t>());
-}));
+PYBIND_REGISTER(
+  OneHotOp, 1, ([](const py::module *m) {
+    (void)py::class_<OneHotOp, TensorOp, std::shared_ptr<OneHotOp>>(*m, "OneHotOp").def(py::init<int32_t>());
+  }));
PYBIND_REGISTER(PadEndOp, 1, ([](const py::module *m) {
-  (void)py::class_<PadEndOp, TensorOp, std::shared_ptr<PadEndOp>>(
-    *m, "PadEndOp", "Tensor operation to pad end of tensor with a pad value.")
+  (void)py::class_<PadEndOp, TensorOp, std::shared_ptr<PadEndOp>>(*m, "PadEndOp")
.def(py::init<TensorShape, std::shared_ptr<Tensor>>());
}));
PYBIND_REGISTER(SliceOp, 1, ([](const py::module *m) {
-  (void)py::class_<SliceOp, TensorOp, std::shared_ptr<SliceOp>>(*m, "SliceOp",
-                                                                "Tensor slice operation.")
+  (void)py::class_<SliceOp, TensorOp, std::shared_ptr<SliceOp>>(*m, "SliceOp")
.def(py::init<bool>())
.def(py::init([](const py::list &py_list) {
std::vector<dsize_t> c_list;
@@ -105,17 +97,15 @@ PYBIND_REGISTER(SliceOp, 1, ([](const py::module *m) {
}));
PYBIND_REGISTER(ToFloat16Op, 1, ([](const py::module *m) {
-  (void)py::class_<ToFloat16Op, TensorOp, std::shared_ptr<ToFloat16Op>>(
-    *m, "ToFloat16Op", py::dynamic_attr(),
-    "Tensor operator to type cast float32 data to a float16 type.")
+  (void)py::class_<ToFloat16Op, TensorOp, std::shared_ptr<ToFloat16Op>>(*m, "ToFloat16Op",
+                                                                        py::dynamic_attr())
.def(py::init<>());
}));
PYBIND_REGISTER(TypeCastOp, 1, ([](const py::module *m) {
-  (void)py::class_<TypeCastOp, TensorOp, std::shared_ptr<TypeCastOp>>(
-    *m, "TypeCastOp", "Tensor operator to type cast data to a specified type.")
-    .def(py::init<DataType>(), py::arg("data_type"))
-    .def(py::init<std::string>(), py::arg("data_type"));
+  (void)py::class_<TypeCastOp, TensorOp, std::shared_ptr<TypeCastOp>>(*m, "TypeCastOp")
+    .def(py::init<DataType>())
+    .def(py::init<std::string>());
}));
PYBIND_REGISTER(RelationalOp, 0, ([](const py::module *m) {
......
@@ -46,73 +46,50 @@ namespace dataset {
#ifdef ENABLE_ICU4C
PYBIND_REGISTER(BasicTokenizerOp, 1, ([](const py::module *m) {
-  (void)py::class_<BasicTokenizerOp, TensorOp, std::shared_ptr<BasicTokenizerOp>>(
-    *m, "BasicTokenizerOp", "Tokenize a scalar tensor of UTF-8 string by specific rules.")
-    .def(py::init<const bool &, const bool &, const NormalizeForm &, const bool &, const bool &>(),
-         py::arg("lower_case") = BasicTokenizerOp::kDefLowerCase,
-         py::arg("keep_whitespace") = BasicTokenizerOp::kDefKeepWhitespace,
-         py::arg("normalization_form") = BasicTokenizerOp::kDefNormalizationForm,
-         py::arg("preserve_unused_token") = BasicTokenizerOp::kDefPreserveUnusedToken,
-         py::arg("with_offsets") = BasicTokenizerOp::kDefWithOffsets);
+  (void)py::class_<BasicTokenizerOp, TensorOp, std::shared_ptr<BasicTokenizerOp>>(*m,
+                                                                                  "BasicTokenizerOp")
+    .def(py::init<const bool &, const bool &, const NormalizeForm &, const bool &, const bool &>());
}));
PYBIND_REGISTER(WhitespaceTokenizerOp, 1, ([](const py::module *m) {
(void)py::class_<WhitespaceTokenizerOp, TensorOp, std::shared_ptr<WhitespaceTokenizerOp>>(
*m, "WhitespaceTokenizerOp", "Tokenize a scalar tensor of UTF-8 string on ICU defined whitespaces.")
.def(py::init<const bool &>(), py::arg(" with_offsets ") = WhitespaceTokenizerOp::kDefWithOffsets);
*m, "WhitespaceTokenizerOp")
.def(py::init<const bool &>());
}));
PYBIND_REGISTER(UnicodeScriptTokenizerOp, 1, ([](const py::module *m) {
(void)py::class_<UnicodeScriptTokenizerOp, TensorOp, std::shared_ptr<UnicodeScriptTokenizerOp>>(
*m, "UnicodeScriptTokenizerOp",
"Tokenize a scalar tensor of UTF-8 string on Unicode script boundaries.")
*m, "UnicodeScriptTokenizerOp")
.def(py::init<>())
-    .def(py::init<const bool &, const bool &>(),
-         py::arg("keep_whitespace") = UnicodeScriptTokenizerOp::kDefKeepWhitespace,
-         py::arg("with_offsets") = UnicodeScriptTokenizerOp::kDefWithOffsets);
+    .def(py::init<const bool &, const bool &>());
}));
-PYBIND_REGISTER(CaseFoldOp, 1, ([](const py::module *m) {
-  (void)py::class_<CaseFoldOp, TensorOp, std::shared_ptr<CaseFoldOp>>(
-    *m, "CaseFoldOp", "Apply case fold operation on utf-8 string tensor")
-    .def(py::init<>());
-}));
+PYBIND_REGISTER(
+  CaseFoldOp, 1, ([](const py::module *m) {
+    (void)py::class_<CaseFoldOp, TensorOp, std::shared_ptr<CaseFoldOp>>(*m, "CaseFoldOp").def(py::init<>());
+  }));
PYBIND_REGISTER(NormalizeUTF8Op, 1, ([](const py::module *m) {
-  (void)py::class_<NormalizeUTF8Op, TensorOp, std::shared_ptr<NormalizeUTF8Op>>(
-    *m, "NormalizeUTF8Op", "Apply normalize operation on utf-8 string tensor.")
+  (void)py::class_<NormalizeUTF8Op, TensorOp, std::shared_ptr<NormalizeUTF8Op>>(*m, "NormalizeUTF8Op")
.def(py::init<>())
-    .def(py::init<NormalizeForm>(), py::arg("normalize_form") = NormalizeUTF8Op::kDefNormalizeForm);
+    .def(py::init<NormalizeForm>());
}));
PYBIND_REGISTER(RegexReplaceOp, 1, ([](const py::module *m) {
-  (void)py::class_<RegexReplaceOp, TensorOp, std::shared_ptr<RegexReplaceOp>>(
-    *m, "RegexReplaceOp",
-    "Replace utf-8 string tensor with 'replace' according to regular expression 'pattern'.")
-    .def(py::init<const std::string &, const std::string &, bool>(), py::arg("pattern"),
-         py::arg("replace"), py::arg("replace_all"));
+  (void)py::class_<RegexReplaceOp, TensorOp, std::shared_ptr<RegexReplaceOp>>(*m, "RegexReplaceOp")
+    .def(py::init<const std::string &, const std::string &, bool>());
}));
PYBIND_REGISTER(RegexTokenizerOp, 1, ([](const py::module *m) {
-  (void)py::class_<RegexTokenizerOp, TensorOp, std::shared_ptr<RegexTokenizerOp>>(
-    *m, "RegexTokenizerOp", "Tokenize a scalar tensor of UTF-8 string by regex expression pattern.")
-    .def(py::init<const std::string &, const std::string &, const bool &>(), py::arg("delim_pattern"),
-         py::arg("keep_delim_pattern"), py::arg("with_offsets") = RegexTokenizerOp::kDefWithOffsets);
+  (void)py::class_<RegexTokenizerOp, TensorOp, std::shared_ptr<RegexTokenizerOp>>(*m,
+                                                                                  "RegexTokenizerOp")
+    .def(py::init<const std::string &, const std::string &, const bool &>());
}));
PYBIND_REGISTER(BertTokenizerOp, 1, ([](const py::module *m) {
-  (void)py::class_<BertTokenizerOp, TensorOp, std::shared_ptr<BertTokenizerOp>>(
-    *m, "BertTokenizerOp", "Tokenizer used for Bert text process.")
+  (void)py::class_<BertTokenizerOp, TensorOp, std::shared_ptr<BertTokenizerOp>>(*m, "BertTokenizerOp")
.def(py::init<const std::shared_ptr<Vocab> &, const std::string &, const int &, const std::string &,
-                  const bool &, const bool &, const NormalizeForm &, const bool &, const bool &>(),
-         py::arg("vocab"),
-         py::arg("suffix_indicator") = std::string(WordpieceTokenizerOp::kDefSuffixIndicator),
-         py::arg("max_bytes_per_token") = WordpieceTokenizerOp::kDefMaxBytesPerToken,
-         py::arg("unknown_token") = std::string(WordpieceTokenizerOp::kDefUnknownToken),
-         py::arg("lower_case") = BasicTokenizerOp::kDefLowerCase,
-         py::arg("keep_whitespace") = BasicTokenizerOp::kDefKeepWhitespace,
-         py::arg("normalization_form") = BasicTokenizerOp::kDefNormalizationForm,
-         py::arg("preserve_unused_token") = BasicTokenizerOp::kDefPreserveUnusedToken,
-         py::arg("with_offsets") = WordpieceTokenizerOp::kDefWithOffsets);
+                  const bool &, const bool &, const NormalizeForm &, const bool &, const bool &>());
}));
PYBIND_REGISTER(NormalizeForm, 0, ([](const py::module *m) {
@@ -128,11 +105,9 @@ PYBIND_REGISTER(NormalizeForm, 0, ([](const py::module *m) {
#endif
PYBIND_REGISTER(JiebaTokenizerOp, 1, ([](const py::module *m) {
-  (void)py::class_<JiebaTokenizerOp, TensorOp, std::shared_ptr<JiebaTokenizerOp>>(
-    *m, "JiebaTokenizerOp", "")
-    .def(py::init<const std::string &, const std::string &, const JiebaMode &, const bool &>(),
-         py::arg("hmm_path"), py::arg("mp_path"), py::arg("mode") = JiebaMode::kMix,
-         py::arg("with_offsets") = JiebaTokenizerOp::kDefWithOffsets)
+  (void)py::class_<JiebaTokenizerOp, TensorOp, std::shared_ptr<JiebaTokenizerOp>>(*m,
+                                                                                  "JiebaTokenizerOp")
+    .def(py::init<const std::string &, const std::string &, const JiebaMode &, const bool &>())
.def("add_word", [](JiebaTokenizerOp &self, const std::string word, int freq) {
THROW_IF_ERROR(self.AddWord(word, freq));
});
@@ -140,13 +115,12 @@ PYBIND_REGISTER(JiebaTokenizerOp, 1, ([](const py::module *m) {
PYBIND_REGISTER(UnicodeCharTokenizerOp, 1, ([](const py::module *m) {
(void)py::class_<UnicodeCharTokenizerOp, TensorOp, std::shared_ptr<UnicodeCharTokenizerOp>>(
*m, "UnicodeCharTokenizerOp", "Tokenize a scalar tensor of UTF-8 string to Unicode characters.")
.def(py::init<const bool &>(), py::arg("with_offsets") = UnicodeCharTokenizerOp::kDefWithOffsets);
*m, "UnicodeCharTokenizerOp")
.def(py::init<const bool &>());
}));
PYBIND_REGISTER(LookupOp, 1, ([](const py::module *m) {
-  (void)py::class_<LookupOp, TensorOp, std::shared_ptr<LookupOp>>(
-    *m, "LookupOp", "Tensor operation to LookUp each word.")
+  (void)py::class_<LookupOp, TensorOp, std::shared_ptr<LookupOp>>(*m, "LookupOp")
.def(py::init([](std::shared_ptr<Vocab> vocab, const py::object &py_word) {
if (vocab == nullptr) {
THROW_IF_ERROR(Status(StatusCode::kUnexpectedError, "vocab object type is incorrect or null."));
@@ -165,56 +139,42 @@ PYBIND_REGISTER(LookupOp, 1, ([](const py::module *m) {
}));
PYBIND_REGISTER(NgramOp, 1, ([](const py::module *m) {
-  (void)py::class_<NgramOp, TensorOp, std::shared_ptr<NgramOp>>(*m, "NgramOp",
-                                                                "TensorOp performs ngram mapping.")
+  (void)py::class_<NgramOp, TensorOp, std::shared_ptr<NgramOp>>(*m, "NgramOp")
.def(py::init<const std::vector<int32_t> &, int32_t, int32_t, const std::string &,
-                  const std::string &, const std::string &>(),
-         py::arg("ngrams"), py::arg("l_pad_len"), py::arg("r_pad_len"), py::arg("l_pad_token"),
-         py::arg("r_pad_token"), py::arg("separator"));
+                  const std::string &, const std::string &>());
}));
-PYBIND_REGISTER(
-  WordpieceTokenizerOp, 1, ([](const py::module *m) {
-    (void)py::class_<WordpieceTokenizerOp, TensorOp, std::shared_ptr<WordpieceTokenizerOp>>(
-      *m, "WordpieceTokenizerOp", "Tokenize scalar token or 1-D tokens to subword tokens.")
-      .def(
-        py::init<const std::shared_ptr<Vocab> &, const std::string &, const int &, const std::string &, const bool &>(),
-        py::arg("vocab"), py::arg("suffix_indicator") = std::string(WordpieceTokenizerOp::kDefSuffixIndicator),
-        py::arg("max_bytes_per_token") = WordpieceTokenizerOp::kDefMaxBytesPerToken,
-        py::arg("unknown_token") = std::string(WordpieceTokenizerOp::kDefUnknownToken),
-        py::arg("with_offsets") = WordpieceTokenizerOp::kDefWithOffsets);
-  }));
+PYBIND_REGISTER(WordpieceTokenizerOp, 1, ([](const py::module *m) {
+  (void)py::class_<WordpieceTokenizerOp, TensorOp, std::shared_ptr<WordpieceTokenizerOp>>(
+    *m, "WordpieceTokenizerOp")
+    .def(py::init<const std::shared_ptr<Vocab> &, const std::string &, const int &, const std::string &,
+                  const bool &>());
+}));
PYBIND_REGISTER(SlidingWindowOp, 1, ([](const py::module *m) {
-  (void)py::class_<SlidingWindowOp, TensorOp, std::shared_ptr<SlidingWindowOp>>(
-    *m, "SlidingWindowOp", "TensorOp to apply sliding window to a 1-D Tensor.")
-    .def(py::init<uint32_t, int32_t>(), py::arg("width"), py::arg("axis"));
+  (void)py::class_<SlidingWindowOp, TensorOp, std::shared_ptr<SlidingWindowOp>>(*m, "SlidingWindowOp")
+    .def(py::init<uint32_t, int32_t>());
}));
PYBIND_REGISTER(
SentencePieceTokenizerOp, 1, ([](const py::module *m) {
(void)py::class_<SentencePieceTokenizerOp, TensorOp, std::shared_ptr<SentencePieceTokenizerOp>>(
*m, "SentencePieceTokenizerOp", "Tokenize scalar token or 1-D tokens to tokens by sentence piece.")
*m, "SentencePieceTokenizerOp")
.def(
-        py::init<std::shared_ptr<SentencePieceVocab> &, const SPieceTokenizerLoadType, const SPieceTokenizerOutType>(),
-        py::arg("vocab"), py::arg("load_type") = SPieceTokenizerLoadType::kModel,
-        py::arg("out_type") = SPieceTokenizerOutType::kString)
+        py::init<std::shared_ptr<SentencePieceVocab> &, const SPieceTokenizerLoadType, const SPieceTokenizerOutType>())
.def(py::init<const std::string &, const std::string &, const SPieceTokenizerLoadType,
-                    const SPieceTokenizerOutType>(),
-           py::arg("model_path"), py::arg("model_filename"), py::arg("load_type") = SPieceTokenizerLoadType::kFile,
-           py::arg("out_type") = SPieceTokenizerOutType::kString);
+                    const SPieceTokenizerOutType>());
}));
PYBIND_REGISTER(ToNumberOp, 1, ([](const py::module *m) {
-  (void)py::class_<ToNumberOp, TensorOp, std::shared_ptr<ToNumberOp>>(
-    *m, "ToNumberOp", "TensorOp to convert strings to numbers.")
-    .def(py::init<DataType>(), py::arg("data_type"))
-    .def(py::init<std::string>(), py::arg("data_type"));
+  (void)py::class_<ToNumberOp, TensorOp, std::shared_ptr<ToNumberOp>>(*m, "ToNumberOp")
+    .def(py::init<DataType>())
+    .def(py::init<std::string>());
}));
PYBIND_REGISTER(TruncateSequencePairOp, 1, ([](const py::module *m) {
(void)py::class_<TruncateSequencePairOp, TensorOp, std::shared_ptr<TruncateSequencePairOp>>(
*m, "TruncateSequencePairOp", "Tensor operation to truncate two tensors to a max_length")
*m, "TruncateSequencePairOp")
.def(py::init<int64_t>());
}));
......
@@ -62,7 +62,7 @@ PYBIND_REGISTER(
ShardSample, 0, ([](const py::module *m) {
(void)py::class_<mindrecord::ShardSample, mindrecord::ShardOperator, std::shared_ptr<mindrecord::ShardSample>>(
*m, "MindrecordSubsetRandomSampler")
-      .def(py::init<std::vector<int64_t>, uint32_t>(), py::arg("indices"), py::arg("seed") = GetSeed());
+      .def(py::init<std::vector<int64_t>, uint32_t>());
}));
PYBIND_REGISTER(ShardSequentialSample, 0, ([](const py::module *m) {
......
@@ -37,7 +37,6 @@ add_library(kernels-image OBJECT
random_vertical_flip_with_bbox_op.cc
random_sharpness_op.cc
rescale_op.cc
-    resize_bilinear_op.cc
resize_op.cc
rgba_to_bgr_op.cc
rgba_to_rgb_op.cc
......
Deleted file: minddata/dataset/kernels/image/resize_bilinear_op.cc
/**
* Copyright 2019 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "minddata/dataset/kernels/image/resize_bilinear_op.h"
#include <random>
#include "minddata/dataset/util/status.h"
namespace mindspore {
namespace dataset {
const int32_t ResizeBilinearOp::kDefWidth = 0;
} // namespace dataset
} // namespace mindspore
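
Removing ResizeBilinearOp does not drop bilinear resizing from the pipeline; it stays reachable through the generic resize op. A plausible Python equivalent, assuming the Inter.LINEAR mapping and the module paths of this era's API (neither is shown in this diff):

    from mindspore.dataset.transforms.vision import Inter
    import mindspore.dataset.transforms.vision.c_transforms as C

    # Inter.LINEAR selects the bilinear mode of the generic ResizeOp.
    resize_op = C.Resize((256, 256), interpolation=Inter.LINEAR)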
@@ -21,6 +21,7 @@ User can also define custom sampler by extending from Sampler class.
import numpy as np
import mindspore._c_dataengine as cde
+import mindspore.dataset as ds
class Sampler:
"""
@@ -541,7 +542,7 @@ class SubsetRandomSampler(BuiltinSampler):
return self.child_sampler.is_sharded()
def create_for_minddataset(self):
-        c_sampler = cde.MindrecordSubsetRandomSampler(self.indices)
+        c_sampler = cde.MindrecordSubsetRandomSampler(self.indices, ds.config.get_seed())
c_child_sampler = self.create_child_for_minddataset()
c_sampler.add_child(c_child_sampler)
return c_sampler
......
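
With the seed default (GetSeed()) gone from the ShardSample binding above, the Python layer now resolves the seed itself via ds.config.get_seed(). A short sketch of the effect, with illustrative indices:

    import mindspore.dataset as ds

    ds.config.set_seed(566)                      # any fixed global seed
    sampler = ds.SubsetRandomSampler([0, 2, 4])  # for use with MindDataset
    # create_for_minddataset() now forwards ds.config.get_seed() to the C++
    # ShardSample instead of relying on a default baked into the binding.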
@@ -52,8 +52,8 @@ import mindspore._c_dataengine as cde
from .utils import JiebaMode, NormalizeForm, to_str, SPieceTokenizerOutType, SPieceTokenizerLoadType
from .validators import check_lookup, check_jieba_add_dict, \
-    check_jieba_add_word, check_jieba_init, check_with_offsets, check_unicode_script_tokenizer,\
-    check_wordpiece_tokenizer, check_regex_tokenizer, check_basic_tokenizer, check_ngram, check_pair_truncate,\
+    check_jieba_add_word, check_jieba_init, check_with_offsets, check_unicode_script_tokenizer, \
+    check_wordpiece_tokenizer, check_regex_tokenizer, check_basic_tokenizer, check_ngram, check_pair_truncate, \
check_to_number, check_bert_tokenizer, check_python_tokenizer, check_slidingwindow
from ..core.datatypes import mstype_to_detype
@@ -100,7 +100,8 @@ class SlidingWindow(cde.SlidingWindowOp):
@check_slidingwindow
def __init__(self, width, axis=0):
-        super().__init__(width=width, axis=axis)
+        super().__init__(width, axis)
class Ngram(cde.NgramOp):
@@ -126,8 +127,7 @@ class Ngram(cde.NgramOp):
@check_ngram
def __init__(self, n, left_pad=("", 0), right_pad=("", 0), separator=" "):
-        super().__init__(ngrams=n, l_pad_len=left_pad[1], r_pad_len=right_pad[1], l_pad_token=left_pad[0],
-                         r_pad_token=right_pad[0], separator=separator)
+        super().__init__(n, left_pad[1], right_pad[1], left_pad[0], right_pad[0], separator)
DE_C_INTER_JIEBA_MODE = {
@@ -326,6 +326,7 @@ class WordpieceTokenizer(cde.WordpieceTokenizerOp):
super().__init__(self.vocab, self.suffix_indicator, self.max_bytes_per_token,
self.unknown_token, self.with_offsets)
DE_C_INTER_SENTENCEPIECE_LOADTYPE = {
SPieceTokenizerLoadType.FILE: cde.SPieceTokenizerLoadType.DE_SPIECE_TOKENIZER_LOAD_KFILE,
SPieceTokenizerLoadType.MODEL: cde.SPieceTokenizerLoadType.DE_SPIECE_TOKENIZER_LOAD_KMODEL
@@ -336,6 +337,7 @@ DE_C_INTER_SENTENCEPIECE_OUTTYPE = {
SPieceTokenizerOutType.INT: cde.SPieceTokenizerOutType.DE_SPIECE_TOKENIZER_OUTTYPE_KINT
}
class SentencePieceTokenizer(cde.SentencePieceTokenizerOp):
"""
Tokenize scalar token or 1-D tokens to tokens by sentencepiece.
@@ -357,6 +359,7 @@ class SentencePieceTokenizer(cde.SentencePieceTokenizerOp):
super().__init__(mode, DE_C_INTER_SENTENCEPIECE_LOADTYPE[SPieceTokenizerLoadType.MODEL],
DE_C_INTER_SENTENCEPIECE_OUTTYPE[out_type])
if platform.system().lower() != 'windows':
class WhitespaceTokenizer(cde.WhitespaceTokenizerOp):
"""
......
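
The text-op hunks are the same mechanical change: user-facing keywords and defaults stay on the Python signatures, while super().__init__ forwards positionally. Ngram's public API, for instance, is unchanged (a sketch with illustrative values):

    import mindspore.dataset.text as text

    # Same keywords as before for the caller ...
    ngram_op = text.Ngram(3, left_pad=("_", 2), right_pad=("_", 2), separator=" ")
    # ... but the C++ NgramOp is now constructed positionally:
    #   super().__init__(n, left_pad[1], right_pad[1], left_pad[0], right_pad[0], separator)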
@@ -362,7 +362,7 @@ class RandomCrop(cde.RandomCropOp):
padding = (0, 0, 0, 0)
else:
padding = parse_padding(padding)
-        if isinstance(fill_value, int):  # temporary fix
+        if isinstance(fill_value, int):
fill_value = tuple([fill_value] * 3)
border_type = DE_C_BORDER_TYPE[padding_mode]
@@ -417,7 +417,7 @@ class RandomCropWithBBox(cde.RandomCropWithBBoxOp):
else:
padding = parse_padding(padding)
-        if isinstance(fill_value, int):  # temporary fix
+        if isinstance(fill_value, int):
fill_value = tuple([fill_value] * 3)
border_type = DE_C_BORDER_TYPE[padding_mode]
@@ -549,9 +549,8 @@ class Resize(cde.ResizeOp):
self.interpolation = interpolation
interpoltn = DE_C_INTER_MODE[interpolation]
if isinstance(size, int):
-            super().__init__(size, interpolation=interpoltn)
-        else:
-            super().__init__(*size, interpoltn)
+            size = (size, 0)
+        super().__init__(*size, interpoltn)
class ResizeWithBBox(cde.ResizeWithBBoxOp):
@@ -579,9 +578,8 @@ class ResizeWithBBox(cde.ResizeWithBBoxOp):
self.interpolation = interpolation
interpoltn = DE_C_INTER_MODE[interpolation]
if isinstance(size, int):
-            super().__init__(size, interpolation=interpoltn)
-        else:
-            super().__init__(*size, interpoltn)
+            size = (size, 0)
+        super().__init__(*size, interpoltn)
class RandomResizedCropWithBBox(cde.RandomCropAndResizeWithBBoxOp):
@@ -779,7 +777,7 @@ class RandomRotation(cde.RandomRotationOp):
degrees = (-degrees, degrees)
if center is None:
center = (-1, -1)
-        if isinstance(fill_value, int):  # temporary fix
+        if isinstance(fill_value, int):
fill_value = tuple([fill_value] * 3)
interpolation = DE_C_INTER_MODE[resample]
super().__init__(*degrees, *center, interpolation, expand, *fill_value)
@@ -816,9 +814,8 @@ class RandomResize(cde.RandomResizeOp):
def __init__(self, size):
self.size = size
if isinstance(size, int):
-            super().__init__(size)
-        else:
-            super().__init__(*size)
+            size = (size, 0)
+        super().__init__(*size)
class RandomResizeWithBBox(cde.RandomResizeWithBBoxOp):
@@ -837,9 +834,8 @@ class RandomResizeWithBBox(cde.RandomResizeWithBBoxOp):
def __init__(self, size):
self.size = size
if isinstance(size, int):
-            super().__init__(size)
-        else:
-            super().__init__(*size)
+            size = (size, 0)
+        super().__init__(*size)
class HWC2CHW(cde.ChannelSwapOp):
@@ -918,7 +914,7 @@ class Pad(cde.PadOp):
@check_pad
def __init__(self, padding, fill_value=0, padding_mode=Border.CONSTANT):
padding = parse_padding(padding)
-        if isinstance(fill_value, int):  # temporary fix
+        if isinstance(fill_value, int):
fill_value = tuple([fill_value] * 3)
padding_mode = DE_C_BORDER_TYPE[padding_mode]
......
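
Two small normalizations make the positional calls above uniform: an int size becomes (size, 0), where width 0 appears to be the sentinel (matching the deleted kDefWidth = 0) that lets the C++ resize ops derive the width from the aspect ratio, and an int fill_value is broadcast to an RGB triple. A standalone sketch with illustrative values:

    size = 256
    if isinstance(size, int):
        size = (size, 0)                      # width 0: keep the aspect ratio
    fill_value = 127
    if isinstance(fill_value, int):
        fill_value = tuple([fill_value] * 3)  # grey level -> (R, G, B)
    print(size, fill_value)                   # (256, 0) (127, 127, 127)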
@@ -59,7 +59,6 @@ SET(DE_UT_SRCS
rename_op_test.cc
repeat_op_test.cc
rescale_op_test.cc
-    resize_bilinear_op_test.cc
resize_op_test.cc
resize_with_bbox_op_test.cc
rgba_to_bgr_op_test.cc
......
@@ -1200,4 +1200,8 @@ TEST_F(MindDataTestPipeline, TestRandomSolarizeFail) {
threshold = {1};
random_solarize = mindspore::dataset::api::vision::RandomSolarize(threshold);
EXPECT_EQ(random_solarize, nullptr);
+  threshold = {};
+  random_solarize = mindspore::dataset::api::vision::RandomSolarize(threshold);
+  EXPECT_EQ(random_solarize, nullptr);
}
Deleted file: resize_bilinear_op_test.cc
/**
* Copyright 2019 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "common/common.h"
#include "common/cvop_common.h"
#include "minddata/dataset/kernels/image/resize_bilinear_op.h"
#include "utils/log_adapter.h"
using namespace mindspore::dataset;
using mindspore::MsLogLevel::INFO;
using mindspore::ExceptionType::NoExceptionType;
using mindspore::LogStream;
class MindDataTestResizeBilinearOp : public UT::CVOP::CVOpCommon {
public:
MindDataTestResizeBilinearOp() : CVOpCommon() {}
};
TEST_F(MindDataTestResizeBilinearOp, TestOp) {
MS_LOG(INFO) << "Doing testResizeBilinear.";
// Resizing with a factor of 0.5
TensorShape s = input_tensor_->shape();
int output_w = 0.5 * s[0];
int output_h = (s[0] * output_w) / s[1];
std::shared_ptr<Tensor> output_tensor;
// Resizing
std::unique_ptr<ResizeBilinearOp> op(new ResizeBilinearOp(output_h, output_w));
Status st = op->Compute(input_tensor_, &output_tensor);
EXPECT_TRUE(st.IsOk());
CheckImageShapeAndData(output_tensor, kResizeBilinear);
MS_LOG(INFO) << "testResizeBilinear end.";
}