From 9945f737438288eca9564cd4650e43dc23c27476 Mon Sep 17 00:00:00 2001
From: Axel Kohlmeyer
Date: Thu, 11 Jun 2020 01:05:58 -0400
Subject: [PATCH] fix spelling in a few more files

---
 src/potential_file_reader.cpp     |  4 ++--
 src/text_file_reader.cpp          |  4 ++--
 src/utils.cpp                     | 12 ++++++------
 src/utils.h                       |  8 ++++----
 unittest/utils/test_tokenizer.cpp |  6 +++---
 5 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/src/potential_file_reader.cpp b/src/potential_file_reader.cpp
index 2ec7908ae3..5fd361eff5 100644
--- a/src/potential_file_reader.cpp
+++ b/src/potential_file_reader.cpp
@@ -83,9 +83,9 @@ void PotentialFileReader::next_dvector(double * list, int n) {
   }
 }
 
-ValueTokenizer PotentialFileReader::next_values(int nparams, const std::string & seperators) {
+ValueTokenizer PotentialFileReader::next_values(int nparams, const std::string & separators) {
   try {
-    return reader->next_values(nparams, seperators);
+    return reader->next_values(nparams, separators);
   } catch (FileReaderException & e) {
     error->one(FLERR, e.what());
   }
diff --git a/src/text_file_reader.cpp b/src/text_file_reader.cpp
index 8063bba87f..9015ddecee 100644
--- a/src/text_file_reader.cpp
+++ b/src/text_file_reader.cpp
@@ -116,6 +116,6 @@ void TextFileReader::next_dvector(double * list, int n) {
   }
 }
 
-ValueTokenizer TextFileReader::next_values(int nparams, const std::string & seperators) {
-  return ValueTokenizer(next_line(nparams), seperators);
+ValueTokenizer TextFileReader::next_values(int nparams, const std::string & separators) {
+  return ValueTokenizer(next_line(nparams), separators);
 }
diff --git a/src/utils.cpp b/src/utils.cpp
index 72193bb2c8..928a84883c 100644
--- a/src/utils.cpp
+++ b/src/utils.cpp
@@ -368,18 +368,18 @@ std::string utils::trim_comment(const std::string & line) {
    Return number of words
 ------------------------------------------------------------------------- */
 
-size_t utils::count_words(const std::string & text, const std::string & seperators) {
+size_t utils::count_words(const std::string & text, const std::string & separators) {
   size_t count = 0;
-  size_t start = text.find_first_not_of(seperators);
+  size_t start = text.find_first_not_of(separators);
 
   while (start != std::string::npos) {
-    size_t end = text.find_first_of(seperators, start);
+    size_t end = text.find_first_of(separators, start);
     ++count;
 
     if(end == std::string::npos) {
       return count;
     } else {
-      start = text.find_first_not_of(seperators, end + 1);
+      start = text.find_first_not_of(separators, end + 1);
     }
   }
   return count;
@@ -389,8 +389,8 @@ size_t utils::count_words(const std::string & text, const std::string & seperato
    Trim comment from string and return number of words
 ------------------------------------------------------------------------- */
 
-size_t utils::trim_and_count_words(const std::string & text, const std::string & seperators) {
-  return utils::count_words(utils::trim_comment(text), seperators);
+size_t utils::trim_and_count_words(const std::string & text, const std::string & separators) {
+  return utils::count_words(utils::trim_comment(text), separators);
 }
 
 /* ----------------------------------------------------------------------
diff --git a/src/utils.h b/src/utils.h
index 79fb2349d3..562293f2f3 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -153,18 +153,18 @@ namespace LAMMPS_NS {
     /**
      * \brief Count words in string
      * \param text string that should be searched
-     * \param seperators string containing characters that will be treated as whitespace
+     * \param separators string containing characters that will be treated as whitespace
      * \return number of words found
      */
-    size_t count_words(const std::string & text, const std::string & seperators = " \t\r\n\f");
+    size_t count_words(const std::string & text, const std::string & separators = " \t\r\n\f");
 
     /**
      * \brief Count words in a single line, trim anything from '#' onward
      * \param text string that should be trimmed and searched
-     * \param seperators string containing characters that will be treated as whitespace
+     * \param separators string containing characters that will be treated as whitespace
      * \return number of words found
      */
-    size_t trim_and_count_words(const std::string & text, const std::string & seperators = " \t\r\n\f");
+    size_t trim_and_count_words(const std::string & text, const std::string & separators = " \t\r\n\f");
 
     /**
      * \brief Check if string can be converted to valid integer
diff --git a/unittest/utils/test_tokenizer.cpp b/unittest/utils/test_tokenizer.cpp
index 09487aabff..903f660959 100644
--- a/unittest/utils/test_tokenizer.cpp
+++ b/unittest/utils/test_tokenizer.cpp
@@ -38,12 +38,12 @@ TEST(Tokenizer, two_words) {
     ASSERT_EQ(t.count(), 2);
 }
 
-TEST(Tokenizer, prefix_seperators) {
+TEST(Tokenizer, prefix_separators) {
     Tokenizer t(" test word", " ");
    ASSERT_EQ(t.count(), 2);
 }
 
-TEST(Tokenizer, postfix_seperators) {
+TEST(Tokenizer, postfix_separators) {
     Tokenizer t("test word ", " ");
     ASSERT_EQ(t.count(), 2);
 }
@@ -55,7 +55,7 @@ TEST(Tokenizer, iterate_words) {
     ASSERT_EQ(t.count(), 2);
 }
 
-TEST(Tokenizer, default_seperators) {
+TEST(Tokenizer, default_separators) {
     Tokenizer t(" \r\n test \t word \f");
     ASSERT_THAT(t.next(), Eq("test"));
     ASSERT_THAT(t.next(), Eq("word"));
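
Note: the rename above is a spelling-only fix (seperators -> separators); no behavior changes. As an illustration of the touched entry points, here is a minimal sketch that exercises utils::count_words() and utils::trim_and_count_words() with both the default and a custom separator set. The function signatures, the default " \t\r\n\f" separators, and the '#' comment trimming come from the utils.h hunk above; the include path, the main() driver, and the sample strings are assumptions for illustration only and are not part of the patch.

// count_words_demo.cpp -- illustrative sketch, not part of this patch.
// Assumes it is compiled inside the LAMMPS source tree so that
// src/utils.h is on the include path.
#include <cstdio>
#include <string>

#include "utils.h"

using namespace LAMMPS_NS;

int main()
{
  const std::string line = "eam/alloy 4.0 2.5   # pair style and cutoffs";

  // default separators " \t\r\n\f" (declared in utils.h above):
  // counts every whitespace-delimited word, including the comment text
  size_t nwords = utils::count_words(line);                    // 8

  // strip everything from '#' onward first, then count
  size_t nvalues = utils::trim_and_count_words(line);          // 3

  // custom separator set: treat both ',' and ' ' as whitespace
  size_t nfields = utils::count_words("a,b c", ", ");          // 3

  std::printf("%zu %zu %zu\n", nwords, nvalues, nfields);
  return 0;
}

Because only parameter names change, existing callers compile unchanged and the default argument and all call sites behave exactly as before.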