fix spelling in a few more files

Author: Axel Kohlmeyer
Date:   2020-06-11 01:05:58 -04:00
parent 81d937ee97
commit 9945f73743
5 changed files with 17 additions and 17 deletions


@@ -83,9 +83,9 @@ void PotentialFileReader::next_dvector(double * list, int n) {
   }
 }
-ValueTokenizer PotentialFileReader::next_values(int nparams, const std::string & seperators) {
+ValueTokenizer PotentialFileReader::next_values(int nparams, const std::string & separators) {
   try {
-    return reader->next_values(nparams, seperators);
+    return reader->next_values(nparams, separators);
   } catch (FileReaderException & e) {
     error->one(FLERR, e.what());
   }
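Aside from the rename, this hunk shows the wrapper pattern used by these readers: the underlying reader may throw a FileReaderException, and the wrapper turns it into a fatal error via error->one(FLERR, ...). Below is a minimal standalone sketch of that exception-to-fatal-error translation using illustrative stand-ins; FileFormatError, fatal_error, and next_value are not LAMMPS names.

#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <stdexcept>
#include <string>

// Illustrative stand-in for FileReaderException.
struct FileFormatError : std::runtime_error {
    using std::runtime_error::runtime_error;
};

// Illustrative stand-in for error->one(FLERR, msg): report and abort.
static void fatal_error(const char *file, int line, const std::string &msg) {
    std::fprintf(stderr, "ERROR (%s:%d): %s\n", file, line, msg.c_str());
    std::exit(1);
}

// Parse one numeric token; translate any parse failure into a fatal error,
// in the same try/catch shape as the wrapper above.
static double next_value(const std::string &token) {
    try {
        std::size_t used = 0;
        double value = std::stod(token, &used);
        if (used != token.size())
            throw FileFormatError("trailing characters in '" + token + "'");
        return value;
    } catch (const std::exception &e) {
        fatal_error(__FILE__, __LINE__, e.what());
    }
    return 0.0; // unreachable: fatal_error() does not return
}

int main() {
    std::printf("%g\n", next_value("2.5"));   // ok
    std::printf("%g\n", next_value("2.5x"));  // takes the fatal-error path
}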


@@ -116,6 +116,6 @@ void TextFileReader::next_dvector(double * list, int n) {
   }
 }
-ValueTokenizer TextFileReader::next_values(int nparams, const std::string & seperators) {
-  return ValueTokenizer(next_line(nparams), seperators);
+ValueTokenizer TextFileReader::next_values(int nparams, const std::string & separators) {
+  return ValueTokenizer(next_line(nparams), separators);
 }


@@ -368,18 +368,18 @@ std::string utils::trim_comment(const std::string & line) {
    Return number of words
 ------------------------------------------------------------------------- */
-size_t utils::count_words(const std::string & text, const std::string & seperators) {
+size_t utils::count_words(const std::string & text, const std::string & separators) {
   size_t count = 0;
-  size_t start = text.find_first_not_of(seperators);
+  size_t start = text.find_first_not_of(separators);
   while (start != std::string::npos) {
-    size_t end = text.find_first_of(seperators, start);
+    size_t end = text.find_first_of(separators, start);
     ++count;
     if(end == std::string::npos) {
       return count;
     } else {
-      start = text.find_first_not_of(seperators, end + 1);
+      start = text.find_first_not_of(separators, end + 1);
     }
   }
   return count;
@@ -389,8 +389,8 @@ size_t utils::count_words(const std::string & text, const std::string & seperato
    Trim comment from string and return number of words
 ------------------------------------------------------------------------- */
-size_t utils::trim_and_count_words(const std::string & text, const std::string & seperators) {
-  return utils::count_words(utils::trim_comment(text), seperators);
+size_t utils::trim_and_count_words(const std::string & text, const std::string & separators) {
+  return utils::count_words(utils::trim_comment(text), separators);
 }
 /* ----------------------------------------------------------------------
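Since the count_words loop touched here is self-contained, it can be exercised outside of LAMMPS. The sketch below copies the logic from the two hunks above into free functions and adds a small ad-hoc check in main(); the '#'-trimming is a stand-in for utils::trim_comment(), which this diff does not show.

#include <cassert>
#include <cstddef>
#include <string>

// Same loop as utils::count_words() above: a word is a maximal run of
// characters that are not in the separator set.
static std::size_t count_words(const std::string &text,
                               const std::string &separators = " \t\r\n\f") {
    std::size_t count = 0;
    std::size_t start = text.find_first_not_of(separators);
    while (start != std::string::npos) {
        std::size_t end = text.find_first_of(separators, start);
        ++count;
        if (end == std::string::npos) {
            return count;
        } else {
            start = text.find_first_not_of(separators, end + 1);
        }
    }
    return count;
}

// Mirrors utils::trim_and_count_words(): drop everything from '#' onward
// (standing in for utils::trim_comment()), then count what is left.
static std::size_t trim_and_count_words(const std::string &text,
                                        const std::string &separators = " \t\r\n\f") {
    return count_words(text.substr(0, text.find('#')), separators);
}

int main() {
    assert(count_words("  one two\tthree\n") == 3);
    assert(count_words("") == 0);
    assert(trim_and_count_words("one two # trailing comment") == 2);
    return 0;
}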


@@ -153,18 +153,18 @@ namespace LAMMPS_NS {
 /**
  * \brief Count words in string
  * \param text string that should be searched
- * \param seperators string containing characters that will be treated as whitespace
+ * \param separators string containing characters that will be treated as whitespace
  * \return number of words found
  */
-size_t count_words(const std::string & text, const std::string & seperators = " \t\r\n\f");
+size_t count_words(const std::string & text, const std::string & separators = " \t\r\n\f");
 /**
  * \brief Count words in a single line, trim anything from '#' onward
  * \param text string that should be trimmed and searched
- * \param seperators string containing characters that will be treated as whitespace
+ * \param separators string containing characters that will be treated as whitespace
  * \return number of words found
  */
-size_t trim_and_count_words(const std::string & text, const std::string & seperators = " \t\r\n\f");
+size_t trim_and_count_words(const std::string & text, const std::string & separators = " \t\r\n\f");
 /**
  * \brief Check if string can be converted to valid integer

@@ -38,12 +38,12 @@ TEST(Tokenizer, two_words) {
     ASSERT_EQ(t.count(), 2);
 }
-TEST(Tokenizer, prefix_seperators) {
+TEST(Tokenizer, prefix_separators) {
     Tokenizer t(" test word", " ");
     ASSERT_EQ(t.count(), 2);
 }
-TEST(Tokenizer, postfix_seperators) {
+TEST(Tokenizer, postfix_separators) {
     Tokenizer t("test word ", " ");
     ASSERT_EQ(t.count(), 2);
 }
@@ -55,7 +55,7 @@ TEST(Tokenizer, iterate_words) {
     ASSERT_EQ(t.count(), 2);
 }
-TEST(Tokenizer, default_seperators) {
+TEST(Tokenizer, default_separators) {
     Tokenizer t(" \r\n test \t word \f");
     ASSERT_THAT(t.next(), Eq("test"));
     ASSERT_THAT(t.next(), Eq("word"));
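The default separator set " \t\r\n\f" documented in the header hunk matches, apart from '\v', the whitespace that C++ stream extraction skips, so the expectations in these tests can be sanity-checked with the standard library alone. A small sketch using the same inputs as the tests above; split_ws is an illustrative helper, not the LAMMPS Tokenizer API.

#include <cassert>
#include <sstream>
#include <string>
#include <vector>

// Illustrative helper, not the LAMMPS Tokenizer: split on stream whitespace
// (space, \t, \n, \v, \f, \r in the classic locale), which covers the
// " \t\r\n\f" default separator set documented above.
static std::vector<std::string> split_ws(const std::string &text) {
    std::istringstream in(text);
    std::vector<std::string> words;
    std::string word;
    while (in >> word) words.push_back(word);
    return words;
}

int main() {
    // Inputs taken from the prefix/postfix/default separator tests above.
    assert(split_ws(" test word").size() == 2);
    assert(split_ws("test word ").size() == 2);
    const auto words = split_ws(" \r\n test \t word \f");
    assert(words.size() == 2 && words[0] == "test" && words[1] == "word");
    return 0;
}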