add docs for TextFileReader class to developer guide

This commit is contained in:
Axel Kohlmeyer 2020-08-31 06:57:16 -04:00
parent 39a9974f3d
commit 33f2cbc713
5 changed files with 76 additions and 3 deletions

View File

@ -424,6 +424,8 @@ INPUT = @LAMMPS_SOURCE_DIR@/utils.cpp \
@LAMMPS_SOURCE_DIR@/input.h \
@LAMMPS_SOURCE_DIR@/tokenizer.cpp \
@LAMMPS_SOURCE_DIR@/tokenizer.h \
@LAMMPS_SOURCE_DIR@/text_file_reader.cpp \
@LAMMPS_SOURCE_DIR@/text_file_reader.h \
# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
# directories that are symbolic links (a Unix file system feature) are excluded

View File

@ -762,6 +762,8 @@ on reading or an unexpected end-of-file state was reached. In that
case, the functions will stop the calculation with an error message,
indicating the name of the problematic file, if possible.
----------
.. doxygenfunction:: sfgets
:project: progguide
@ -785,6 +787,8 @@ return the result of a partial conversion or zero in cases where the
string is not a valid number. This behavior makes it easier to detect
typos or issues when processing input files.
----------
.. doxygenfunction:: numeric
:project: progguide
@ -803,6 +807,8 @@ String processing functions
The following are functions to help with processing strings
and parsing files or arguments.
----------
.. doxygenfunction:: trim
:project: progguide
@ -995,3 +1001,17 @@ This code example should produce the following output:
:project: progguide
:members: what
File reader classes
====================
The purpose of the file reader classes is to simplify the recurring task
of reading and parsing files. They are built on top of the tokenizer
classes discussed in the previous section, but add operations optimized
for processing larger chunks of data efficiently. A brief usage sketch
follows the class documentation below.
----------
.. doxygenclass:: LAMMPS_NS::TextFileReader
:project: progguide
:members:
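
A brief, self-contained usage sketch (the file name ``"example.table"``, the
word counts, and the ``main()`` wrapper are made up for illustration;
error handling for a missing or unreadable file is omitted):

.. code-block:: c++

   #include "text_file_reader.h"
   #include "tokenizer.h"

   #include <cstdio>

   using namespace LAMMPS_NS;

   int main(int, char **)
   {
     TextFileReader reader("example.table", "table");
     reader.ignore_comments = true;       // strip text after '#' (default)

     reader.skip_line();                  // discard a header line
     char *line = reader.next_line(3);    // read until at least 3 words are found
     printf("line: %s\n", line);

     double buf[6];
     reader.next_dvector(buf, 6);         // read the next 6 numbers, possibly spanning lines

     // read 2 more words and parse them with a ValueTokenizer
     ValueTokenizer values = reader.next_values(2, " \t\r\n\f");
     int id     = values.next_int();
     double val = values.next_double();
     printf("id: %d value: %g\n", id, val);
     return 0;
   }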

View File

@ -28,6 +28,17 @@
using namespace LAMMPS_NS;
/** Class for reading and parsing text files
*
* The value of the class member variable *ignore_comments* controls
* whether any text following the pound sign (#) should be ignored (true)
* or not (false). Default: true, i.e. ignore.
*
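* A minimal usage sketch (the file name and type below are placeholders):
* \code{.cpp}
*   TextFileReader reader("some_file.table", "table");
*   char *oneline = reader.next_line();
* \endcode
*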
* \param filename Name of file to be read
* \param filetype Description of file type for error messages
*
* \sa PotentialFileReader */
TextFileReader::TextFileReader(const std::string &filename, const std::string &filetype)
: filename(filename), filetype(filetype), ignore_comments(true)
{
@ -38,10 +49,14 @@ TextFileReader::TextFileReader(const std::string &filename, const std::string &f
}
}
/** Closes the file */
TextFileReader::~TextFileReader() {
fclose(fp);
}
/** Read the next line and ignore it */
void TextFileReader::skip_line() {
char *ptr = fgets(line, MAXLINE, fp);
if (ptr == nullptr) {
@ -50,6 +65,20 @@ void TextFileReader::skip_line() {
}
}
/** Read the next line(s) until *nparams* words have been read.
*
* This reads a line and counts the words in it. If the number
* is less than the requested number, the next line is read and
* appended as well. The output will be a single string with all
* lines read combined. The purpose is to approximate the reading
* behavior of formatted files in Fortran.
*
* If the *ignore_comments* class member has the value *true*,
* then any text read in is truncated at the first '#' character.
*
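* As a hypothetical illustration: if the next two lines in the file
* are "1.0 2.0" and "3.0 4.0 5.0", then
* \code{.cpp}
*   char *buf = reader.next_line(5);
* \endcode
* should return all five numbers combined into a single string.
*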
* \param nparams Number of words that must be read. Default: 0
* \return String with the concatenated text */
char *TextFileReader::next_line(int nparams) {
// concatenate lines until have nparams words
int n = 0;
@ -82,7 +111,6 @@ char *TextFileReader::next_line(int nparams) {
return nullptr;
}
// strip comment
if (ignore_comments && (ptr = strchr(line, '#'))) *ptr = '\0';
@ -97,6 +125,15 @@ char *TextFileReader::next_line(int nparams) {
return line;
}
/** Read lines until *n* doubles have been read and stored in array *list*
*
* This reads lines from the file using the next_line() function,
* and splits them into floating-point numbers using the
* ValueTokenizer class, and stores the numbers in the provided list.
*
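* For example (a hypothetical table of 6 coefficients that may be
* spread over one or more lines):
* \code{.cpp}
*   double coeffs[6];
*   reader.next_dvector(coeffs, 6);
* \endcode
*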
* \param list Pointer to array with suitable storage for *n* doubles
* \param n Number of doubles to be read */
void TextFileReader::next_dvector(double * list, int n) {
int i = 0;
while (i < n) {
@ -116,6 +153,16 @@ void TextFileReader::next_dvector(double * list, int n) {
}
}
/** Read text until *nparams* words are read and passed to a tokenizer object for custom parsing.
*
* This reads lines from the file using the next_line() function
* until at least *nparams* words have been read, and creates a
* ValueTokenizer instance for the read-in text using the given
* separator characters.
*
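* A sketch of typical use (assuming the next words in the file are an
* integer id followed by a floating-point value):
* \code{.cpp}
*   ValueTokenizer v = reader.next_values(2, " \t\r\n\f");
*   int id     = v.next_int();
*   double val = v.next_double();
* \endcode
*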
* \param nparams Number of words to be read
* \param separators String with list of separators.
* \return ValueTokenizer object for the read-in text */
ValueTokenizer TextFileReader::next_values(int nparams, const std::string & separators) {
return ValueTokenizer(next_line(nparams), separators);
}

View File

@ -33,7 +33,7 @@ namespace LAMMPS_NS
FILE *fp;
public:
bool ignore_comments;
bool ignore_comments; //!< Controls whether comments are ignored
TextFileReader(const std::string &filename, const std::string &filetype);
~TextFileReader();

View File

@ -35,7 +35,8 @@ TokenizerException::TokenizerException(const std::string & msg, const std::strin
* This tokenizer will break down a string into sub-strings (i.e. words)
* separated by the given separator characters.
*
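* A short example (illustrative only):
* \code{.cpp}
*   Tokenizer words("one, two three", " ,");
*   std::vector<std::string> list = words.as_vector();  // {"one", "two", "three"}
* \endcode
*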
* \exception TokenizerException
* \param str string to be processed
* \param separators string with separator characters (default: " \t\r\n\f")
*
* \sa ValueTokenizer TokenizerException */
@ -148,6 +149,9 @@ std::vector<std::string> Tokenizer::as_vector() {
}
/*! Class for reading text with numbers
*
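* A short example (illustrative only):
* \code{.cpp}
*   ValueTokenizer values("5 1.5 text", " \t\r\n\f");
*   int i         = values.next_int();     // 5
*   double d      = values.next_double();  // 1.5
*   std::string s = values.next_string();  // "text"
* \endcode
*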
* \param str string to be processed
* \param separators string with separator characters (default: " \t\r\n\f")
*
* \sa Tokenizer InvalidIntegerException InvalidFloatException */