1 """The f90nml namelist parser. 3 The ``Parser`` object converts the contents of a Fortran namelist into a 4 hierarchy of Python dicts containing equivalent intrinsic Python data types. 6 :copyright: Copyright 2014 Marshall Ward, see AUTHORS for details. 7 :license: Apache License, Version 2.0, see LICENSE for details. 9 from __future__
import print_function
12 from string
import whitespace
15 from f90nml.fpy import pyfloat, pycomplex, pybool, pystr
22 """Fortran namelist parser.""" 25 """Create the parser object.""" 44 """List of tokens used to designate comments in a namelist file. 46 Some Fortran programs will introduce alternative comment tokens (e.g. 47 ``#``) for internal preprocessing. 49 If you need to support these tokens, create a ``Parser`` object and set 50 the comment token as follows: 52 >>> parser = f90nml.Parser() 53 >>> parser.comment_tokens += '#' 54 >>> nml = Parser.read('sample.nml') 56 Be aware that this is non-standard Fortran and could mangle any strings 57 using the ``#`` characters. Characters inside string delimiters should 62 @comment_tokens.setter
64 """Validate and set the comment token string.""" 65 if not isinstance(value, str):
66 raise TypeError(
'comment_tokens attribute must be a string.')
71 """Assumed starting index for a vector (Default: 1). 73 Since Fortran allows users to set an arbitrary start index, it is not 74 always possible to assign an index to values when no index range has 77 For example, in the namelist ``idx.nml`` shown below, the index of the 78 values in the second assignment are ambiguous and depend on the 79 implicit starting index. 81 .. code-block:: fortran 88 The indices of the second entry in ``v`` are ambiguous. The result for 89 different values of ``default_start_index`` are shown below. 91 >>> parser = f90nml.Parser() 92 >>> parser.default_start_index = 1 93 >>> nml = parser.read('idx.nml') 94 >>> nml['idx_nml']['v'] 97 >>> parser.default_start_index = 0 98 >>> nml = parser.read('idx.nml') 99 >>> nml['idx_nml']['v'] 100 [1, 2, None, 3, 4, 5] 104 @default_start_index.setter
106 """Validate and set the default start index.""" 107 if not isinstance(value, int):
108 raise TypeError(
'default_start_index attribute must be of int ' 114 """Store unset rows of multidimensional arrays as empty lists. 116 Enabling this flag will replace rows of unset values with empty lists, 117 and will also not pad any existing rows when other rows are expanded. 119 This is not a true sparse representation, but rather is slightly more 120 sparse than the default dense array representation. 124 @sparse_arrays.setter
126 """Validate and enable spare arrays.""" 127 if not isinstance(value, bool):
128 raise TypeError(
'sparse_arrays attribute must be a logical type.')
133 """Define an explicit start index for all vectors. 135 When set to ``None``, vectors are assumed to start at the lowest 136 specified index. If no index appears in the namelist, then 137 ``default_start_index`` is used. 139 When ``global_start_index`` is set, then all vectors will be created 140 using this starting index. 142 For the namelist file ``idx.nml`` shown below, 144 .. code-block:: fortran 150 the following Python code behaves as shown below. 152 >>> parser = f90nml.Parser() 153 >>> nml = parser.read('idx.nml') 154 >>> nml['idx_nml']['v'] 157 >>> parser.global_start_index = 1 158 >>> nml = parser.read('idx.nml') 159 >>> nml['idx_nml']['v'] 160 [None, None, 3, 4, 5] 162 Currently, this property expects a scalar, and applies this value to 167 @global_start_index.setter
169 """Set the global start index.""" 170 if not isinstance(value, int)
and value
is not None:
171 raise TypeError(
'global_start_index attribute must be of int ' 177 """Read multidimensional arrays in row-major format. 179 Multidimensional array data contiguity is preserved by default, so that 180 column-major Fortran data is represented as row-major Python list of 183 The ``row_major`` flag will reorder the data to preserve the index 184 rules between Fortran to Python, but the data will be converted to 185 row-major form (with respect to Fortran). 191 """Validate and set row-major format for multidimensional arrays.""" 192 if value
def row_major(self, value):
    """Validate and set row-major format for multidimensional arrays.

    A ``None`` value is silently ignored (the flag is left unchanged).

    Raises:
        ValueError: If ``value`` is neither ``bool`` nor ``None``.
    """
    if value is not None:
        if not isinstance(value, bool):
            # NOTE(review): the raise statement and assignment were lost
            # in extraction; reconstructed from the visible message --
            # confirm against upstream f90nml.
            raise ValueError(
                'f90nml: error: row_major must be a logical value.')
        self._row_major = value
201 """Use strict rules for parsing logical data value parsing. 203 The ``strict_logical`` flag will limit the parsing of non-delimited 204 logical strings as logical values. The default value is ``True``. 206 When ``strict_logical`` is enabled, only ``.true.``, ``.t.``, ``true``, 207 and ``t`` are interpreted as ``True``, and only ``.false.``, ``.f.``, 208 ``false``, and ``f`` are interpreted as false. 210 When ``strict_logical`` is disabled, any value starting with ``.t`` or 211 ``t`` is interpreted as ``True``, while any string starting with ``.f`` 212 or ``f`` is interpreted as ``False``, as described in the Fortran 213 specification. However, it can interfere with namelists which contain 214 strings which do not use delimiters. 218 @strict_logical.setter
220 """Validate and set the strict logical flag.""" 221 if value
is not None:
222 if not isinstance(value, bool):
224 'f90nml: error: strict_logical must be a logical value.')
228 def read(self, nml_fname, nml_patch_in=None, patch_fname=None):
229 """Parse a Fortran namelist file and store the contents. 231 >>> parser = f90nml.Parser() 232 >>> data_nml = parser.read('data.nml') 235 nml_is_path =
not hasattr(nml_fname,
'read')
236 patch_is_path =
not hasattr(patch_fname,
'read')
240 if not isinstance(nml_patch_in, dict):
241 raise ValueError(
'Input patch must be a dict or a Namelist.')
243 nml_patch = copy.deepcopy(
Namelist(nml_patch_in))
245 if not patch_fname
and nml_is_path:
246 patch_fname = nml_fname +
'~' 247 elif not patch_fname:
248 raise ValueError(
'f90nml: error: No output file for patch.')
249 elif nml_fname == patch_fname:
250 raise ValueError(
'f90nml: error: Patch filepath cannot be the ' 251 'same as the original filepath.')
253 self.
pfile = open(patch_fname,
'w')
255 self.
pfile = patch_fname
260 nml_file = open(nml_fname,
'r') if nml_is_path else nml_fname 269 if self.
pfile and patch_is_path:
272 def _readstream(self, nml_file, nml_patch):
273 """Parse an input stream containing a Fortran namelist.""" 276 for line
in nml_file:
277 toks = tokenizer.parse(line)
278 while tokenizer.prior_delim:
279 new_toks = tokenizer.parse(next(nml_file))
288 if new_toks[0].isspace():
289 toks[-1] += new_toks.pop(0)
293 toks[-1] += new_toks[0]
296 toks.extend(new_toks[1:])
301 self.
tokens = iter(f90lex)
308 except StopIteration:
315 if self.
token ==
'end':
319 while self.
token not in (
'&',
'$'):
322 except StopIteration:
333 grp_patch = nml_patch.get(g_name.lower(), {})
338 if self.
token not in (
'=',
'%',
'('):
342 if self.
token in (
'=',
'(',
'%'):
350 v_prior_values = g_vars[v_name]
353 g_vars[v_name] = v_values
360 if self.
token in (
'/',
'&',
'$'):
363 for v_name, v_val
in grp_patch.items():
364 g_vars[v_name] = v_val
365 v_strs = nmls._var_strings(v_name, v_val)
371 g_update = nmls[g_name]
374 if not isinstance(g_update, list):
375 g_update = [g_update]
377 g_update.append(g_vars)
382 nmls[g_name] = g_update
385 g_name, g_vars =
None,
None 389 except StopIteration:
394 def _parse_variable(self, parent, patch_nml=None):
395 """Parse a variable and return its name and values.""" 408 if self.
token ==
'(':
414 if v_name.lower()
in parent.start_index:
415 p_idx = parent.start_index[v_name.lower()]
417 for idx, pv
in enumerate(zip(p_idx, v_idx.first)):
418 if all(i
is None for i
in pv):
421 i_first = min(i
for i
in pv
if i
is not None)
423 v_idx.first[idx] = i_first
426 for i_p, i_v
in zip(p_idx, v_idx.first):
427 if i_p
is not None and i_v
is not None and i_v < i_p:
428 pad = [
None for _
in range(i_p - i_v)]
429 parent[v_name] = pad + parent[v_name]
437 for _
in v_idx.first]
439 parent.start_index[v_name.lower()] = v_idx.first
446 if self.
token ==
'%':
447 assert v_idx_bounds[0][1] - v_idx_bounds[0][0] == 1
448 dt_idx = v_idx_bounds[0][0] - v_idx.first[0]
460 if v_name
in parent.start_index:
461 p_start = parent.start_index[v_name.lower()]
465 for i_p, i_v
in zip(p_start, v_start):
467 pad = [
None for _
in range(i_p - i_v)]
468 parent[v_name] = pad + parent[v_name]
470 parent.start_index[v_name.lower()] = v_start
472 if self.
token ==
'%':
478 if v_name
in patch_nml:
479 v_patch_nml = patch_nml.pop(v_name.lower())
482 vpar = parent.get(v_name.lower())
483 if vpar
and isinstance(vpar, list):
484 assert dt_idx
is not None 486 v_parent = vpar[dt_idx]
495 parent[v_name] = v_parent
502 patch_nml=v_patch_nml
506 next_value[v_att] = v_att_vals
512 assert self.
token ==
'=' 520 if v_name
in patch_nml:
521 patch_values = patch_nml.pop(v_name.lower())
523 if not isinstance(patch_values, list):
524 patch_values = [patch_values]
529 while (self.
token not in (
'=',
'(',
'%')
or 533 if self.
token ==
'*':
535 assert isinstance(n_vals, int)
542 if (self.
token in (
',',
'/',
'&',
'$')
and 544 self.
token in (
'/',
'&',
'$'))):
549 if self.
token not in (
'/',
'&',
'$'):
552 if (self.
token ==
'=' or (self.
token in (
'/',
'&',
'$')
and 568 if self.
token in (
'/',
'&',
'$',
'='):
572 if (p_idx < len(patch_values)
and 573 len(patch_values) > 0
and self.
token !=
','):
574 p_val = patch_values[p_idx]
575 p_repr = patch_nml._f90repr(patch_values[p_idx])
578 if isinstance(p_val, complex):
588 skip = (p_idx >= len(patch_values))
594 v_values = patch_values
597 v_values =
delist(v_values)
599 return v_name, v_values
def _parse_indices(self):
    """Parse a sequence of Fortran vector indices as a list of tuples.

    Consumes tokens while the current token opens another index group
    (``(`` for the first dimension, ``,`` for subsequent ones) and
    delegates each group to ``_parse_index``.
    """
    # NOTE(review): initialisation, append, and return were lost in
    # extraction; reconstructed -- confirm against upstream f90nml.
    v_name = self.prior_token
    v_indices = []

    while self.token in (',', '('):
        v_indices.append(self._parse_index(v_name))

    return v_indices
611 def _parse_index(self, v_name):
612 """Parse Fortran vector indices into a tuple of Python indices.""" 613 i_start = i_end = i_stride =
None 618 i_start = int(self.
token)
621 if self.
token in (
',',
')'):
622 raise ValueError(
'{0} index cannot be empty.'.format(v_name))
623 elif not self.
token ==
':':
627 if self.
token ==
':':
630 i_end = 1 + int(self.
token)
633 if self.
token ==
':':
634 raise ValueError(
'{0} end index cannot be implicit ' 635 'when using stride.'.format(v_name))
636 elif self.
token not in (
',',
')'):
638 elif self.
token in (
',',
')'):
644 if self.
token ==
':':
647 i_stride = int(self.
token)
649 if self.
token ==
')':
650 raise ValueError(
'{0} stride index cannot be ' 651 'implicit.'.format(v_name))
656 raise ValueError(
'{0} stride index cannot be zero.' 661 if self.
token not in (
',',
')'):
662 raise ValueError(
'{0} index did not terminate ' 663 'correctly.'.format(v_name))
665 idx_triplet = (i_start, i_end, i_stride)
668 def _parse_value(self, write_token=True, override=None):
669 """Convert string repr of Fortran type to equivalent Python type.""" 677 assert self.
token ==
',' 683 assert self.
token ==
')' 686 v_str =
'({0}, {1})'.format(v_re, v_im)
688 recast_funcs = [int, pyfloat, pycomplex, pybool, pystr]
690 for f90type
in recast_funcs:
693 if f90type == pybool:
696 value = f90type(v_str)
701 def _update_tokens(self, write_token=True, override=None,
703 """Update tokens to the next available values.""" 704 next_token = next(self.
tokens)
709 if self.
pfile and write_token:
710 token = override
if override
else self.
token 716 while not next_token ==
'\n':
717 patch_tokens += next_token
718 next_token = next(self.
tokens)
719 patch_tokens += next_token
724 next_token = next(self.
tokens)
725 except StopIteration:
726 if not patch_skip
or next_token
in (
'=',
'(',
'%'):
727 patch_tokens = patch_value + patch_tokens
734 if not patch_skip
or next_token
in (
'=',
'(',
'%'):
735 patch_tokens = patch_value + patch_tokens
743 def _append_value(self, v_values, next_value, v_idx=None, n_vals=1):
744 """Update a list of parsed values with a new value.""" 745 for _
in range(n_vals):
749 except StopIteration:
752 if next_value
is not None:
753 print(
'f90nml: warning: Value {0} is not assigned to ' 754 'any variable and has been removed.' 755 ''.format(next_value), file=sys.stderr)
761 for idx
in v_idx.first]
777 for (i_v, i_s)
in zip(v_i[:-1], v_s[:-1]):
779 v_subval = v_subval[i_v - i_s]
782 v_subval.extend([]
for _
in range(size, i_v - i_s + 1))
783 v_subval = v_subval[i_v - i_s]
786 i_v, i_s = v_i[-1], v_s[-1]
788 v_subval[i_v - i_s] = next_value
791 v_subval.extend(
None for _
in range(size, i_v - i_s + 1))
792 v_subval[i_v - i_s] = next_value
794 v_values.append(next_value)
800 """Expand lists in multidimensional arrays to pad unset values.""" 805 v.extend([[]
for _
in range(len(v), i_v - i_s + 1)])
811 v.extend([
None for _
in range(len(v), i_v - i_s + 1)])
815 """Merge two lists or dicts into a single element.""" 816 if isinstance(src, dict)
and isinstance(new, dict):
819 if not isinstance(src, list):
821 if not isinstance(new, list):
828 """Update a value list with a list of new or updated values.""" 829 l_min, l_max = (src, new)
if len(src) < len(new)
else (new, src)
831 l_min.extend(
None for i
in range(len(l_min), len(l_max)))
833 for i, val
in enumerate(new):
834 if isinstance(val, dict)
and isinstance(src[i], dict):
836 elif isinstance(val, list)
and isinstance(src[i], list):
838 elif val
is not None:
847 """Merge contents of dict `patch` into `src`.""" 850 if isinstance(src[key], dict)
and isinstance(patch[key], dict):
855 src[key] = patch[key]
861 """Reduce lists of zero or one elements to individual values.""" 862 assert isinstance(values, list)
866 elif len(values) == 1:
def _parse_index(self, v_name)
def _parse_value(self, write_token=True, override=None)
def _parse_variable(self, parent, patch_nml=None)
def merge_values(src, new)
def _append_value(self, v_values, next_value, v_idx=None, n_vals=1)
def merge_dicts(src, patch)
def _readstream(self, nml_file, nml_patch)
def read(self, nml_fname, nml_patch_in=None, patch_fname=None)
def default_start_index(self)
def write(nml, nml_path, force=False, sort=False)
def merge_lists(src, new)
def _update_tokens(self, write_token=True, override=None, patch_skip=False)
def global_start_index(self)