parser.py
1 """The f90nml namelist parser.
2 
3 The ``Parser`` object converts the contents of a Fortran namelist into a
4 hierarchy of Python dicts containing equivalent intrinsic Python data types.
5 
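Example usage (an illustrative sketch; ``config.nml`` stands in for any
namelist file):

>>> import f90nml
>>> parser = f90nml.Parser()
>>> nml = parser.read('config.nml')
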
:copyright: Copyright 2014 Marshall Ward, see AUTHORS for details.
:license: Apache License, Version 2.0, see LICENSE for details.
"""
from __future__ import print_function

import copy
from string import whitespace
import sys

from f90nml.fpy import pyfloat, pycomplex, pybool, pystr
from f90nml.namelist import Namelist
from f90nml.findex import FIndex
from f90nml.tokenizer import Tokenizer


class Parser(object):
    """Fortran namelist parser."""

    def __init__(self):
        """Create the parser object."""
        # Token management
        self.tokens = None
        self.token = None
        self.prior_token = None

        # Patching
        self.pfile = None

        # Configuration
        self._default_start_index = 1
        self._global_start_index = None
        self._comment_tokens = '!'
        self._sparse_arrays = False
        self._row_major = False
        self._strict_logical = True

    @property
    def comment_tokens(self):
        """List of tokens used to designate comments in a namelist file.

        Some Fortran programs will introduce alternative comment tokens
        (e.g. ``#``) for internal preprocessing.

        If you need to support these tokens, create a ``Parser`` object and
        set the comment token as follows:

        >>> parser = f90nml.Parser()
        >>> parser.comment_tokens += '#'
        >>> nml = parser.read('sample.nml')

        Be aware that this is non-standard Fortran and could mangle any
        strings containing the ``#`` character.  Characters inside string
        delimiters should be protected.
        """
        return self._comment_tokens

    @comment_tokens.setter
    def comment_tokens(self, value):
        """Validate and set the comment token string."""
        if not isinstance(value, str):
            raise TypeError('comment_tokens attribute must be a string.')
        self._comment_tokens = value

    @property
    def default_start_index(self):
        """Assumed starting index for a vector (Default: 1).

        Since Fortran allows users to set an arbitrary start index, it is
        not always possible to assign an index to values when no index range
        has been provided.

        For example, in the namelist ``idx.nml`` shown below, the indices of
        the values in the second assignment are ambiguous and depend on the
        implicit starting index.

        .. code-block:: fortran

           &idx_nml
               v(3:5) = 3, 4, 5
               v = 1, 2
           /

        The results for different values of ``default_start_index`` are
        shown below.

        >>> parser = f90nml.Parser()
        >>> parser.default_start_index = 1
        >>> nml = parser.read('idx.nml')
        >>> nml['idx_nml']['v']
        [1, 2, 3, 4, 5]

        >>> parser.default_start_index = 0
        >>> nml = parser.read('idx.nml')
        >>> nml['idx_nml']['v']
        [1, 2, None, 3, 4, 5]
        """
        return self._default_start_index

    @default_start_index.setter
    def default_start_index(self, value):
        """Validate and set the default start index."""
        if not isinstance(value, int):
            raise TypeError('default_start_index attribute must be of int '
                            'type.')
        self._default_start_index = value

    @property
    def sparse_arrays(self):
        """Store unset rows of multidimensional arrays as empty lists.

        Enabling this flag will replace rows of unset values with empty
        lists, and will also not pad any existing rows when other rows are
        expanded.

        This is not a true sparse representation, but rather is slightly
        more sparse than the default dense array representation.
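
        A sketch of the effect (illustrative; ``sparse.nml`` is a
        hypothetical file with the contents shown):

        .. code-block:: fortran

           &sp_nml
               a(1,1) = 1
               a(3,3) = 3
           /

        >>> parser = f90nml.Parser()
        >>> parser.read('sparse.nml')['sp_nml']['a']
        [[1, None, None], [None, None, None], [None, None, 3]]

        >>> parser.sparse_arrays = True
        >>> parser.read('sparse.nml')['sp_nml']['a']
        [[1], [], [None, None, 3]]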
121  """
122  return self._sparse_arrays
123 
124  @sparse_arrays.setter
125  def sparse_arrays(self, value):
126  """Validate and enable spare arrays."""
127  if not isinstance(value, bool):
128  raise TypeError('sparse_arrays attribute must be a logical type.')
129  self._sparse_arrays = value
130 
    @property
    def global_start_index(self):
        """Define an explicit start index for all vectors.

        When set to ``None``, vectors are assumed to start at the lowest
        specified index.  If no index appears in the namelist, then
        ``default_start_index`` is used.

        When ``global_start_index`` is set, then all vectors will be created
        using this starting index.

        For the namelist file ``idx.nml`` shown below,

        .. code-block:: fortran

           &idx_nml
               v(3:5) = 3, 4, 5
           /

        the following Python code behaves as shown below.

        >>> parser = f90nml.Parser()
        >>> nml = parser.read('idx.nml')
        >>> nml['idx_nml']['v']
        [3, 4, 5]

        >>> parser.global_start_index = 1
        >>> nml = parser.read('idx.nml')
        >>> nml['idx_nml']['v']
        [None, None, 3, 4, 5]

        Currently, this property expects a scalar and applies that value to
        all dimensions.
        """
        return self._global_start_index

    @global_start_index.setter
    def global_start_index(self, value):
        """Validate and set the global start index."""
        if not isinstance(value, int) and value is not None:
            raise TypeError('global_start_index attribute must be of int '
                            'type.')
        self._global_start_index = value

    @property
    def row_major(self):
        """Read multidimensional arrays in row-major format.

        Multidimensional array data contiguity is preserved by default, so
        that column-major Fortran data is represented as a row-major Python
        list of lists.

        The ``row_major`` flag will reorder the data to preserve the index
        rules between Fortran and Python, but the data will be converted to
        row-major form (with respect to Fortran).
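
        An illustrative sketch (``row.nml`` is a hypothetical file with the
        contents shown):

        .. code-block:: fortran

           &row_nml
               v(1:2, 1:3) = 1, 2, 3, 4, 5, 6
           /

        >>> parser = f90nml.Parser()
        >>> parser.read('row.nml')['row_nml']['v']
        [[1, 2], [3, 4], [5, 6]]

        >>> parser.row_major = True
        >>> parser.read('row.nml')['row_nml']['v']
        [[1, 3, 5], [2, 4, 6]]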
186  """
187  return self._row_major
188 
189  @row_major.setter
190  def row_major(self, value):
191  """Validate and set row-major format for multidimensional arrays."""
192  if value is not None:
193  if not isinstance(value, bool):
194  raise ValueError(
195  'f90nml: error: row_major must be a logical value.')
196  else:
197  self._row_major = value
198 
    @property
    def strict_logical(self):
        """Use strict rules when parsing logical data values.

        The ``strict_logical`` flag will limit the parsing of non-delimited
        logical strings as logical values.  The default value is ``True``.

        When ``strict_logical`` is enabled, only ``.true.``, ``.t.``,
        ``true``, and ``t`` are interpreted as ``True``, and only
        ``.false.``, ``.f.``, ``false``, and ``f`` are interpreted as
        ``False``.

        When ``strict_logical`` is disabled, any value starting with ``.t``
        or ``t`` is interpreted as ``True``, while any string starting with
        ``.f`` or ``f`` is interpreted as ``False``, as described in the
        Fortran specification.  However, this can interfere with namelists
        which contain non-delimited strings.
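
        An illustrative sketch (``logic.nml`` is a hypothetical file with
        the contents shown):

        .. code-block:: fortran

           &logic_nml
               x = tuesday
           /

        >>> parser = f90nml.Parser()
        >>> parser.read('logic.nml')['logic_nml']['x']
        'tuesday'

        >>> parser.strict_logical = False
        >>> parser.read('logic.nml')['logic_nml']['x']
        True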
215  """
216  return self._strict_logical
217 
218  @strict_logical.setter
219  def strict_logical(self, value):
220  """Validate and set the strict logical flag."""
221  if value is not None:
222  if not isinstance(value, bool):
223  raise ValueError(
224  'f90nml: error: strict_logical must be a logical value.')
225  else:
226  self._strict_logical = value
227 
    def read(self, nml_fname, nml_patch_in=None, patch_fname=None):
        """Parse a Fortran namelist file and store the contents.

        >>> parser = f90nml.Parser()
        >>> data_nml = parser.read('data.nml')
        """
        # For switching based on files versus paths
        nml_is_path = not hasattr(nml_fname, 'read')
        patch_is_path = not hasattr(patch_fname, 'read')

        # Convert patch data to a Namelist object
        if nml_patch_in:
            if not isinstance(nml_patch_in, dict):
                raise ValueError('Input patch must be a dict or a Namelist.')

            nml_patch = copy.deepcopy(Namelist(nml_patch_in))

            if not patch_fname and nml_is_path:
                patch_fname = nml_fname + '~'
            elif not patch_fname:
                raise ValueError('f90nml: error: No output file for patch.')
            elif nml_fname == patch_fname:
                raise ValueError('f90nml: error: Patch filepath cannot be '
                                 'the same as the original filepath.')
            if patch_is_path:
                self.pfile = open(patch_fname, 'w')
            else:
                self.pfile = patch_fname
        else:
            nml_patch = Namelist()

        try:
            nml_file = open(nml_fname, 'r') if nml_is_path else nml_fname
            try:
                return self._readstream(nml_file, nml_patch)

            # Close the files we opened on any exceptions within readstream
            finally:
                if nml_is_path:
                    nml_file.close()
        finally:
            if self.pfile and patch_is_path:
                self.pfile.close()

    def _readstream(self, nml_file, nml_patch):
        """Parse an input stream containing a Fortran namelist."""
        tokenizer = Tokenizer()
        f90lex = []
        for line in nml_file:
            toks = tokenizer.parse(line)
            while tokenizer.prior_delim:
                new_toks = tokenizer.parse(next(nml_file))

                # Skip empty lines
                if not new_toks:
                    continue

                # The tokenizer always pre-tokenizes the whitespace (leftover
                # behaviour from Fortran source parsing) so this must be
                # added manually.
                if new_toks[0].isspace():
                    toks[-1] += new_toks.pop(0)

                # Append the rest of the string (if present)
                if new_toks:
                    toks[-1] += new_toks[0]

                # Attach the rest of the tokens
                toks.extend(new_toks[1:])

            toks.append('\n')
            f90lex.extend(toks)

        self.tokens = iter(f90lex)

        nmls = Namelist()

        # Attempt to get first token; abort on empty file
        try:
            self._update_tokens(write_token=False)
        except StopIteration:
            return nmls

        # TODO: Replace "while True" with an update_token() iterator
        while True:
            try:
                # Check for classic group terminator
                if self.token == 'end':
                    self._update_tokens()

                # Ignore tokens outside of namelist groups
                while self.token not in ('&', '$'):
                    self._update_tokens()

            except StopIteration:
                break

            # Create the next namelist
            self._update_tokens()
            g_name = self.token

            g_vars = Namelist()
            v_name = None

            # TODO: Edit `Namelist` to support case-insensitive `get` calls
            grp_patch = nml_patch.get(g_name.lower(), {})

            # Populate the namelist group
            while g_name:

                if self.token not in ('=', '%', '('):
                    self._update_tokens()

                # Set the next active variable
                if self.token in ('=', '(', '%'):

                    v_name, v_values = self._parse_variable(
                        g_vars,
                        patch_nml=grp_patch
                    )

                    if v_name in g_vars:
                        v_prior_values = g_vars[v_name]
                        v_values = merge_values(v_prior_values, v_values)

                    g_vars[v_name] = v_values

                    # Deselect variable
                    v_name = None
                    v_values = []

                # Finalise namelist group
                if self.token in ('/', '&', '$'):

                    # Append any remaining patched variables
                    for v_name, v_val in grp_patch.items():
                        g_vars[v_name] = v_val
                        v_strs = nmls._var_strings(v_name, v_val)
                        for v_str in v_strs:
                            self.pfile.write('    {0}\n'.format(v_str))

                    # Append the grouplist to the namelist
                    if g_name in nmls:
                        g_update = nmls[g_name]

                        # Update to list of groups
                        if not isinstance(g_update, list):
                            g_update = [g_update]

                        g_update.append(g_vars)

                    else:
                        g_update = g_vars

                    nmls[g_name] = g_update

                    # Reset state
                    g_name, g_vars = None, None

                try:
                    self._update_tokens()
                except StopIteration:
                    break

        return nmls

    def _parse_variable(self, parent, patch_nml=None):
        """Parse a variable and return its name and values."""
        if not patch_nml:
            patch_nml = Namelist()

        v_name = self.prior_token
        v_values = []

        # Patch state
        patch_values = None

        # Derived type parent index (see notes below)
        dt_idx = None

        if self.token == '(':

            v_idx_bounds = self._parse_indices()
            v_idx = FIndex(v_idx_bounds, self.global_start_index)

            # Update starting index against namelist record
            if v_name.lower() in parent.start_index:
                p_idx = parent.start_index[v_name.lower()]

                for idx, pv in enumerate(zip(p_idx, v_idx.first)):
                    if all(i is None for i in pv):
                        i_first = None
                    else:
                        i_first = min(i for i in pv if i is not None)

                    v_idx.first[idx] = i_first

                # Resize vector based on starting index
                for i_p, i_v in zip(p_idx, v_idx.first):
                    if i_p is not None and i_v is not None and i_v < i_p:
                        pad = [None for _ in range(i_p - i_v)]
                        parent[v_name] = pad + parent[v_name]

            else:
                # If the variable already existed without an index, then
                # assume a 1-based index
                # FIXME: Need to respect undefined `None` starting indexes?
                if v_name in parent:
                    v_idx.first = [self.default_start_index
                                   for _ in v_idx.first]

            parent.start_index[v_name.lower()] = v_idx.first

            self._update_tokens()

            # Derived type parent check
            # NOTE: This assumes single-dimension derived type vectors
            #       (which I think is the only case supported in Fortran)
            if self.token == '%':
                assert v_idx_bounds[0][1] - v_idx_bounds[0][0] == 1
                dt_idx = v_idx_bounds[0][0] - v_idx.first[0]

                # NOTE: Calling `_parse_variable` here would be the sensible
                #       play, but it is not yet clear how to implement it, so
                #       we currently pass `dt_idx` along to the `%` handler.

        else:
            v_idx = None

            # If an indexed variable already exists, then re-index this new
            # non-indexed variable using the global start index
            if v_name.lower() in parent.start_index:
                p_start = parent.start_index[v_name.lower()]
                v_start = [self.default_start_index for _ in p_start]

                # Resize vector based on the new starting index
                for i_p, i_v in zip(p_start, v_start):
                    if i_v < i_p:
                        pad = [None for _ in range(i_p - i_v)]
                        parent[v_name] = pad + parent[v_name]

                parent.start_index[v_name.lower()] = v_start

        if self.token == '%':

            # Resolve the derived type

            # Check for value in patch
            v_patch_nml = None
            if v_name in patch_nml:
                v_patch_nml = patch_nml.pop(v_name.lower())

            if parent:
                vpar = parent.get(v_name.lower())
                if vpar and isinstance(vpar, list):
                    assert dt_idx is not None
                    try:
                        v_parent = vpar[dt_idx]
                    except IndexError:
                        v_parent = Namelist()
                elif vpar:
                    v_parent = vpar
                else:
                    v_parent = Namelist()
            else:
                v_parent = Namelist()
                parent[v_name] = v_parent

            self._update_tokens()
            self._update_tokens()

            v_att, v_att_vals = self._parse_variable(
                v_parent,
                patch_nml=v_patch_nml
            )

            next_value = Namelist()
            next_value[v_att] = v_att_vals
            self._append_value(v_values, next_value, v_idx)

        else:
            # Construct the variable array

            assert self.token == '='
            n_vals = None

            self._update_tokens()

            # Check if the value is in the namelist patch
            # TODO: Edit `Namelist` to support case-insensitive `pop` calls
            #       (Currently only a problem in PyPy2)
            if v_name in patch_nml:
                patch_values = patch_nml.pop(v_name.lower())

                if not isinstance(patch_values, list):
                    patch_values = [patch_values]

                p_idx = 0

            # Add values until the next variable trigger
            while (self.token not in ('=', '(', '%') or
                   (self.prior_token, self.token) in (('=', '('),
                                                      (',', '('))):

                # Check for repeated values
                if self.token == '*':
                    n_vals = self._parse_value()
                    assert isinstance(n_vals, int)
                    self._update_tokens()
                elif not n_vals:
                    n_vals = 1

                # First check for implicit null values
                if self.prior_token in ('=', '%', ','):
                    if (self.token in (',', '/', '&', '$') and
                            not (self.prior_token == ',' and
                                 self.token in ('/', '&', '$'))):
                        self._append_value(v_values, None, v_idx, n_vals)

                elif self.prior_token == '*':

                    if self.token not in ('/', '&', '$'):
                        self._update_tokens()

                    if (self.token == '=' or
                            (self.token in ('/', '&', '$') and
                             self.prior_token == '*')):
                        next_value = None
                    else:
                        next_value = self._parse_value()

                    self._append_value(v_values, next_value, v_idx, n_vals)

                else:
                    next_value = self._parse_value()
                    self._append_value(v_values, next_value, v_idx, n_vals)

                # Reset the default repeat factor for subsequent values
                n_vals = 1

                # Exit for end of nml group (/, &, $) or null broadcast (=)
                if self.token in ('/', '&', '$', '='):
                    break
                else:
                    if patch_values:
                        if (p_idx < len(patch_values) and
                                len(patch_values) > 0 and
                                self.token != ','):
                            p_val = patch_values[p_idx]
                            p_repr = patch_nml._f90repr(patch_values[p_idx])
                            p_idx += 1
                            self._update_tokens(override=p_repr)
                            if isinstance(p_val, complex):
                                # Skip over the complex content
                                # NOTE: Assumes input and patch are complex
                                self._update_tokens(write_token=False)
                                self._update_tokens(write_token=False)
                                self._update_tokens(write_token=False)
                                self._update_tokens(write_token=False)

                        else:
                            # Skip any values beyond the patch size
                            skip = (p_idx >= len(patch_values))
                            self._update_tokens(patch_skip=skip)
                    else:
                        self._update_tokens()

        if patch_values:
            v_values = patch_values

        if not v_idx:
            v_values = delist(v_values)

        return v_name, v_values

    def _parse_indices(self):
        """Parse a sequence of Fortran vector indices as a list of tuples."""
        v_name = self.prior_token
        v_indices = []

        while self.token in (',', '('):
            v_indices.append(self._parse_index(v_name))

        return v_indices

    def _parse_index(self, v_name):
        """Parse Fortran vector indices into a tuple of Python indices."""
        i_start = i_end = i_stride = None

        # Start index
        self._update_tokens()
        try:
            i_start = int(self.token)
            self._update_tokens()
        except ValueError:
            if self.token in (',', ')'):
                raise ValueError('{0} index cannot be empty.'.format(v_name))
            elif not self.token == ':':
                raise

        # End index
        if self.token == ':':
            self._update_tokens()
            try:
                i_end = 1 + int(self.token)
                self._update_tokens()
            except ValueError:
                if self.token == ':':
                    raise ValueError('{0} end index cannot be implicit '
                                     'when using stride.'.format(v_name))
                elif self.token not in (',', ')'):
                    raise
        elif self.token in (',', ')'):
            # Replace a bare single index with a single-index range
            if i_start is not None:
                i_end = 1 + i_start

        # Stride index
        if self.token == ':':
            self._update_tokens()
            try:
                i_stride = int(self.token)
            except ValueError:
                if self.token == ')':
                    raise ValueError('{0} stride index cannot be '
                                     'implicit.'.format(v_name))
                else:
                    raise

            if i_stride == 0:
                raise ValueError('{0} stride index cannot be zero.'
                                 ''.format(v_name))

            self._update_tokens()

        if self.token not in (',', ')'):
            raise ValueError('{0} index did not terminate '
                             'correctly.'.format(v_name))

        idx_triplet = (i_start, i_end, i_stride)
        return idx_triplet

    def _parse_value(self, write_token=True, override=None):
        """Convert string repr of Fortran type to equivalent Python type."""
        v_str = self.prior_token

        # Construct the complex string
        if v_str == '(':
            v_re = self.token

            self._update_tokens(write_token)
            assert self.token == ','

            self._update_tokens(write_token)
            v_im = self.token

            self._update_tokens(write_token)
            assert self.token == ')'

            self._update_tokens(write_token, override)
            v_str = '({0}, {1})'.format(v_re, v_im)

        recast_funcs = [int, pyfloat, pycomplex, pybool, pystr]

        for f90type in recast_funcs:
            try:
                # Unclever hack; the pybool special case should be
                # integrated better
                if f90type == pybool:
                    value = pybool(v_str, self.strict_logical)
                else:
                    value = f90type(v_str)
                return value
            except ValueError:
                continue

    def _update_tokens(self, write_token=True, override=None,
                       patch_skip=False):
        """Update tokens to the next available values."""
        next_token = next(self.tokens)

        patch_value = ''
        patch_tokens = ''

        if self.pfile and write_token:
            token = override if override else self.token
            patch_value += token

        while next_token[0] in self.comment_tokens + whitespace:
            if self.pfile:
                if next_token[0] in self.comment_tokens:
                    while not next_token == '\n':
                        patch_tokens += next_token
                        next_token = next(self.tokens)
                patch_tokens += next_token

            # Several sections rely on StopIteration to terminate the token
            # search.  If that occurs, dump the patched tokens immediately.
            try:
                next_token = next(self.tokens)
            except StopIteration:
                if not patch_skip or next_token in ('=', '(', '%'):
                    patch_tokens = patch_value + patch_tokens

                if self.pfile:
                    self.pfile.write(patch_tokens)
                raise

        # Write patched values and whitespace + comments to file
        if not patch_skip or next_token in ('=', '(', '%'):
            patch_tokens = patch_value + patch_tokens

        if self.pfile:
            self.pfile.write(patch_tokens)

        # Update tokens, ignoring padding
        self.token, self.prior_token = next_token, self.token

    def _append_value(self, v_values, next_value, v_idx=None, n_vals=1):
        """Update a list of parsed values with a new value."""
        for _ in range(n_vals):
            if v_idx:
                try:
                    v_i = next(v_idx)
                except StopIteration:
                    # Repeating commas are null-statements and can be ignored
                    # Otherwise, we warn the user that this is a bad namelist
                    if next_value is not None:
                        print('f90nml: warning: Value {0} is not assigned to '
                              'any variable and has been removed.'
                              ''.format(next_value), file=sys.stderr)

                    # There are more values than indices, so we stop here
                    break

                v_s = [self.default_start_index if idx is None else idx
                       for idx in v_idx.first]

                if not self.row_major:
                    v_i = v_i[::-1]
                    v_s = v_s[::-1]

                # Multidimensional arrays
                if not self.sparse_arrays:
                    pad_array(v_values, list(zip(v_i, v_s)))

                # We iterate inside v_values and inspect successively deeper
                # lists within the list tree.  If the requested index is
                # missing, we re-size that particular entry.
                # (NOTE: This is unnecessary when sparse_arrays is disabled.)
                v_subval = v_values
                for (i_v, i_s) in zip(v_i[:-1], v_s[:-1]):
                    try:
                        v_subval = v_subval[i_v - i_s]
                    except IndexError:
                        size = len(v_subval)
                        v_subval.extend([] for _ in range(size,
                                                          i_v - i_s + 1))
                        v_subval = v_subval[i_v - i_s]

                # On the deepest level, we explicitly assign the value
                i_v, i_s = v_i[-1], v_s[-1]
                try:
                    v_subval[i_v - i_s] = next_value
                except IndexError:
                    size = len(v_subval)
                    v_subval.extend(None for _ in range(size, i_v - i_s + 1))
                    v_subval[i_v - i_s] = next_value
            else:
                v_values.append(next_value)


# Support functions

def pad_array(v, idx):
    """Expand lists in multidimensional arrays to pad unset values."""
    i_v, i_s = idx[0]

    if len(idx) > 1:
        # Append missing subarrays
        v.extend([[] for _ in range(len(v), i_v - i_s + 1)])

        # Pad elements
        for e in v:
            pad_array(e, idx[1:])
    else:
        v.extend([None for _ in range(len(v), i_v - i_s + 1)])


def merge_values(src, new):
    """Merge two lists or dicts into a single element."""
    if isinstance(src, dict) and isinstance(new, dict):
        return merge_dicts(src, new)
    else:
        if not isinstance(src, list):
            src = [src]
        if not isinstance(new, list):
            new = [new]

        return merge_lists(src, new)


def merge_lists(src, new):
    """Update a value list with a list of new or updated values."""
    l_min, l_max = (src, new) if len(src) < len(new) else (new, src)

    l_min.extend(None for i in range(len(l_min), len(l_max)))

    for i, val in enumerate(new):
        if isinstance(val, dict) and isinstance(src[i], dict):
            new[i] = merge_dicts(src[i], val)
        elif isinstance(val, list) and isinstance(src[i], list):
            new[i] = merge_lists(src[i], val)
        elif val is not None:
            new[i] = val
        else:
            new[i] = src[i]

    return new


def merge_dicts(src, patch):
    """Merge contents of dict `patch` into `src`."""
    for key in patch:
        if key in src:
            if isinstance(src[key], dict) and isinstance(patch[key], dict):
                merge_dicts(src[key], patch[key])
            else:
                src[key] = merge_values(src[key], patch[key])
        else:
            src[key] = patch[key]

    return src


def delist(values):
    """Reduce lists of zero or one elements to individual values."""
    assert isinstance(values, list)

    if not values:
        return None
    elif len(values) == 1:
        return values[0]

    return values