statement_splitter.py

# -*- coding: utf-8 -*-
#
# Copyright (C) 2009-2018 the sqlparse authors and contributors
# <see AUTHORS file>
#
# This module is part of python-sqlparse and is released under
# the BSD License: https://opensource.org/licenses/BSD-3-Clause

from sqlparse import sql, tokens as T


class StatementSplitter(object):
    """Filter that splits the token stream at individual statements."""

    def __init__(self):
        self._reset()

    def _reset(self):
        """Set the filter attributes to their default values."""
        self._in_declare = False
        self._is_create = False
        self._begin_depth = 0

        self.consume_ws = False
        self.tokens = []
        self.level = 0

    def _change_splitlevel(self, ttype, value):
        """Get the new split level (increase, decrease or remain equal)."""
        # parenthesis increase/decrease a level
        if ttype is T.Punctuation and value == '(':
            return 1
        elif ttype is T.Punctuation and value == ')':
            return -1
        elif ttype not in T.Keyword:  # if normal token, return
            return 0

        # Everything after here is ttype = T.Keyword
        # Also note: once we enter one of the if statements below we are done
        # and basically just return
        unified = value.upper()

        # three keywords begin with CREATE, but only one of them is DDL
        # DDL CREATE can, however, contain more words such as "OR REPLACE"
        if ttype is T.Keyword.DDL and unified.startswith('CREATE'):
            self._is_create = True
            return 0

        # can have nested DECLARE inside of BEGIN ...
        if unified == 'DECLARE' and self._is_create and self._begin_depth == 0:
            self._in_declare = True
            return 1

        if unified == 'BEGIN':
            self._begin_depth += 1
            if self._is_create:
                # FIXME(andi): This makes no sense.
                return 1
            return 0

        # Should this respect a preceding BEGIN?
        # In CASE ... WHEN ... END this results in a split level of -1.
        # Would having multiple CASE WHEN END blocks and an assignment
        # operator cause the statement to be cut off prematurely?
        if unified == 'END':
            self._begin_depth = max(0, self._begin_depth - 1)
            return -1

        if (unified in ('IF', 'FOR', 'WHILE')
                and self._is_create and self._begin_depth > 0):
            return 1

        if unified in ('END IF', 'END FOR', 'END WHILE'):
            return -1

        # Default
        return 0

    def process(self, stream):
        """Process the stream."""
        EOS_TTYPE = T.Whitespace, T.Comment.Single

        # Run over all stream tokens
        for ttype, value in stream:
            # Yield the statement if we finished one and the current token is
            # not trailing whitespace. Note that a newline token counts as
            # non-whitespace here; in this context "whitespace" excludes
            # newlines.
            # why don't multi-line comments also count?
            if self.consume_ws and ttype not in EOS_TTYPE:
                yield sql.Statement(self.tokens)

                # Reset filter and prepare to process next statement
                self._reset()

            # Change current split level (increase, decrease or remain equal)
            self.level += self._change_splitlevel(ttype, value)

            # Append the token to the current statement
            self.tokens.append(sql.Token(ttype, value))

            # Check if we reached the end of a statement
            if self.level <= 0 and ttype is T.Punctuation and value == ';':
                self.consume_ws = True

        # Yield pending statement (if any)
        if self.tokens:
            yield sql.Statement(self.tokens)
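

if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module). sqlparse
    # normally drives this filter internally through its engine; here we feed
    # it a (ttype, value) token stream from sqlparse.lexer.tokenize() by hand,
    # assuming that helper is available, just to show the input/output shape.
    from sqlparse import lexer

    token_stream = lexer.tokenize('select 1; select 2;')
    for statement in StatementSplitter().process(token_stream):
        print(repr(str(statement)))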