Coverage for sources/ictr/standard/linearizers.py: 94%

102 statements  

« prev     ^ index     » next       coverage.py v7.13.0, created at 2025-12-12 01:33 +0000

1# vim: set filetype=python fileencoding=utf-8: 

2# -*- coding: utf-8 -*- 

3 

4#============================================================================# 

5# # 

6# Licensed under the Apache License, Version 2.0 (the "License"); # 

7# you may not use this file except in compliance with the License. # 

8# You may obtain a copy of the License at # 

9# # 

10# http://www.apache.org/licenses/LICENSE-2.0 # 

11# # 

12# Unless required by applicable law or agreed to in writing, software # 

13# distributed under the License is distributed on an "AS IS" BASIS, # 

14# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # 

15# See the License for the specific language governing permissions and # 

16# limitations under the License. # 

17# # 

18#============================================================================# 

19 

20 

21''' Conversion of objects to lines of text. ''' 

22 

23 

24from . import __ 

25from . import core as _core 

26 

27 

class Linearizer( __.Linearizer ):
    ''' Converts objects to lines of text with stored default behaviors. '''

    configuration: __.typx.Annotated[
        _core.LinearizerConfiguration,
        __.ddoc.Doc( ''' Default behaviors for textual linearizer. ''' ),
    ] = __.dcls.field( default_factory = _core.LinearizerConfiguration )

    def __call__(
        self,
        control: __.TextualizationControl,
        entity: object,
        columns_max: __.Absential[ int ] = __.absent,
    ) -> tuple[ str, ... ]:
        ''' Linearizes entity, binding control to stored configuration. '''
        state = _core.LinearizerState.from_configuration(
            control = control, configuration = self.configuration )
        return linearize_omni( state, entity, columns_max = columns_max )

44 

45 

def linearize_exception_plain(
    auxdata: _core.LinearizerState,
    exception: BaseException,
    columns_max: __.Absential[ int ] = __.absent,
) -> tuple[ str, ... ]:
    ''' Renders exception as plain text lines.

        Interpolates the configured exception template; appends a
        stack trace when configuration enables them.
    '''
    # TODO: Process '__cause__' and '__context__'.
    # TODO: Process exception groups.
    summary = __.tb.TracebackException.from_exception( exception )
    cfg = auxdata.configuration.exceptionscfg
    lines = list( cfg.interpolate( exception ) )
    if cfg.enable_stacktraces:
        lines += linearize_stacktrace_plain(
            auxdata, summary.stack, columns_max )
    return tuple( lines )

60 

61 

def linearize_exception_rich(
    auxdata: _core.LinearizerState,
    exception: BaseException,
    columns_max: __.Absential[ int ] = __.absent,
) -> tuple[ str, ... ]:
    ''' Renders exception via Rich console into text lines.

        Prints a full Rich traceback when stack traces are enabled;
        otherwise prints the bare exception.
    '''
    # TODO: Ensure that exception groups are handled properly.
    cfg = auxdata.configuration.exceptionscfg
    renderable: object = exception
    if cfg.enable_stacktraces:
        renderable = __.rich_traceback.Traceback.from_exception(
            type( exception ), exception, exception.__traceback__ )
    buffer = __.io.StringIO( )
    console = __.produce_rich_console( auxdata.control, buffer, columns_max )
    console.print( renderable )
    return tuple( buffer.getvalue( ).split( '\n' ) )

78 

79 

def linearize_object_plain(
    auxdata: _core.LinearizerState,
    entity: object,
    columns_max: __.Absential[ int ] = __.absent,
) -> tuple[ str, ... ]:
    ''' Renders arbitrary object as plain text lines via pprint. '''
    # TODO? Pass configurable indentation width.
    if __.is_absent( columns_max ):
        rendition = __.pprint.saferepr( entity )
    else:
        rendition = __.pprint.pformat(
            entity, indent = 2, width = columns_max )
    return tuple( rendition.split( '\n' ) )

90 

91 

def linearize_object_rich(
    auxdata: _core.LinearizerState,
    entity: object,
    columns_max: __.Absential[ int ] = __.absent,
) -> tuple[ str, ... ]:
    ''' Renders arbitrary object as text lines via Rich console. '''
    buffer = __.io.StringIO( )
    console = __.produce_rich_console( auxdata.control, buffer, columns_max )
    console.print( entity )
    return tuple( buffer.getvalue( ).split( '\n' ) )

101 

102 

def linearize_omni(
    auxdata: _core.LinearizerState,
    entity: object,
    columns_max: __.Absential[ int ] = __.absent,
) -> tuple[ str, ... ]:
    ''' Dispatches to rich or plain linearization per colorization. '''
    linearizer = (
        linearize_omni_rich if auxdata.colorize else linearize_omni_plain )
    return linearizer( auxdata, entity, columns_max )

111 

112 

def linearize_omni_plain(
    auxdata: _core.LinearizerState,
    entity: object,
    columns_max: __.Absential[ int ] = __.absent,
) -> tuple[ str, ... ]:
    ''' Selects plain linearizer appropriate to entity type. '''
    # Guards are disjoint: no object is both a str and an exception.
    if isinstance( entity, BaseException ):
        return linearize_exception_plain( auxdata, entity, columns_max )
    if isinstance( entity, str ):
        return linearize_text_plain( auxdata, entity, columns_max )
    return linearize_object_plain( auxdata, entity, columns_max )

123 

124 

def linearize_omni_rich(
    auxdata: _core.LinearizerState,
    entity: object,
    columns_max: __.Absential[ int ] = __.absent,
) -> tuple[ str, ... ]:
    ''' Selects rich linearizer appropriate to entity type. '''
    # Guards are disjoint: no object is both a str and an exception.
    if isinstance( entity, BaseException ):
        return linearize_exception_rich( auxdata, entity, columns_max )
    if isinstance( entity, str ):
        return linearize_text_rich( auxdata, entity, columns_max )
    return linearize_object_rich( auxdata, entity, columns_max )

135 

136 

def linearize_stacktrace_plain(
    auxdata: _core.LinearizerState,
    stacktrace: __.tb.StackSummary,
    columns_max: __.Absential[ int ] = __.absent,
) -> tuple[ str, ... ]:
    ''' Renders stack trace frames as plain text lines.

        Each frame contributes an address line ("File '...', line N,
        in name") plus its indented source line, when available. When
        columns_max is provided, an overlong address is split across
        two lines and, if still necessary, the filename is truncated
        from the start to fit.
    '''
    infinite_lines = __.is_absent( columns_max )
    lines: list[ str ] = [ ]
    for frame in stacktrace:
        filename_part = f"File '{frame.filename}'"
        lineno_part = f"line {frame.lineno}" if frame.lineno else ''
        name_part = f"in {frame.name}"
        parts = ( filename_part, lineno_part, name_part )
        address = ', '.join( filter( None, parts ) )
        address_size = len( address )
        if infinite_lines or address_size <= columns_max:
            lines.append( address )
        else:
            # Split address: filename on one line, remainder on the next.
            excess_size = address_size - columns_max - 2 # sans ', '
            parts = ( lineno_part, name_part )
            address = ', '.join( filter( None, parts ) )
            address_size = len( address )
            if excess_size <= address_size:
                lines.append( filename_part )
            else:
                # Keep only the tail of the filename so the rendered
                # line, including the 10 characters of "File '...'"
                # scaffolding, fits within columns_max.
                keep_size = max( columns_max - 10, 1 )
                filename = frame.filename[ -keep_size : ]
                # TODO? Drop middle rather than start.
                lines.append( f"File '...{filename}'" )
            lines.append( address )
        if frame.line:
            line = frame.line.strip( )
            # TODO? Apply Pygments to line.
            # Indent source lines by four columns; wrap width is
            # reduced by the same amount.
            lines_ = iter(
                linearize_text_plain(
                    auxdata, line,
                    __.absent if infinite_lines else columns_max - 4 ) )
            lines.append( "    {}".format( next( lines_ ) ) )
            lines.extend( f"    {line_}" for line_ in lines_ )
    return tuple( lines )

176 

177 

def linearize_stacktrace_rich(
    auxdata: _core.LinearizerState,
    stacktrace: __.tb.StackSummary,
    columns_max: __.Absential[ int ] = __.absent,
) -> tuple[ str, ... ]:
    ''' Renders stack trace via Rich traceback into text lines. '''
    # TODO? Remove exception lines.
    frames = [
        __.rich_traceback.Frame(
            entry.filename, entry.lineno or -1,
            entry.name, entry.line or '' )
        for entry in stacktrace ]
    stack = __.rich_traceback.Stack(
        exc_type = 'Callstack', exc_value = 'Inspection', frames = frames )
    renderable = __.rich_traceback.Traceback(
        trace = __.rich_traceback.Trace( stacks = [ stack ] ) )
    buffer = __.io.StringIO( )
    console = __.produce_rich_console( auxdata.control, buffer, columns_max )
    console.print( renderable )
    return tuple( buffer.getvalue( ).split( '\n' ) )

196 

197 

def linearize_text_plain(
    auxdata: _core.LinearizerState,
    text: str,
    columns_max: __.Absential[ int ] = __.absent,
) -> tuple[ str, ... ]:
    ''' Splits text into lines, wrapping to column limit when given.

        ANSI C1 control sequences are stripped first. Wrapping honors
        the configured incision boundary policy.
    '''
    clean_text = __.remove_ansi_c1_sequences( text )
    if __.is_absent( columns_max ):
        return tuple( clean_text.split( '\n' ) )
    boundary = auxdata.configuration.incision_boundary
    # TODO? Account for wide characters.
    # NOTE(review): textwrap collapses embedded newlines while
    #               wrapping — confirm intended for multiline inputs.
    return tuple( __.textwrap.wrap(
        clean_text,
        break_long_words = (
            boundary is not _core.IncisionBoundaries.Nowhere ),
        break_on_hyphens = (
            boundary is _core.IncisionBoundaries.Wordsplits ),
        width = columns_max ) )

219 

220 

def linearize_text_rich(
    auxdata: _core.LinearizerState,
    text: str,
    columns_max: __.Absential[ int ] = __.absent,
) -> tuple[ str, ... ]:
    ''' Renders ANSI-capable text via Rich console into lines.

        Folds overlong lines only when a column limit is given and the
        configured incision boundary permits it.
    '''
    renderable = __.rich_text.Text.from_ansi( text )
    unbounded = __.is_absent( columns_max )
    incise = (
        not unbounded
        and ( auxdata.configuration.incision_boundary
              is not _core.IncisionBoundaries.Nowhere ) )
    buffer = __.io.StringIO( )
    console = __.produce_rich_console( auxdata.control, buffer, columns_max )
    if unbounded: overflow = 'ignore'
    else: overflow = 'fold'
    console.print( renderable, overflow = overflow, no_wrap = not incise )
    return tuple( buffer.getvalue( ).split( '\n' ) )