Coverage for sources/librovore/structures/sphinx/urls.py: 19%
34 statements
« prev ^ index » next coverage.py v7.10.4, created at 2025-08-17 23:43 +0000
1# vim: set filetype=python fileencoding=utf-8:
2# -*- coding: utf-8 -*-
4#============================================================================#
5# #
6# Licensed under the Apache License, Version 2.0 (the "License"); #
7# you may not use this file except in compliance with the License. #
8# You may obtain a copy of the License at #
9# #
10# http://www.apache.org/licenses/LICENSE-2.0 #
11# #
12# Unless required by applicable law or agreed to in writing, software #
13# distributed under the License is distributed on an "AS IS" BASIS, #
14# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
15# See the License for the specific language governing permissions and #
16# limitations under the License. #
17# #
18#============================================================================#
21''' URL manipulation and normalization functions. '''
24import urllib.parse as _urlparse
26from urllib.parse import ParseResult as _Url
28from . import __
def normalize_base_url( source: str ) -> __.typx.Annotated[
    _Url,
    __.ddoc.Doc(
        ''' Normalized base URL without trailing slash.

            Extracts clean base documentation URL from any source.
            Handles URLs, file paths, and directories consistently.
        ''' )
]:
    ''' Produces a clean base documentation URL from any source string. '''
    try: parsed = _urlparse.urlparse( source )
    except Exception as exc:
        raise __.InventoryUrlInvalidity( source ) from exc
    scheme = parsed.scheme
    if not scheme:
        # Local filesystem source: convert it into a file:// URL.
        # A nonexistent path which still carries a suffix is assumed to
        # name a file, so its parent directory becomes the base.
        location = __.Path( source )
        if location.is_file( ) or (
            not location.exists( ) and location.suffix
        ): location = location.parent
        parsed = _urlparse.urlparse( location.resolve( ).as_uri( ) )
    elif scheme not in ( 'http', 'https', 'file' ):
        # Unsupported scheme (e.g., 'ftp', or a Windows drive letter
        # misparsed as a scheme).
        raise __.InventoryUrlInvalidity( source )
    # Drop params/query/fragment and any trailing slash on the path.
    return _urlparse.ParseResult(
        scheme = parsed.scheme, netloc = parsed.netloc,
        path = parsed.path.rstrip( '/' ),
        params = '', query = '', fragment = '' )
def derive_documentation_url(
    base_url: _Url, object_uri: str, object_name: str
) -> _Url:
    ''' Derives documentation URL from base URL ParseResult and object URI.

        Substitutes the object name for the '$' placeholder in the URI,
        then appends the result to the base path, moving any '#' suffix
        into the URL fragment.
    '''
    resolved = object_uri.replace( '$', object_name )
    # Split on the first '#': everything after it is the page anchor.
    location, separator, anchor = resolved.partition( '#' )
    if separator:
        return base_url._replace(
            path = f"{base_url.path}/{location}", fragment = anchor )
    return base_url._replace( path = f"{base_url.path}/{resolved}" )
def derive_html_url( base_url: _Url ) -> _Url:
    ''' Derives index.html URL from base URL ParseResult. '''
    # Append the conventional landing page to the base path.
    return base_url._replace(
        path = '/'.join( ( base_url.path, 'index.html' ) ) )
def derive_inventory_url( base_url: _Url ) -> _Url:
    ''' Derives objects.inv URL from base URL ParseResult. '''
    # Sphinx publishes its object inventory at this fixed location.
    return base_url._replace(
        path = '/'.join( ( base_url.path, 'objects.inv' ) ) )
def derive_searchindex_url( base_url: _Url ) -> _Url:
    ''' Derives searchindex.js URL from base URL ParseResult. '''
    # Sphinx emits its client-side search index at this fixed location.
    return base_url._replace(
        path = '/'.join( ( base_url.path, 'searchindex.js' ) ) )