2323# SOFTWARE.
2424#
2525###############################################################################
26- from typing import Optional
26+ from collections import deque
27+ from concurrent .futures import ThreadPoolExecutor , as_completed
28+ from typing import Any , Optional
29+ from urllib .parse import urlparse
2730
2831from nodescraper .base import RedfishDataCollector
32+ from nodescraper .connection .redfish import RedfishConnection , RedfishGetResult
2933from nodescraper .enums import EventCategory , EventPriority , ExecutionStatus
3034from nodescraper .models import TaskResult
3135
3236from .collector_args import RedfishEndpointCollectorArgs
3337from .endpoint_data import RedfishEndpointDataModel
3438
39+ ODATA_ID = "@odata.id"
40+ MEMBERS = "Members"
41+
42+
43+ def _normalize_path (odata_id : str , api_root : str ) -> str :
44+ """Convert @odata.id value (URL or path) to a normalized path under api_root."""
45+ if not odata_id or not isinstance (odata_id , str ):
46+ return ""
47+ s = odata_id .strip ()
48+ if s .startswith (("http://" , "https://" )):
49+ parsed = urlparse (s )
50+ s = parsed .path or "/"
51+ if not s .startswith ("/" ):
52+ s = "/" + s
53+ s = s .rstrip ("/" ) or "/"
54+ api_root_norm = api_root .strip ("/" )
55+ if api_root_norm and not s .startswith ("/" + api_root_norm ):
56+ return ""
57+ return s
58+
59+
60+ def _extract_odata_ids (obj : Any ) -> list [str ]:
61+ """Recursively extract all @odata.id values from a Redfish JSON body."""
62+ out : list [str ] = []
63+ if isinstance (obj , dict ):
64+ if ODATA_ID in obj and isinstance (obj [ODATA_ID ], str ):
65+ out .append (obj [ODATA_ID ])
66+ for k , v in obj .items ():
67+ if k == MEMBERS and isinstance (v , list ):
68+ for item in v :
69+ if (
70+ isinstance (item , dict )
71+ and ODATA_ID in item
72+ and isinstance (item [ODATA_ID ], str )
73+ ):
74+ out .append (item [ODATA_ID ])
75+ elif isinstance (v , dict ):
76+ out .extend (_extract_odata_ids (v ))
77+ elif isinstance (v , list ):
78+ for item in v :
79+ if isinstance (item , dict ):
80+ out .extend (_extract_odata_ids (item ))
81+ return out
82+
83+
def _discover_tree(
    connection: "RedfishConnection",
    api_root: str,
    max_depth: int,
    max_endpoints: int,
) -> "tuple[list[str], dict[str, dict], list[RedfishGetResult]]":
    """Breadth-first traversal of the Redfish resource tree from the service root.

    Depth semantics match ``collection_args.tree_max_depth``: the root sits at
    depth 0 and a child link is only enqueued while ``depth + 1 < max_depth``,
    so ``max_depth=1`` fetches the service root only and ``max_depth=2`` adds
    one link level. ``max_endpoints=0`` means no cap.

    Returns:
        Tuple of (sorted visited paths, path -> JSON body for successful GETs,
        every GET result in fetch order).
    """
    start = _normalize_path(api_root, api_root) or ("/" + api_root.strip("/"))
    visited: set[str] = set()
    queue: deque[tuple[str, int]] = deque([(start, 0)])
    bodies: dict[str, dict] = {}
    fetches: list["RedfishGetResult"] = []
    while queue:
        # Endpoint cap counts every attempted GET, successful or not.
        if max_endpoints and len(visited) >= max_endpoints:
            break
        current, depth = queue.popleft()
        if current in visited or depth > max_depth:
            continue
        visited.add(current)
        result = connection.run_get(current)
        fetches.append(result)
        if not result.success or result.data is None:
            continue
        bodies[current] = result.data
        child_depth = depth + 1
        if child_depth >= max_depth:
            # Children would land past max_depth (1 = root only); don't enqueue.
            continue
        for link in _extract_odata_ids(result.data):
            child = _normalize_path(link, api_root)
            if child and child not in visited:
                queue.append((child, child_depth))
    return sorted(visited), bodies, fetches
118+
35119
36120def _uris_from_args (args : Optional [RedfishEndpointCollectorArgs ]) -> list [str ]:
37121 """Return list of URIs from collector args.uris."""
@@ -40,6 +124,18 @@ def _uris_from_args(args: Optional[RedfishEndpointCollectorArgs]) -> list[str]:
40124 return list (args .uris ) if args .uris else []
41125
42126
127+ def _discover_tree_enabled (args : Optional [RedfishEndpointCollectorArgs ]) -> bool :
128+ """True only when tree discovery is explicitly enabled (avoids string/other truthy junk)."""
129+ if args is None :
130+ return False
131+ return getattr (args , "discover_tree" , False ) is True
132+
133+
134+ def _fetch_one (connection_copy : RedfishConnection , path : str ) -> RedfishGetResult :
135+ """Run a single GET on a connection copy (used from worker threads)."""
136+ return connection_copy .run_get (path )
137+
138+
43139class RedfishEndpointCollector (
44140 RedfishDataCollector [RedfishEndpointDataModel , RedfishEndpointCollectorArgs ]
45141):
@@ -50,30 +146,106 @@ class RedfishEndpointCollector(
50146 def collect_data (
51147 self , args : Optional [RedfishEndpointCollectorArgs ] = None
52148 ) -> tuple [TaskResult , Optional [RedfishEndpointDataModel ]]:
53- """GET each configured Redfish URI via _run_redfish_get() and store the JSON response ."""
149+ """Collect via tree discovery, or via explicit URIs, or skip if neither is configured ."""
54150 uris = _uris_from_args (args )
151+ use_tree = _discover_tree_enabled (args )
152+
153+ # 1) Tree discovery: only when discover_tree is explicitly true
154+ if use_tree :
155+ api_root = getattr (self .connection , "api_root" , "redfish/v1" )
156+ max_depth = getattr (args , "tree_max_depth" , 2 ) if args else 2
157+ max_endpoints = (getattr (args , "tree_max_endpoints" , 0 ) or 0 ) if args else 0
158+ _paths , responses , results = _discover_tree (
159+ self .connection ,
160+ api_root ,
161+ max_depth = max_depth ,
162+ max_endpoints = max_endpoints ,
163+ )
164+ for res in results :
165+ self .result .artifacts .append (res )
166+ if not res .success and res .error :
167+ self ._log_event (
168+ category = EventCategory .RUNTIME ,
169+ description = f"Redfish GET failed during tree discovery for { res .path } : { res .error } " ,
170+ priority = EventPriority .WARNING ,
171+ console_log = True ,
172+ )
173+ if not responses :
174+ self .result .message = "No Redfish endpoints discovered from tree"
175+ self .result .status = ExecutionStatus .ERROR
176+ return self .result , None
177+ data = RedfishEndpointDataModel (responses = responses )
178+ self .result .message = f"Collected { len (responses )} Redfish endpoint(s) from tree"
179+ self .result .status = ExecutionStatus .OK
180+ return self .result , data
181+
182+ # 2) URI list: when discover_tree is false/absent and uris are provided
55183 if not uris :
56- self .result .message = "No Redfish URIs configured"
184+ self .result .message = (
185+ "No collection mode configured: set collection_args.discover_tree to true "
186+ "or provide collection_args.uris"
187+ )
57188 self .result .status = ExecutionStatus .NOT_RAN
58189 return self .result , None
59190
60- responses : dict [ str , dict ] = {}
191+ paths = []
61192 for uri in uris :
62- path = uri
193+ path = uri . strip () if uri else ""
63194 if not path :
64195 continue
65196 if not path .startswith ("/" ):
66197 path = "/" + path
67- res = self ._run_redfish_get (path , log_artifact = True )
68- if res .success and res .data is not None :
69- responses [res .path ] = res .data
70- else :
71- self ._log_event (
72- category = EventCategory .RUNTIME ,
73- description = f"Redfish GET failed for { path } : { res .error or 'unknown' } " ,
74- priority = EventPriority .WARNING ,
75- console_log = True ,
76- )
198+ paths .append (path )
199+
200+ max_workers = getattr (args , "max_workers" , 1 ) if args else 1
201+ max_workers = min (max_workers , len (paths ))
202+
203+ if max_workers <= 1 :
204+ # Sequential
205+ responses = {}
206+ for path in paths :
207+ res = self ._run_redfish_get (path , log_artifact = True )
208+ if res .success and res .data is not None :
209+ responses [res .path ] = res .data
210+ else :
211+ self ._log_event (
212+ category = EventCategory .RUNTIME ,
213+ description = f"Redfish GET failed for { path } : { res .error or 'unknown' } " ,
214+ priority = EventPriority .WARNING ,
215+ console_log = True ,
216+ )
217+ else :
218+ # Concurrent: one connection copy per worker, collect results in main thread
219+ responses = {}
220+ results = []
221+ with ThreadPoolExecutor (max_workers = max_workers ) as executor :
222+ futures = {}
223+ for path in paths :
224+ conn = self .connection .copy ()
225+ futures [executor .submit (_fetch_one , conn , path )] = path
226+ for future in as_completed (futures ):
227+ path = futures [future ]
228+ try :
229+ res = future .result ()
230+ results .append (res )
231+ if res .success and res .data is not None :
232+ responses [res .path ] = res .data
233+ else :
234+ self ._log_event (
235+ category = EventCategory .RUNTIME ,
236+ description = f"Redfish GET failed for { path } : { res .error or 'unknown' } " ,
237+ priority = EventPriority .WARNING ,
238+ console_log = True ,
239+ )
240+ except Exception as e :
241+ self ._log_event (
242+ category = EventCategory .RUNTIME ,
243+ description = f"Redfish GET failed for { path } : { e !s} " ,
244+ priority = EventPriority .WARNING ,
245+ console_log = True ,
246+ )
247+ for res in results :
248+ self .result .artifacts .append (res )
77249
78250 if not responses :
79251 self .result .message = "No Redfish endpoints could be read"
0 commit comments