Safemotion Lib
fastreid.evaluation.testing Namespace Reference

Functions

 print_csv_format (results)
 
 verify_results (cfg, results)
 
 flatten_results_dict (results)
 

Variables

 logger = logging.getLogger(__name__)
 

Function Documentation

◆ flatten_results_dict()

fastreid.evaluation.testing.flatten_results_dict(results)
Expand a hierarchical dict of scalars into a flat dict of scalars.
If results[k1][k2][k3] = v, the returned dict will have the entry
{"k1/k2/k3": v}.
Args:
    results (dict): a hierarchical (possibly nested) dict of scalars

Definition at line 73 of file testing.py.

def flatten_results_dict(results):
    """
    Expand a hierarchical dict of scalars into a flat dict of scalars.
    If results[k1][k2][k3] = v, the returned dict will have the entry
    {"k1/k2/k3": v}.
    Args:
        results (dict):
    """
    r = {}
    for k, v in results.items():
        if isinstance(v, Mapping):
            v = flatten_results_dict(v)
            for kk, vv in v.items():
                r[k + "/" + kk] = vv
        else:
            r[k] = v
    return r
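
A minimal usage sketch (not part of the library's documentation; the nested results dict below is made up for illustration):

from fastreid.evaluation.testing import flatten_results_dict

# hypothetical nested evaluation results: dataset -> metric group -> metric -> score
results = {"Market1501": {"metric": {"Rank-1": 0.95, "mAP": 0.87}}}
flat = flatten_results_dict(results)
# flat == {"Market1501/metric/Rank-1": 0.95, "Market1501/metric/mAP": 0.87}

Each level of nesting contributes one path component, joined with "/".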

◆ print_csv_format()

fastreid.evaluation.testing.print_csv_format(results)
Print main metrics in a format similar to Detectron,
so that they are easy to copy-paste into a spreadsheet.
Args:
    results (OrderedDict[dict]): task_name -> {metric -> score}

Definition at line 14 of file testing.py.

def print_csv_format(results):
    """
    Print main metrics in a format similar to Detectron,
    so that they are easy to copypaste into a spreadsheet.
    Args:
        results (OrderedDict[dict]): task_name -> {metric -> score}
    """
    assert isinstance(results, OrderedDict), results  # unordered results cannot be properly printed
    task = list(results.keys())[0]
    metrics = ["Datasets"] + [k for k in results[task]]

    csv_results = []
    for task, res in results.items():
        csv_results.append((task, *list(res.values())))

    # tabulate it
    table = tabulate(
        csv_results,
        tablefmt="pipe",
        floatfmt=".2%",
        headers=metrics,
        numalign="left",
    )

    logger.info("Evaluation results in csv format: \n" + colored(table, "cyan"))
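
A rough usage sketch, assuming the tabulate package and a termcolor-style colored helper are importable as the listing above suggests; the dataset names and scores are made up:

import logging
from collections import OrderedDict
from fastreid.evaluation.testing import print_csv_format

logging.basicConfig(level=logging.INFO)  # make logger.info output visible

# hypothetical results: task_name -> {metric -> score}
results = OrderedDict([
    ("Market1501", {"Rank-1": 0.951, "mAP": 0.872}),
    ("DukeMTMC-reID", {"Rank-1": 0.893, "mAP": 0.791}),
])
print_csv_format(results)

The assertion means a plain dict raises; scores are rendered as two-decimal percentages (floatfmt=".2%") in a pipe-delimited table.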

◆ verify_results()

fastreid.evaluation.testing.verify_results(cfg, results)
Args:
    cfg: config object; only cfg.TEST.EXPECTED_RESULTS, a list of
        (task, metric, expected, tolerance) entries, is read
    results (OrderedDict[dict]): task_name -> {metric -> score}
Returns:
    bool: whether the verification succeeds or not

Definition at line 41 of file testing.py.

def verify_results(cfg, results):
    """
    Args:
        results (OrderedDict[dict]): task_name -> {metric -> score}
    Returns:
        bool: whether the verification succeeds or not
    """
    expected_results = cfg.TEST.EXPECTED_RESULTS
    if not len(expected_results):
        return True

    ok = True
    for task, metric, expected, tolerance in expected_results:
        actual = results[task][metric]
        if not np.isfinite(actual):
            ok = False
        diff = abs(actual - expected)
        if diff > tolerance:
            ok = False

    logger = logging.getLogger(__name__)
    if not ok:
        logger.error("Result verification failed!")
        logger.error("Expected Results: " + str(expected_results))
        logger.error("Actual Results: " + pprint.pformat(results))

        sys.exit(1)
    else:
        logger.info("Results verification passed.")
    return ok
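
A hedged usage sketch: the real cfg is the library's config object, but only the TEST.EXPECTED_RESULTS field is read here, so a SimpleNamespace stand-in (hypothetical, for illustration only) is enough to exercise the function:

from collections import OrderedDict
from types import SimpleNamespace
from fastreid.evaluation.testing import verify_results

# stand-in config; each expected-results entry is (task, metric, expected, tolerance)
cfg = SimpleNamespace(
    TEST=SimpleNamespace(EXPECTED_RESULTS=[("Market1501", "mAP", 0.87, 0.02)])
)

results = OrderedDict([("Market1501", {"Rank-1": 0.951, "mAP": 0.872})])
ok = verify_results(cfg, results)  # diff 0.002 <= tolerance 0.02, so ok is True

Note that when a check fails the function logs the expected and actual results and calls sys.exit(1) rather than returning False; it only returns a bool when verification passes or when no expected results are configured.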

Variable Documentation

◆ logger

fastreid.evaluation.testing.logger = logging.getLogger(__name__)

Definition at line 11 of file testing.py.