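"""Tests for utils.compile_results: loading submission results and computing the IQM performance tables."""
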
import pickle
from pathlib import Path

import pandas as pd
import pytest

from utils.compile_results import (compute_all_iqms, get_datasets_tables,
                                   get_overall_performance_table,
                                   get_performance_by_dimension_table,
                                   load_results)
from utils.constants import NORM_BASE_SUBMISSION

# Absolute path to the directory containing this test file; fixtures live under ./resources.
root = str(Path(__file__).parent.resolve())

# Submission and model names expected to be found in the test fixtures under resources/inputs.
SUBMISSION_NAMES = ['test_submission_1']
MODEL_NAMES = ["ssl4eos12_resnet50_sentinel2_all_decur", "ssl4eos12_resnet50_sentinel2_all_dino"]


@pytest.fixture
def expected_output():
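    """Load the pre-computed expected results pickle used as ground truth."""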
    with open(f"{root}/resources/outputs/test_output.pkl", 'rb') as handle:
        expected_results = pickle.load(handle)
    return expected_results


class TestLoading:
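    # Parse every submission in the test fixtures once; all tests below share these results.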
    all_submission_results, all_model_names, all_submissions = load_results(folder=f"{root}/resources/inputs")

    def test_submission_results(self):
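        """Every loaded submission should carry a results DataFrame with the metadata columns added during loading."""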
        for value in self.all_submission_results.values():
            assert "results" in value
            assert isinstance(value["results"], pd.DataFrame)

            columns_to_be_added = {"# params", "Model", "Config Settings"}
            existing_columns = set(value["results"].columns)
            assert columns_to_be_added.issubset(existing_columns)

    def test_model_names(self):
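        """load_results should report exactly the model names present in the test fixtures."""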
        assert sorted(self.all_model_names) == sorted(MODEL_NAMES)

    def test_submission_names(self):
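        """load_results should report exactly the expected submission names."""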
        assert sorted(self.all_submissions) == sorted(SUBMISSION_NAMES)


class TestComputeResults:
    # Run the full results pipeline once at class definition; every test below shares these tables.
    all_submission_results, all_model_names, all_submissions = load_results(folder=f"{root}/resources/inputs")
    benchmark_name = f"leaderboard_{NORM_BASE_SUBMISSION}_main"
    all_iqms = compute_all_iqms(all_submission_results=all_submission_results,
                                benchmark_name=benchmark_name)
    overall_performance_tables = get_overall_performance_table(all_submission_results=all_submission_results,
                                                               all_iqms=all_iqms)
    performance_by_dimension_tables = get_performance_by_dimension_table(all_submission_results=all_submission_results,
                                                                         all_iqms=all_iqms)
    datasets_tables = get_datasets_tables(all_submission_results=all_submission_results,
                                          all_iqms=all_iqms)

    def test_compute_all_iqms(self, expected_output):
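        """IQMs are computed per submission, and each submission's tables match the structure of the expected output."""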
        assert sorted(self.all_iqms.keys()) == sorted(self.all_submission_results.keys())
        assert "overall_performance_tables" in expected_output

        for submission, submission_value in self.all_iqms.items():
            assert sorted(submission_value.keys()) == sorted(expected_output["all_iqms"][submission].keys())

            for table_name, table in submission_value.items():
                assert isinstance(table, pd.DataFrame)
                # Exact value comparison is currently disabled:
                # assert table.equals(expected_output["all_iqms"][submission][table_name])

    def test_raw_values(self):
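        """The overall performance tables should include a 'raw' view."""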
        assert "raw" in self.overall_performance_tables
        # TODO: check the dataset, overall, and dimension values against the expected output.

    def test_normalized_values(self):
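        """The overall performance tables should include a 'normalized' view."""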
        assert "normalized" in self.overall_performance_tables
        # TODO: check the dataset, overall, and dimension values against the expected output.