{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 8.788598574821853,
  "global_step": 18500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.1,
      "learning_rate": 1.980997624703088e-05,
      "loss": 0.4065,
      "step": 200
    },
    {
      "epoch": 0.19,
      "learning_rate": 1.961995249406176e-05,
      "loss": 0.3019,
      "step": 400
    },
    {
      "epoch": 0.24,
      "eval_accuracy": 0.9059633027522935,
      "eval_loss": 0.26677730679512024,
      "eval_runtime": 5.719,
      "eval_samples_per_second": 152.475,
      "eval_steps_per_second": 4.896,
      "step": 500
    },
    {
      "epoch": 0.29,
      "learning_rate": 1.9429928741092638e-05,
      "loss": 0.2873,
      "step": 600
    },
    {
      "epoch": 0.38,
      "learning_rate": 1.9239904988123516e-05,
      "loss": 0.2756,
      "step": 800
    },
    {
      "epoch": 0.48,
      "learning_rate": 1.9049881235154395e-05,
      "loss": 0.2514,
      "step": 1000
    },
    {
      "epoch": 0.48,
      "eval_accuracy": 0.9220183486238532,
      "eval_loss": 0.20395953953266144,
      "eval_runtime": 4.9214,
      "eval_samples_per_second": 177.184,
      "eval_steps_per_second": 5.689,
      "step": 1000
    },
    {
      "epoch": 0.57,
      "learning_rate": 1.8859857482185277e-05,
      "loss": 0.2292,
      "step": 1200
    },
    {
      "epoch": 0.67,
      "learning_rate": 1.8669833729216152e-05,
      "loss": 0.2223,
      "step": 1400
    },
    {
      "epoch": 0.71,
      "eval_accuracy": 0.9254587155963303,
      "eval_loss": 0.20957307517528534,
      "eval_runtime": 9.3475,
      "eval_samples_per_second": 93.287,
      "eval_steps_per_second": 2.995,
      "step": 1500
    },
    {
      "epoch": 0.76,
      "learning_rate": 1.847980997624703e-05,
      "loss": 0.2206,
      "step": 1600
    },
    {
      "epoch": 0.86,
      "learning_rate": 1.8289786223277913e-05,
      "loss": 0.2177,
      "step": 1800
    },
    {
      "epoch": 0.95,
      "learning_rate": 1.809976247030879e-05,
      "loss": 0.2141,
      "step": 2000
    },
    {
      "epoch": 0.95,
      "eval_accuracy": 0.9197247706422018,
      "eval_loss": 0.21411468088626862,
      "eval_runtime": 5.5585,
      "eval_samples_per_second": 156.877,
      "eval_steps_per_second": 5.037,
      "step": 2000
    },
    {
      "epoch": 1.05,
      "learning_rate": 1.790973871733967e-05,
      "loss": 0.191,
      "step": 2200
    },
    {
      "epoch": 1.14,
      "learning_rate": 1.7719714964370545e-05,
      "loss": 0.1615,
      "step": 2400
    },
    {
      "epoch": 1.19,
      "eval_accuracy": 0.9277522935779816,
      "eval_loss": 0.23593279719352722,
      "eval_runtime": 6.3221,
      "eval_samples_per_second": 137.929,
      "eval_steps_per_second": 4.429,
      "step": 2500
    },
    {
      "epoch": 1.24,
      "learning_rate": 1.7529691211401427e-05,
      "loss": 0.1598,
      "step": 2600
    },
    {
      "epoch": 1.33,
      "learning_rate": 1.7339667458432306e-05,
      "loss": 0.1513,
      "step": 2800
    },
    {
      "epoch": 1.43,
      "learning_rate": 1.7149643705463184e-05,
      "loss": 0.1696,
      "step": 3000
    },
    {
      "epoch": 1.43,
      "eval_accuracy": 0.9277522935779816,
      "eval_loss": 0.22338584065437317,
      "eval_runtime": 4.5931,
      "eval_samples_per_second": 189.852,
      "eval_steps_per_second": 6.096,
      "step": 3000
    },
    {
      "epoch": 1.52,
      "learning_rate": 1.6959619952494063e-05,
      "loss": 0.1598,
      "step": 3200
    },
    {
      "epoch": 1.62,
      "learning_rate": 1.676959619952494e-05,
      "loss": 0.1704,
      "step": 3400
    },
    {
      "epoch": 1.66,
      "eval_accuracy": 0.926605504587156,
      "eval_loss": 0.20875951647758484,
      "eval_runtime": 5.0919,
      "eval_samples_per_second": 171.251,
      "eval_steps_per_second": 5.499,
      "step": 3500
    },
    {
      "epoch": 1.71,
      "learning_rate": 1.657957244655582e-05,
      "loss": 0.1584,
      "step": 3600
    },
    {
      "epoch": 1.81,
      "learning_rate": 1.63895486935867e-05,
      "loss": 0.171,
      "step": 3800
    },
    {
      "epoch": 1.9,
      "learning_rate": 1.6199524940617578e-05,
      "loss": 0.1443,
      "step": 4000
    },
    {
      "epoch": 1.9,
      "eval_accuracy": 0.9288990825688074,
      "eval_loss": 0.22995886206626892,
      "eval_runtime": 5.7731,
      "eval_samples_per_second": 151.046,
      "eval_steps_per_second": 4.85,
      "step": 4000
    },
    {
      "epoch": 2.0,
      "learning_rate": 1.6009501187648456e-05,
      "loss": 0.1498,
      "step": 4200
    },
    {
      "epoch": 2.09,
      "learning_rate": 1.5819477434679335e-05,
      "loss": 0.1212,
      "step": 4400
    },
    {
      "epoch": 2.14,
      "eval_accuracy": 0.9311926605504587,
      "eval_loss": 0.25349077582359314,
      "eval_runtime": 4.9282,
      "eval_samples_per_second": 176.941,
      "eval_steps_per_second": 5.682,
      "step": 4500
    },
    {
      "epoch": 2.19,
      "learning_rate": 1.5629453681710217e-05,
      "loss": 0.133,
      "step": 4600
    },
    {
      "epoch": 2.28,
      "learning_rate": 1.5439429928741095e-05,
      "loss": 0.1213,
      "step": 4800
    },
    {
      "epoch": 2.38,
      "learning_rate": 1.5249406175771972e-05,
      "loss": 0.1283,
      "step": 5000
    },
    {
      "epoch": 2.38,
      "eval_accuracy": 0.9277522935779816,
      "eval_loss": 0.2518787384033203,
      "eval_runtime": 6.251,
      "eval_samples_per_second": 139.497,
      "eval_steps_per_second": 4.479,
      "step": 5000
    },
    {
      "epoch": 2.47,
      "learning_rate": 1.5059382422802851e-05,
      "loss": 0.12,
      "step": 5200
    },
    {
      "epoch": 2.57,
      "learning_rate": 1.4869358669833731e-05,
      "loss": 0.1313,
      "step": 5400
    },
    {
      "epoch": 2.61,
      "eval_accuracy": 0.9094036697247706,
      "eval_loss": 0.3524125814437866,
      "eval_runtime": 5.7105,
      "eval_samples_per_second": 152.701,
      "eval_steps_per_second": 4.903,
      "step": 5500
    },
    {
      "epoch": 2.66,
      "learning_rate": 1.467933491686461e-05,
      "loss": 0.1196,
      "step": 5600
    },
    {
      "epoch": 2.76,
      "learning_rate": 1.4489311163895487e-05,
      "loss": 0.123,
      "step": 5800
    },
    {
      "epoch": 2.85,
      "learning_rate": 1.4299287410926367e-05,
      "loss": 0.1304,
      "step": 6000
    },
    {
      "epoch": 2.85,
      "eval_accuracy": 0.9243119266055045,
      "eval_loss": 0.24740684032440186,
      "eval_runtime": 11.1285,
      "eval_samples_per_second": 78.357,
      "eval_steps_per_second": 2.516,
      "step": 6000
    },
    {
      "epoch": 2.95,
      "learning_rate": 1.4109263657957246e-05,
      "loss": 0.1248,
      "step": 6200
    },
    {
      "epoch": 3.04,
      "learning_rate": 1.3919239904988124e-05,
      "loss": 0.1061,
      "step": 6400
    },
    {
      "epoch": 3.09,
      "eval_accuracy": 0.9231651376146789,
      "eval_loss": 0.2941964566707611,
      "eval_runtime": 8.2674,
      "eval_samples_per_second": 105.475,
      "eval_steps_per_second": 3.387,
      "step": 6500
    },
    {
      "epoch": 3.14,
      "learning_rate": 1.3729216152019003e-05,
      "loss": 0.0876,
      "step": 6600
    },
    {
      "epoch": 3.23,
      "learning_rate": 1.3539192399049883e-05,
      "loss": 0.0922,
      "step": 6800
    },
    {
      "epoch": 3.33,
      "learning_rate": 1.3349168646080762e-05,
      "loss": 0.0988,
      "step": 7000
    },
    {
      "epoch": 3.33,
      "eval_accuracy": 0.9311926605504587,
      "eval_loss": 0.2566107213497162,
      "eval_runtime": 6.2677,
      "eval_samples_per_second": 139.127,
      "eval_steps_per_second": 4.467,
      "step": 7000
    },
    {
      "epoch": 3.42,
      "learning_rate": 1.3159144893111639e-05,
      "loss": 0.1097,
      "step": 7200
    },
    {
      "epoch": 3.52,
      "learning_rate": 1.296912114014252e-05,
      "loss": 0.0938,
      "step": 7400
    },
    {
      "epoch": 3.56,
      "eval_accuracy": 0.930045871559633,
      "eval_loss": 0.31746456027030945,
      "eval_runtime": 4.983,
      "eval_samples_per_second": 174.994,
      "eval_steps_per_second": 5.619,
      "step": 7500
    },
    {
      "epoch": 3.61,
      "learning_rate": 1.2779097387173398e-05,
      "loss": 0.1079,
      "step": 7600
    },
    {
      "epoch": 3.71,
      "learning_rate": 1.2589073634204277e-05,
      "loss": 0.1023,
      "step": 7800
    },
    {
      "epoch": 3.8,
      "learning_rate": 1.2399049881235155e-05,
      "loss": 0.1015,
      "step": 8000
    },
    {
      "epoch": 3.8,
      "eval_accuracy": 0.9311926605504587,
      "eval_loss": 0.3047061264514923,
      "eval_runtime": 5.0268,
      "eval_samples_per_second": 173.471,
      "eval_steps_per_second": 5.57,
      "step": 8000
    },
    {
      "epoch": 3.9,
      "learning_rate": 1.2209026128266036e-05,
      "loss": 0.1076,
      "step": 8200
    },
    {
      "epoch": 3.99,
      "learning_rate": 1.2019002375296912e-05,
      "loss": 0.1103,
      "step": 8400
    },
    {
      "epoch": 4.04,
      "eval_accuracy": 0.9277522935779816,
      "eval_loss": 0.34547296166419983,
      "eval_runtime": 6.2445,
      "eval_samples_per_second": 139.642,
      "eval_steps_per_second": 4.484,
      "step": 8500
    },
    {
      "epoch": 4.09,
      "learning_rate": 1.1828978622327791e-05,
      "loss": 0.0766,
      "step": 8600
    },
    {
      "epoch": 4.18,
      "learning_rate": 1.1638954869358671e-05,
      "loss": 0.0778,
      "step": 8800
    },
    {
      "epoch": 4.28,
      "learning_rate": 1.144893111638955e-05,
      "loss": 0.0841,
      "step": 9000
    },
    {
      "epoch": 4.28,
      "eval_accuracy": 0.9185779816513762,
      "eval_loss": 0.37421032786369324,
      "eval_runtime": 5.4233,
      "eval_samples_per_second": 160.787,
      "eval_steps_per_second": 5.163,
      "step": 9000
    },
    {
      "epoch": 4.37,
      "learning_rate": 1.1258907363420429e-05,
      "loss": 0.0774,
      "step": 9200
    },
    {
      "epoch": 4.47,
      "learning_rate": 1.1068883610451307e-05,
      "loss": 0.0839,
      "step": 9400
    },
    {
      "epoch": 4.51,
      "eval_accuracy": 0.9220183486238532,
      "eval_loss": 0.3504032492637634,
      "eval_runtime": 6.6438,
      "eval_samples_per_second": 131.249,
      "eval_steps_per_second": 4.214,
      "step": 9500
    },
    {
      "epoch": 4.56,
      "learning_rate": 1.0878859857482188e-05,
      "loss": 0.0857,
      "step": 9600
    },
    {
      "epoch": 4.66,
      "learning_rate": 1.0688836104513065e-05,
      "loss": 0.091,
      "step": 9800
    },
    {
      "epoch": 4.75,
      "learning_rate": 1.0498812351543943e-05,
      "loss": 0.0858,
      "step": 10000
    },
    {
      "epoch": 4.75,
      "eval_accuracy": 0.9288990825688074,
      "eval_loss": 0.2815171778202057,
      "eval_runtime": 4.9169,
      "eval_samples_per_second": 177.347,
      "eval_steps_per_second": 5.695,
      "step": 10000
    },
    {
      "epoch": 4.85,
      "learning_rate": 1.0308788598574823e-05,
      "loss": 0.0852,
      "step": 10200
    },
    {
      "epoch": 4.94,
      "learning_rate": 1.0118764845605702e-05,
      "loss": 0.0897,
      "step": 10400
    },
    {
      "epoch": 4.99,
      "eval_accuracy": 0.9220183486238532,
      "eval_loss": 0.3205639123916626,
      "eval_runtime": 7.2765,
      "eval_samples_per_second": 119.839,
      "eval_steps_per_second": 3.848,
      "step": 10500
    },
    {
      "epoch": 5.04,
      "learning_rate": 9.92874109263658e-06,
      "loss": 0.076,
      "step": 10600
    },
    {
      "epoch": 5.13,
      "learning_rate": 9.73871733966746e-06,
      "loss": 0.067,
      "step": 10800
    },
    {
      "epoch": 5.23,
      "learning_rate": 9.548693586698338e-06,
      "loss": 0.0641,
      "step": 11000
    },
    {
      "epoch": 5.23,
      "eval_accuracy": 0.9334862385321101,
      "eval_loss": 0.32430100440979004,
      "eval_runtime": 5.6582,
      "eval_samples_per_second": 154.113,
      "eval_steps_per_second": 4.949,
      "step": 11000
    },
    {
      "epoch": 5.32,
      "learning_rate": 9.358669833729217e-06,
      "loss": 0.0657,
      "step": 11200
    },
    {
      "epoch": 5.42,
      "learning_rate": 9.168646080760095e-06,
      "loss": 0.0758,
      "step": 11400
    },
    {
      "epoch": 5.46,
      "eval_accuracy": 0.9311926605504587,
      "eval_loss": 0.29846712946891785,
      "eval_runtime": 4.9813,
      "eval_samples_per_second": 175.054,
      "eval_steps_per_second": 5.621,
      "step": 11500
    },
    {
      "epoch": 5.51,
      "learning_rate": 8.978622327790974e-06,
      "loss": 0.0705,
      "step": 11600
    },
    {
      "epoch": 5.61,
      "learning_rate": 8.788598574821854e-06,
      "loss": 0.065,
      "step": 11800
    },
    {
      "epoch": 5.7,
      "learning_rate": 8.598574821852733e-06,
      "loss": 0.0744,
      "step": 12000
    },
    {
      "epoch": 5.7,
      "eval_accuracy": 0.9151376146788991,
      "eval_loss": 0.35644832253456116,
      "eval_runtime": 5.3372,
      "eval_samples_per_second": 163.382,
      "eval_steps_per_second": 5.246,
      "step": 12000
    },
    {
      "epoch": 5.8,
      "learning_rate": 8.408551068883611e-06,
      "loss": 0.07,
      "step": 12200
    },
    {
      "epoch": 5.89,
      "learning_rate": 8.21852731591449e-06,
      "loss": 0.0735,
      "step": 12400
    },
    {
      "epoch": 5.94,
      "eval_accuracy": 0.9220183486238532,
      "eval_loss": 0.36781516671180725,
      "eval_runtime": 5.0223,
      "eval_samples_per_second": 173.627,
      "eval_steps_per_second": 5.575,
      "step": 12500
    },
    {
      "epoch": 5.99,
      "learning_rate": 8.028503562945369e-06,
      "loss": 0.0683,
      "step": 12600
    },
    {
      "epoch": 6.08,
      "learning_rate": 7.838479809976247e-06,
      "loss": 0.0519,
      "step": 12800
    },
    {
      "epoch": 6.18,
      "learning_rate": 7.648456057007126e-06,
      "loss": 0.0581,
      "step": 13000
    },
    {
      "epoch": 6.18,
      "eval_accuracy": 0.9323394495412844,
      "eval_loss": 0.3235052824020386,
      "eval_runtime": 5.3353,
      "eval_samples_per_second": 163.439,
      "eval_steps_per_second": 5.248,
      "step": 13000
    },
    {
      "epoch": 6.27,
      "learning_rate": 7.458432304038005e-06,
      "loss": 0.0575,
      "step": 13200
    },
    {
      "epoch": 6.37,
      "learning_rate": 7.268408551068884e-06,
      "loss": 0.0605,
      "step": 13400
    },
    {
      "epoch": 6.41,
      "eval_accuracy": 0.9254587155963303,
      "eval_loss": 0.3677641451358795,
      "eval_runtime": 4.1548,
      "eval_samples_per_second": 209.877,
      "eval_steps_per_second": 6.739,
      "step": 13500
    },
    {
      "epoch": 6.46,
      "learning_rate": 7.0783847980997635e-06,
      "loss": 0.0548,
      "step": 13600
    },
    {
      "epoch": 6.56,
      "learning_rate": 6.888361045130641e-06,
      "loss": 0.054,
      "step": 13800
    },
    {
      "epoch": 6.65,
      "learning_rate": 6.698337292161521e-06,
      "loss": 0.0608,
      "step": 14000
    },
    {
      "epoch": 6.65,
      "eval_accuracy": 0.9231651376146789,
      "eval_loss": 0.35537707805633545,
      "eval_runtime": 5.0168,
      "eval_samples_per_second": 173.816,
      "eval_steps_per_second": 5.581,
      "step": 14000
    },
    {
      "epoch": 6.75,
      "learning_rate": 6.508313539192399e-06,
      "loss": 0.0589,
      "step": 14200
    },
    {
      "epoch": 6.84,
      "learning_rate": 6.318289786223278e-06,
      "loss": 0.055,
      "step": 14400
    },
    {
      "epoch": 6.89,
      "eval_accuracy": 0.9277522935779816,
      "eval_loss": 0.3431534171104431,
      "eval_runtime": 5.9333,
      "eval_samples_per_second": 146.968,
      "eval_steps_per_second": 4.719,
      "step": 14500
    },
    {
      "epoch": 6.94,
      "learning_rate": 6.1282660332541575e-06,
      "loss": 0.062,
      "step": 14600
    },
    {
      "epoch": 7.03,
      "learning_rate": 5.938242280285036e-06,
      "loss": 0.0571,
      "step": 14800
    },
    {
      "epoch": 7.13,
      "learning_rate": 5.748218527315916e-06,
      "loss": 0.0498,
      "step": 15000
    },
    {
      "epoch": 7.13,
      "eval_accuracy": 0.9288990825688074,
      "eval_loss": 0.3633643090724945,
      "eval_runtime": 4.8896,
      "eval_samples_per_second": 178.339,
      "eval_steps_per_second": 5.726,
      "step": 15000
    },
    {
      "epoch": 7.22,
      "learning_rate": 5.558194774346793e-06,
      "loss": 0.0569,
      "step": 15200
    },
    {
      "epoch": 7.32,
      "learning_rate": 5.368171021377673e-06,
      "loss": 0.0531,
      "step": 15400
    },
    {
      "epoch": 7.36,
      "eval_accuracy": 0.9277522935779816,
      "eval_loss": 0.34541434049606323,
      "eval_runtime": 5.4179,
      "eval_samples_per_second": 160.949,
      "eval_steps_per_second": 5.168,
      "step": 15500
    },
    {
      "epoch": 7.41,
      "learning_rate": 5.178147268408551e-06,
      "loss": 0.0501,
      "step": 15600
    },
    {
      "epoch": 7.51,
      "learning_rate": 4.98812351543943e-06,
      "loss": 0.0501,
      "step": 15800
    },
    {
      "epoch": 7.6,
      "learning_rate": 4.798099762470309e-06,
      "loss": 0.0521,
      "step": 16000
    },
    {
      "epoch": 7.6,
      "eval_accuracy": 0.9243119266055045,
      "eval_loss": 0.3439008593559265,
      "eval_runtime": 4.9353,
      "eval_samples_per_second": 176.685,
      "eval_steps_per_second": 5.673,
      "step": 16000
    },
    {
      "epoch": 7.7,
      "learning_rate": 4.608076009501188e-06,
      "loss": 0.0452,
      "step": 16200
    },
    {
      "epoch": 7.79,
      "learning_rate": 4.418052256532067e-06,
      "loss": 0.0481,
      "step": 16400
    },
    {
      "epoch": 7.84,
      "eval_accuracy": 0.9254587155963303,
      "eval_loss": 0.36306628584861755,
      "eval_runtime": 5.8137,
      "eval_samples_per_second": 149.99,
      "eval_steps_per_second": 4.816,
      "step": 16500
    },
    {
      "epoch": 7.89,
      "learning_rate": 4.228028503562946e-06,
      "loss": 0.0514,
      "step": 16600
    },
    {
      "epoch": 7.98,
      "learning_rate": 4.038004750593825e-06,
      "loss": 0.0486,
      "step": 16800
    },
    {
      "epoch": 8.08,
      "learning_rate": 3.8479809976247036e-06,
      "loss": 0.0386,
      "step": 17000
    },
    {
      "epoch": 8.08,
      "eval_accuracy": 0.930045871559633,
      "eval_loss": 0.37118515372276306,
      "eval_runtime": 6.6001,
      "eval_samples_per_second": 132.12,
      "eval_steps_per_second": 4.242,
      "step": 17000
    },
    {
      "epoch": 8.17,
      "learning_rate": 3.657957244655582e-06,
      "loss": 0.036,
      "step": 17200
    },
    {
      "epoch": 8.27,
      "learning_rate": 3.467933491686461e-06,
      "loss": 0.0399,
      "step": 17400
    },
    {
      "epoch": 8.31,
      "eval_accuracy": 0.9346330275229358,
      "eval_loss": 0.3605540692806244,
      "eval_runtime": 5.1639,
      "eval_samples_per_second": 168.866,
      "eval_steps_per_second": 5.422,
      "step": 17500
    },
    {
      "epoch": 8.36,
      "learning_rate": 3.27790973871734e-06,
      "loss": 0.0436,
      "step": 17600
    },
    {
      "epoch": 8.46,
      "learning_rate": 3.0878859857482185e-06,
      "loss": 0.0417,
      "step": 17800
    },
    {
      "epoch": 8.55,
      "learning_rate": 2.897862232779098e-06,
      "loss": 0.048,
      "step": 18000
    },
    {
      "epoch": 8.55,
      "eval_accuracy": 0.9380733944954128,
      "eval_loss": 0.3584529757499695,
      "eval_runtime": 5.8003,
      "eval_samples_per_second": 150.337,
      "eval_steps_per_second": 4.827,
      "step": 18000
    },
    {
      "epoch": 8.65,
      "learning_rate": 2.7078384798099766e-06,
      "loss": 0.0384,
      "step": 18200
    },
    {
      "epoch": 8.74,
      "learning_rate": 2.5178147268408552e-06,
      "loss": 0.0522,
      "step": 18400
    },
    {
      "epoch": 8.79,
      "eval_accuracy": 0.9334862385321101,
      "eval_loss": 0.337113618850708,
      "eval_runtime": 5.4571,
      "eval_samples_per_second": 159.791,
      "eval_steps_per_second": 5.131,
      "step": 18500
    }
  ],
  "max_steps": 21050,
  "num_train_epochs": 10,
  "total_flos": 4.538296624748544e+16,
  "trial_name": null,
  "trial_params": null
}