| { |
| "best_metric": null, |
| "best_model_checkpoint": null, |
| "epoch": 2.986046511627907, |
| "eval_steps": 18, |
| "global_step": 159, |
| "is_hyper_param_search": false, |
| "is_local_process_zero": true, |
| "is_world_process_zero": true, |
| "log_history": [ |
| { |
| "epoch": 0.018604651162790697, |
| "grad_norm": 10.013904683091026, |
| "learning_rate": 6.666666666666667e-07, |
| "loss": 1.8321, |
| "step": 1 |
| }, |
| { |
| "epoch": 0.018604651162790697, |
| "eval_loss": 1.7002108097076416, |
| "eval_runtime": 20.4169, |
| "eval_samples_per_second": 1.714, |
| "eval_steps_per_second": 0.147, |
| "step": 1 |
| }, |
| { |
| "epoch": 0.037209302325581395, |
| "grad_norm": 9.590324467022036, |
| "learning_rate": 1.3333333333333334e-06, |
| "loss": 1.7277, |
| "step": 2 |
| }, |
| { |
| "epoch": 0.05581395348837209, |
| "grad_norm": 9.706832022009003, |
| "learning_rate": 2.0000000000000003e-06, |
| "loss": 1.7223, |
| "step": 3 |
| }, |
| { |
| "epoch": 0.07441860465116279, |
| "grad_norm": 9.191020788709961, |
| "learning_rate": 2.666666666666667e-06, |
| "loss": 1.4136, |
| "step": 4 |
| }, |
| { |
| "epoch": 0.09302325581395349, |
| "grad_norm": 5.1972501803404585, |
| "learning_rate": 3.3333333333333333e-06, |
| "loss": 0.8326, |
| "step": 5 |
| }, |
| { |
| "epoch": 0.11162790697674418, |
| "grad_norm": 5.003176836832652, |
| "learning_rate": 4.000000000000001e-06, |
| "loss": 0.6748, |
| "step": 6 |
| }, |
| { |
| "epoch": 0.13023255813953488, |
| "grad_norm": 5.621242613710083, |
| "learning_rate": 4.666666666666667e-06, |
| "loss": 0.3525, |
| "step": 7 |
| }, |
| { |
| "epoch": 0.14883720930232558, |
| "grad_norm": 2.073120710995121, |
| "learning_rate": 5.333333333333334e-06, |
| "loss": 0.1927, |
| "step": 8 |
| }, |
| { |
| "epoch": 0.16744186046511628, |
| "grad_norm": 5.058621992174738, |
| "learning_rate": 6e-06, |
| "loss": 0.1726, |
| "step": 9 |
| }, |
| { |
| "epoch": 0.18604651162790697, |
| "grad_norm": 0.484559270346389, |
| "learning_rate": 6.666666666666667e-06, |
| "loss": 0.0581, |
| "step": 10 |
| }, |
| { |
| "epoch": 0.20465116279069767, |
| "grad_norm": 0.27879369422819283, |
| "learning_rate": 7.333333333333333e-06, |
| "loss": 0.0402, |
| "step": 11 |
| }, |
| { |
| "epoch": 0.22325581395348837, |
| "grad_norm": 1.2600063838891933, |
| "learning_rate": 8.000000000000001e-06, |
| "loss": 0.1245, |
| "step": 12 |
| }, |
| { |
| "epoch": 0.24186046511627907, |
| "grad_norm": 1.3601622035054102, |
| "learning_rate": 8.666666666666668e-06, |
| "loss": 0.0771, |
| "step": 13 |
| }, |
| { |
| "epoch": 0.26046511627906976, |
| "grad_norm": 0.4450667341423361, |
| "learning_rate": 9.333333333333334e-06, |
| "loss": 0.0433, |
| "step": 14 |
| }, |
| { |
| "epoch": 0.27906976744186046, |
| "grad_norm": 0.2535563331520209, |
| "learning_rate": 1e-05, |
| "loss": 0.0361, |
| "step": 15 |
| }, |
| { |
| "epoch": 0.29767441860465116, |
| "grad_norm": 0.3292167013274557, |
| "learning_rate": 1.0666666666666667e-05, |
| "loss": 0.0323, |
| "step": 16 |
| }, |
| { |
| "epoch": 0.31627906976744186, |
| "grad_norm": 0.2819804022063467, |
| "learning_rate": 1.1333333333333334e-05, |
| "loss": 0.0288, |
| "step": 17 |
| }, |
| { |
| "epoch": 0.33488372093023255, |
| "grad_norm": 0.1945402606232617, |
| "learning_rate": 1.2e-05, |
| "loss": 0.025, |
| "step": 18 |
| }, |
| { |
| "epoch": 0.33488372093023255, |
| "eval_loss": 0.011246235109865665, |
| "eval_runtime": 17.9147, |
| "eval_samples_per_second": 1.954, |
| "eval_steps_per_second": 0.167, |
| "step": 18 |
| }, |
| { |
| "epoch": 0.35348837209302325, |
| "grad_norm": 0.21212477357360185, |
| "learning_rate": 1.2666666666666667e-05, |
| "loss": 0.0267, |
| "step": 19 |
| }, |
| { |
| "epoch": 0.37209302325581395, |
| "grad_norm": 0.16069147836386155, |
| "learning_rate": 1.3333333333333333e-05, |
| "loss": 0.0183, |
| "step": 20 |
| }, |
| { |
| "epoch": 0.39069767441860465, |
| "grad_norm": 0.16338009125630443, |
| "learning_rate": 1.4e-05, |
| "loss": 0.0206, |
| "step": 21 |
| }, |
| { |
| "epoch": 0.40930232558139534, |
| "grad_norm": 0.1853675731295865, |
| "learning_rate": 1.4666666666666666e-05, |
| "loss": 0.0257, |
| "step": 22 |
| }, |
| { |
| "epoch": 0.42790697674418604, |
| "grad_norm": 0.17527610457824952, |
| "learning_rate": 1.5333333333333334e-05, |
| "loss": 0.0249, |
| "step": 23 |
| }, |
| { |
| "epoch": 0.44651162790697674, |
| "grad_norm": 0.11302300812639408, |
| "learning_rate": 1.6000000000000003e-05, |
| "loss": 0.0136, |
| "step": 24 |
| }, |
| { |
| "epoch": 0.46511627906976744, |
| "grad_norm": 0.12428999247200254, |
| "learning_rate": 1.6666666666666667e-05, |
| "loss": 0.0147, |
| "step": 25 |
| }, |
| { |
| "epoch": 0.48372093023255813, |
| "grad_norm": 0.1515400632408266, |
| "learning_rate": 1.7333333333333336e-05, |
| "loss": 0.0174, |
| "step": 26 |
| }, |
| { |
| "epoch": 0.5023255813953489, |
| "grad_norm": 0.1364820504324016, |
| "learning_rate": 1.8e-05, |
| "loss": 0.0135, |
| "step": 27 |
| }, |
| { |
| "epoch": 0.5209302325581395, |
| "grad_norm": 0.08324402722467575, |
| "learning_rate": 1.866666666666667e-05, |
| "loss": 0.013, |
| "step": 28 |
| }, |
| { |
| "epoch": 0.5395348837209303, |
| "grad_norm": 0.15171776647001634, |
| "learning_rate": 1.9333333333333333e-05, |
| "loss": 0.022, |
| "step": 29 |
| }, |
| { |
| "epoch": 0.5581395348837209, |
| "grad_norm": 0.10477420425994877, |
| "learning_rate": 2e-05, |
| "loss": 0.0095, |
| "step": 30 |
| }, |
| { |
| "epoch": 0.5767441860465117, |
| "grad_norm": 0.07782298000167698, |
| "learning_rate": 1.9997034698451396e-05, |
| "loss": 0.0127, |
| "step": 31 |
| }, |
| { |
| "epoch": 0.5953488372093023, |
| "grad_norm": 0.08605755584398436, |
| "learning_rate": 1.998814055240823e-05, |
| "loss": 0.0083, |
| "step": 32 |
| }, |
| { |
| "epoch": 0.6139534883720931, |
| "grad_norm": 0.2278351783000692, |
| "learning_rate": 1.9973322836635517e-05, |
| "loss": 0.0365, |
| "step": 33 |
| }, |
| { |
| "epoch": 0.6325581395348837, |
| "grad_norm": 0.0939827949218066, |
| "learning_rate": 1.995259033893236e-05, |
| "loss": 0.0115, |
| "step": 34 |
| }, |
| { |
| "epoch": 0.6511627906976745, |
| "grad_norm": 0.18207228380286333, |
| "learning_rate": 1.9925955354920265e-05, |
| "loss": 0.0206, |
| "step": 35 |
| }, |
| { |
| "epoch": 0.6697674418604651, |
| "grad_norm": 0.10243227406250023, |
| "learning_rate": 1.9893433680751105e-05, |
| "loss": 0.0189, |
| "step": 36 |
| }, |
| { |
| "epoch": 0.6697674418604651, |
| "eval_loss": 0.01186442095786333, |
| "eval_runtime": 17.7681, |
| "eval_samples_per_second": 1.97, |
| "eval_steps_per_second": 0.169, |
| "step": 36 |
| }, |
| { |
| "epoch": 0.6883720930232559, |
| "grad_norm": 0.09110796973669487, |
| "learning_rate": 1.985504460373903e-05, |
| "loss": 0.0136, |
| "step": 37 |
| }, |
| { |
| "epoch": 0.7069767441860465, |
| "grad_norm": 0.11267598006158698, |
| "learning_rate": 1.9810810890921943e-05, |
| "loss": 0.0165, |
| "step": 38 |
| }, |
| { |
| "epoch": 0.7255813953488373, |
| "grad_norm": 0.06102186903057507, |
| "learning_rate": 1.9760758775559275e-05, |
| "loss": 0.0126, |
| "step": 39 |
| }, |
| { |
| "epoch": 0.7441860465116279, |
| "grad_norm": 0.07606506531845961, |
| "learning_rate": 1.9704917941574053e-05, |
| "loss": 0.0089, |
| "step": 40 |
| }, |
| { |
| "epoch": 0.7627906976744186, |
| "grad_norm": 0.08740043178381678, |
| "learning_rate": 1.9643321505948588e-05, |
| "loss": 0.0107, |
| "step": 41 |
| }, |
| { |
| "epoch": 0.7813953488372093, |
| "grad_norm": 0.07346457791016978, |
| "learning_rate": 1.957600599908406e-05, |
| "loss": 0.012, |
| "step": 42 |
| }, |
| { |
| "epoch": 0.8, |
| "grad_norm": 0.05256355785683351, |
| "learning_rate": 1.9503011343135828e-05, |
| "loss": 0.0077, |
| "step": 43 |
| }, |
| { |
| "epoch": 0.8186046511627907, |
| "grad_norm": 0.06730657099562354, |
| "learning_rate": 1.9424380828337146e-05, |
| "loss": 0.0096, |
| "step": 44 |
| }, |
| { |
| "epoch": 0.8372093023255814, |
| "grad_norm": 0.09049437049992155, |
| "learning_rate": 1.9340161087325483e-05, |
| "loss": 0.0147, |
| "step": 45 |
| }, |
| { |
| "epoch": 0.8558139534883721, |
| "grad_norm": 0.11718130416581084, |
| "learning_rate": 1.9250402067486523e-05, |
| "loss": 0.0209, |
| "step": 46 |
| }, |
| { |
| "epoch": 0.8744186046511628, |
| "grad_norm": 0.11920106288847335, |
| "learning_rate": 1.9155157001332374e-05, |
| "loss": 0.0187, |
| "step": 47 |
| }, |
| { |
| "epoch": 0.8930232558139535, |
| "grad_norm": 0.08573826959920579, |
| "learning_rate": 1.905448237493147e-05, |
| "loss": 0.0128, |
| "step": 48 |
| }, |
| { |
| "epoch": 0.9116279069767442, |
| "grad_norm": 0.14226356094863013, |
| "learning_rate": 1.894843789440892e-05, |
| "loss": 0.0154, |
| "step": 49 |
| }, |
| { |
| "epoch": 0.9302325581395349, |
| "grad_norm": 0.10221422172872822, |
| "learning_rate": 1.8837086450537195e-05, |
| "loss": 0.0127, |
| "step": 50 |
| }, |
| { |
| "epoch": 0.9488372093023256, |
| "grad_norm": 0.1166202206923503, |
| "learning_rate": 1.872049408143808e-05, |
| "loss": 0.016, |
| "step": 51 |
| }, |
| { |
| "epoch": 0.9674418604651163, |
| "grad_norm": 0.10084456140423266, |
| "learning_rate": 1.8598729933418102e-05, |
| "loss": 0.0175, |
| "step": 52 |
| }, |
| { |
| "epoch": 0.986046511627907, |
| "grad_norm": 0.1311416317041396, |
| "learning_rate": 1.8471866219960604e-05, |
| "loss": 0.0247, |
| "step": 53 |
| }, |
| { |
| "epoch": 1.0186046511627906, |
| "grad_norm": 0.28490252925519627, |
| "learning_rate": 1.833997817889878e-05, |
| "loss": 0.0344, |
| "step": 54 |
| }, |
| { |
| "epoch": 1.0186046511627906, |
| "eval_loss": 0.013307686895132065, |
| "eval_runtime": 7.8647, |
| "eval_samples_per_second": 4.45, |
| "eval_steps_per_second": 0.381, |
| "step": 54 |
| }, |
| { |
| "epoch": 1.0372093023255813, |
| "grad_norm": 0.12394873291936631, |
| "learning_rate": 1.820314402779511e-05, |
| "loss": 0.0154, |
| "step": 55 |
| }, |
| { |
| "epoch": 1.0558139534883721, |
| "grad_norm": 0.07405539009849099, |
| "learning_rate": 1.806144491755363e-05, |
| "loss": 0.0122, |
| "step": 56 |
| }, |
| { |
| "epoch": 1.0744186046511628, |
| "grad_norm": 0.0893380692931903, |
| "learning_rate": 1.7914964884292543e-05, |
| "loss": 0.013, |
| "step": 57 |
| }, |
| { |
| "epoch": 1.0930232558139534, |
| "grad_norm": 0.1333370996316984, |
| "learning_rate": 1.7763790799505746e-05, |
| "loss": 0.0146, |
| "step": 58 |
| }, |
| { |
| "epoch": 1.1116279069767443, |
| "grad_norm": 0.13999306352147337, |
| "learning_rate": 1.760801231854278e-05, |
| "loss": 0.0129, |
| "step": 59 |
| }, |
| { |
| "epoch": 1.130232558139535, |
| "grad_norm": 0.06256448391274093, |
| "learning_rate": 1.744772182743782e-05, |
| "loss": 0.0104, |
| "step": 60 |
| }, |
| { |
| "epoch": 1.1488372093023256, |
| "grad_norm": 0.06731235867523412, |
| "learning_rate": 1.728301438811916e-05, |
| "loss": 0.0104, |
| "step": 61 |
| }, |
| { |
| "epoch": 1.1674418604651162, |
| "grad_norm": 0.09450571159679347, |
| "learning_rate": 1.711398768203178e-05, |
| "loss": 0.012, |
| "step": 62 |
| }, |
| { |
| "epoch": 1.1860465116279069, |
| "grad_norm": 0.05090058468218199, |
| "learning_rate": 1.6940741952206342e-05, |
| "loss": 0.0043, |
| "step": 63 |
| }, |
| { |
| "epoch": 1.2046511627906977, |
| "grad_norm": 0.08578222401956319, |
| "learning_rate": 1.676337994380903e-05, |
| "loss": 0.0111, |
| "step": 64 |
| }, |
| { |
| "epoch": 1.2232558139534884, |
| "grad_norm": 0.07476021551161319, |
| "learning_rate": 1.658200684320748e-05, |
| "loss": 0.0113, |
| "step": 65 |
| }, |
| { |
| "epoch": 1.241860465116279, |
| "grad_norm": 0.05790407785730431, |
| "learning_rate": 1.6396730215588913e-05, |
| "loss": 0.0069, |
| "step": 66 |
| }, |
| { |
| "epoch": 1.2604651162790699, |
| "grad_norm": 0.03379259184785083, |
| "learning_rate": 1.6207659941167485e-05, |
| "loss": 0.0043, |
| "step": 67 |
| }, |
| { |
| "epoch": 1.2790697674418605, |
| "grad_norm": 0.06256322776770672, |
| "learning_rate": 1.6014908150018703e-05, |
| "loss": 0.0104, |
| "step": 68 |
| }, |
| { |
| "epoch": 1.2976744186046512, |
| "grad_norm": 0.07073330621501295, |
| "learning_rate": 1.581858915557953e-05, |
| "loss": 0.0116, |
| "step": 69 |
| }, |
| { |
| "epoch": 1.3162790697674418, |
| "grad_norm": 0.057466240493530954, |
| "learning_rate": 1.5618819386853607e-05, |
| "loss": 0.0106, |
| "step": 70 |
| }, |
| { |
| "epoch": 1.3348837209302324, |
| "grad_norm": 0.05473293443561517, |
| "learning_rate": 1.541571731936185e-05, |
| "loss": 0.0078, |
| "step": 71 |
| }, |
| { |
| "epoch": 1.3534883720930233, |
| "grad_norm": 0.05900255050172093, |
| "learning_rate": 1.5209403404879305e-05, |
| "loss": 0.0074, |
| "step": 72 |
| }, |
| { |
| "epoch": 1.3534883720930233, |
| "eval_loss": 0.007977687753736973, |
| "eval_runtime": 7.8593, |
| "eval_samples_per_second": 4.453, |
| "eval_steps_per_second": 0.382, |
| "step": 72 |
| }, |
| { |
| "epoch": 1.372093023255814, |
| "grad_norm": 0.08510000598742681, |
| "learning_rate": 1.5000000000000002e-05, |
| "loss": 0.0108, |
| "step": 73 |
| }, |
| { |
| "epoch": 1.3906976744186046, |
| "grad_norm": 0.05877743214254424, |
| "learning_rate": 1.4787631293572094e-05, |
| "loss": 0.0059, |
| "step": 74 |
| }, |
| { |
| "epoch": 1.4093023255813955, |
| "grad_norm": 0.057513786613874074, |
| "learning_rate": 1.4572423233046386e-05, |
| "loss": 0.0041, |
| "step": 75 |
| }, |
| { |
| "epoch": 1.427906976744186, |
| "grad_norm": 0.062093441896676184, |
| "learning_rate": 1.4354503449781914e-05, |
| "loss": 0.0068, |
| "step": 76 |
| }, |
| { |
| "epoch": 1.4465116279069767, |
| "grad_norm": 0.044554227704026034, |
| "learning_rate": 1.4134001183352833e-05, |
| "loss": 0.006, |
| "step": 77 |
| }, |
| { |
| "epoch": 1.4651162790697674, |
| "grad_norm": 0.03443289212911578, |
| "learning_rate": 1.391104720490156e-05, |
| "loss": 0.009, |
| "step": 78 |
| }, |
| { |
| "epoch": 1.483720930232558, |
| "grad_norm": 0.0779129764921489, |
| "learning_rate": 1.368577373958362e-05, |
| "loss": 0.0068, |
| "step": 79 |
| }, |
| { |
| "epoch": 1.5023255813953489, |
| "grad_norm": 0.07985807669133597, |
| "learning_rate": 1.3458314388150115e-05, |
| "loss": 0.0088, |
| "step": 80 |
| }, |
| { |
| "epoch": 1.5209302325581395, |
| "grad_norm": 0.05695282478335252, |
| "learning_rate": 1.3228804047714462e-05, |
| "loss": 0.0089, |
| "step": 81 |
| }, |
| { |
| "epoch": 1.5395348837209304, |
| "grad_norm": 0.08518437375949815, |
| "learning_rate": 1.2997378831750242e-05, |
| "loss": 0.0092, |
| "step": 82 |
| }, |
| { |
| "epoch": 1.558139534883721, |
| "grad_norm": 0.07685898902633663, |
| "learning_rate": 1.2764175989367717e-05, |
| "loss": 0.0088, |
| "step": 83 |
| }, |
| { |
| "epoch": 1.5767441860465117, |
| "grad_norm": 0.049835581641918786, |
| "learning_rate": 1.2529333823916807e-05, |
| "loss": 0.0049, |
| "step": 84 |
| }, |
| { |
| "epoch": 1.5953488372093023, |
| "grad_norm": 0.03667598853117914, |
| "learning_rate": 1.2292991610964902e-05, |
| "loss": 0.0054, |
| "step": 85 |
| }, |
| { |
| "epoch": 1.613953488372093, |
| "grad_norm": 0.056949095453050116, |
| "learning_rate": 1.2055289515698008e-05, |
| "loss": 0.0081, |
| "step": 86 |
| }, |
| { |
| "epoch": 1.6325581395348836, |
| "grad_norm": 0.048621142584313465, |
| "learning_rate": 1.1816368509794365e-05, |
| "loss": 0.0049, |
| "step": 87 |
| }, |
| { |
| "epoch": 1.6511627906976745, |
| "grad_norm": 0.039588705778341456, |
| "learning_rate": 1.1576370287819737e-05, |
| "loss": 0.0066, |
| "step": 88 |
| }, |
| { |
| "epoch": 1.669767441860465, |
| "grad_norm": 0.05016148760881635, |
| "learning_rate": 1.133543718319398e-05, |
| "loss": 0.0107, |
| "step": 89 |
| }, |
| { |
| "epoch": 1.688372093023256, |
| "grad_norm": 0.042663399533681944, |
| "learning_rate": 1.1093712083778748e-05, |
| "loss": 0.0042, |
| "step": 90 |
| }, |
| { |
| "epoch": 1.688372093023256, |
| "eval_loss": 0.005588721018284559, |
| "eval_runtime": 7.874, |
| "eval_samples_per_second": 4.445, |
| "eval_steps_per_second": 0.381, |
| "step": 90 |
| }, |
| { |
| "epoch": 1.7069767441860466, |
| "grad_norm": 0.02925494399865119, |
| "learning_rate": 1.0851338347136358e-05, |
| "loss": 0.0067, |
| "step": 91 |
| }, |
| { |
| "epoch": 1.7255813953488373, |
| "grad_norm": 0.07257358806989027, |
| "learning_rate": 1.060845971551014e-05, |
| "loss": 0.0051, |
| "step": 92 |
| }, |
| { |
| "epoch": 1.744186046511628, |
| "grad_norm": 0.04184666263914448, |
| "learning_rate": 1.0365220230576592e-05, |
| "loss": 0.0055, |
| "step": 93 |
| }, |
| { |
| "epoch": 1.7627906976744185, |
| "grad_norm": 0.049664611722031446, |
| "learning_rate": 1.0121764148019977e-05, |
| "loss": 0.0039, |
| "step": 94 |
| }, |
| { |
| "epoch": 1.7813953488372092, |
| "grad_norm": 0.06078403490104621, |
| "learning_rate": 9.878235851980027e-06, |
| "loss": 0.0096, |
| "step": 95 |
| }, |
| { |
| "epoch": 1.8, |
| "grad_norm": 0.07383805593885986, |
| "learning_rate": 9.634779769423412e-06, |
| "loss": 0.0093, |
| "step": 96 |
| }, |
| { |
| "epoch": 1.8186046511627907, |
| "grad_norm": 0.05984856638753301, |
| "learning_rate": 9.391540284489862e-06, |
| "loss": 0.0086, |
| "step": 97 |
| }, |
| { |
| "epoch": 1.8372093023255816, |
| "grad_norm": 0.04231835514384289, |
| "learning_rate": 9.148661652863644e-06, |
| "loss": 0.0069, |
| "step": 98 |
| }, |
| { |
| "epoch": 1.8558139534883722, |
| "grad_norm": 0.0551763929394583, |
| "learning_rate": 8.906287916221259e-06, |
| "loss": 0.0081, |
| "step": 99 |
| }, |
| { |
| "epoch": 1.8744186046511628, |
| "grad_norm": 0.025498585736095546, |
| "learning_rate": 8.664562816806022e-06, |
| "loss": 0.0027, |
| "step": 100 |
| }, |
| { |
| "epoch": 1.8930232558139535, |
| "grad_norm": 0.021324533986796082, |
| "learning_rate": 8.423629712180265e-06, |
| "loss": 0.0025, |
| "step": 101 |
| }, |
| { |
| "epoch": 1.9116279069767441, |
| "grad_norm": 0.039436626213124555, |
| "learning_rate": 8.183631490205636e-06, |
| "loss": 0.0063, |
| "step": 102 |
| }, |
| { |
| "epoch": 1.9302325581395348, |
| "grad_norm": 0.03604083695947765, |
| "learning_rate": 7.944710484301995e-06, |
| "loss": 0.0078, |
| "step": 103 |
| }, |
| { |
| "epoch": 1.9488372093023256, |
| "grad_norm": 0.044771044931489874, |
| "learning_rate": 7.707008389035102e-06, |
| "loss": 0.0072, |
| "step": 104 |
| }, |
| { |
| "epoch": 1.9674418604651163, |
| "grad_norm": 0.05236595594191565, |
| "learning_rate": 7.470666176083193e-06, |
| "loss": 0.0063, |
| "step": 105 |
| }, |
| { |
| "epoch": 1.9860465116279071, |
| "grad_norm": 0.051764858789243716, |
| "learning_rate": 7.235824010632284e-06, |
| "loss": 0.007, |
| "step": 106 |
| }, |
| { |
| "epoch": 2.0186046511627906, |
| "grad_norm": 0.12615745390263763, |
| "learning_rate": 7.002621168249759e-06, |
| "loss": 0.0078, |
| "step": 107 |
| }, |
| { |
| "epoch": 2.0372093023255813, |
| "grad_norm": 0.0279455329013709, |
| "learning_rate": 6.771195952285541e-06, |
| "loss": 0.002, |
| "step": 108 |
| }, |
| { |
| "epoch": 2.0372093023255813, |
| "eval_loss": 0.0043613542802631855, |
| "eval_runtime": 7.8727, |
| "eval_samples_per_second": 4.446, |
| "eval_steps_per_second": 0.381, |
| "step": 108 |
| }, |
| { |
| "epoch": 2.055813953488372, |
| "grad_norm": 0.050623589284039226, |
| "learning_rate": 6.5416856118498874e-06, |
| "loss": 0.0051, |
| "step": 109 |
| }, |
| { |
| "epoch": 2.0744186046511626, |
| "grad_norm": 0.017741431281156423, |
| "learning_rate": 6.314226260416383e-06, |
| "loss": 0.002, |
| "step": 110 |
| }, |
| { |
| "epoch": 2.0930232558139537, |
| "grad_norm": 0.024515661366764918, |
| "learning_rate": 6.088952795098442e-06, |
| "loss": 0.0019, |
| "step": 111 |
| }, |
| { |
| "epoch": 2.1116279069767443, |
| "grad_norm": 0.0468771272401996, |
| "learning_rate": 5.8659988166471715e-06, |
| "loss": 0.0041, |
| "step": 112 |
| }, |
| { |
| "epoch": 2.130232558139535, |
| "grad_norm": 0.016836146878657435, |
| "learning_rate": 5.645496550218089e-06, |
| "loss": 0.0016, |
| "step": 113 |
| }, |
| { |
| "epoch": 2.1488372093023256, |
| "grad_norm": 0.02263350241807767, |
| "learning_rate": 5.427576766953615e-06, |
| "loss": 0.0021, |
| "step": 114 |
| }, |
| { |
| "epoch": 2.167441860465116, |
| "grad_norm": 0.06977978120600516, |
| "learning_rate": 5.212368706427913e-06, |
| "loss": 0.0045, |
| "step": 115 |
| }, |
| { |
| "epoch": 2.186046511627907, |
| "grad_norm": 0.025790973967087454, |
| "learning_rate": 5.000000000000003e-06, |
| "loss": 0.003, |
| "step": 116 |
| }, |
| { |
| "epoch": 2.2046511627906975, |
| "grad_norm": 0.023915439963659035, |
| "learning_rate": 4.790596595120699e-06, |
| "loss": 0.0013, |
| "step": 117 |
| }, |
| { |
| "epoch": 2.2232558139534886, |
| "grad_norm": 0.021020617529585547, |
| "learning_rate": 4.584282680638155e-06, |
| "loss": 0.004, |
| "step": 118 |
| }, |
| { |
| "epoch": 2.2418604651162792, |
| "grad_norm": 0.04003746322613325, |
| "learning_rate": 4.381180613146396e-06, |
| "loss": 0.0025, |
| "step": 119 |
| }, |
| { |
| "epoch": 2.26046511627907, |
| "grad_norm": 0.057024105592216455, |
| "learning_rate": 4.181410844420473e-06, |
| "loss": 0.0058, |
| "step": 120 |
| }, |
| { |
| "epoch": 2.2790697674418605, |
| "grad_norm": 0.029414663679115195, |
| "learning_rate": 3.9850918499812976e-06, |
| "loss": 0.0015, |
| "step": 121 |
| }, |
| { |
| "epoch": 2.297674418604651, |
| "grad_norm": 0.0239693371918793, |
| "learning_rate": 3.7923400588325156e-06, |
| "loss": 0.0046, |
| "step": 122 |
| }, |
| { |
| "epoch": 2.316279069767442, |
| "grad_norm": 0.028656105914865584, |
| "learning_rate": 3.6032697844110896e-06, |
| "loss": 0.002, |
| "step": 123 |
| }, |
| { |
| "epoch": 2.3348837209302324, |
| "grad_norm": 0.02746226523726054, |
| "learning_rate": 3.4179931567925216e-06, |
| "loss": 0.0047, |
| "step": 124 |
| }, |
| { |
| "epoch": 2.353488372093023, |
| "grad_norm": 0.04015222914102744, |
| "learning_rate": 3.236620056190972e-06, |
| "loss": 0.0024, |
| "step": 125 |
| }, |
| { |
| "epoch": 2.3720930232558137, |
| "grad_norm": 0.046229568639069615, |
| "learning_rate": 3.0592580477936606e-06, |
| "loss": 0.0042, |
| "step": 126 |
| }, |
| { |
| "epoch": 2.3720930232558137, |
| "eval_loss": 0.003030273597687483, |
| "eval_runtime": 7.8714, |
| "eval_samples_per_second": 4.446, |
| "eval_steps_per_second": 0.381, |
| "step": 126 |
| }, |
| { |
| "epoch": 2.390697674418605, |
| "grad_norm": 0.0343368759582639, |
| "learning_rate": 2.8860123179682244e-06, |
| "loss": 0.0056, |
| "step": 127 |
| }, |
| { |
| "epoch": 2.4093023255813955, |
| "grad_norm": 0.04261094397051709, |
| "learning_rate": 2.7169856118808414e-06, |
| "loss": 0.0033, |
| "step": 128 |
| }, |
| { |
| "epoch": 2.427906976744186, |
| "grad_norm": 0.012832760153153636, |
| "learning_rate": 2.5522781725621814e-06, |
| "loss": 0.0013, |
| "step": 129 |
| }, |
| { |
| "epoch": 2.4465116279069767, |
| "grad_norm": 0.008556950871138308, |
| "learning_rate": 2.3919876814572197e-06, |
| "loss": 0.0008, |
| "step": 130 |
| }, |
| { |
| "epoch": 2.4651162790697674, |
| "grad_norm": 0.02894249684533982, |
| "learning_rate": 2.2362092004942583e-06, |
| "loss": 0.003, |
| "step": 131 |
| }, |
| { |
| "epoch": 2.483720930232558, |
| "grad_norm": 0.010447028894947608, |
| "learning_rate": 2.08503511570746e-06, |
| "loss": 0.0038, |
| "step": 132 |
| }, |
| { |
| "epoch": 2.5023255813953487, |
| "grad_norm": 0.03976577778662713, |
| "learning_rate": 1.9385550824463727e-06, |
| "loss": 0.0021, |
| "step": 133 |
| }, |
| { |
| "epoch": 2.5209302325581397, |
| "grad_norm": 0.04259365890515298, |
| "learning_rate": 1.7968559722048906e-06, |
| "loss": 0.0028, |
| "step": 134 |
| }, |
| { |
| "epoch": 2.5395348837209304, |
| "grad_norm": 0.015298497955299525, |
| "learning_rate": 1.660021821101222e-06, |
| "loss": 0.0031, |
| "step": 135 |
| }, |
| { |
| "epoch": 2.558139534883721, |
| "grad_norm": 0.038985035555964956, |
| "learning_rate": 1.528133780039397e-06, |
| "loss": 0.0039, |
| "step": 136 |
| }, |
| { |
| "epoch": 2.5767441860465117, |
| "grad_norm": 0.026143827999310705, |
| "learning_rate": 1.401270066581899e-06, |
| "loss": 0.0016, |
| "step": 137 |
| }, |
| { |
| "epoch": 2.5953488372093023, |
| "grad_norm": 0.02768010703883242, |
| "learning_rate": 1.279505918561923e-06, |
| "loss": 0.0075, |
| "step": 138 |
| }, |
| { |
| "epoch": 2.613953488372093, |
| "grad_norm": 0.06210818527323947, |
| "learning_rate": 1.1629135494628097e-06, |
| "loss": 0.0019, |
| "step": 139 |
| }, |
| { |
| "epoch": 2.6325581395348836, |
| "grad_norm": 0.01524072120387841, |
| "learning_rate": 1.051562105591082e-06, |
| "loss": 0.0016, |
| "step": 140 |
| }, |
| { |
| "epoch": 2.6511627906976747, |
| "grad_norm": 0.04109763655596019, |
| "learning_rate": 9.455176250685338e-07, |
| "loss": 0.0038, |
| "step": 141 |
| }, |
| { |
| "epoch": 2.669767441860465, |
| "grad_norm": 0.030463645149412296, |
| "learning_rate": 8.448429986676298e-07, |
| "loss": 0.0043, |
| "step": 142 |
| }, |
| { |
| "epoch": 2.688372093023256, |
| "grad_norm": 0.027502223705235716, |
| "learning_rate": 7.495979325134806e-07, |
| "loss": 0.0027, |
| "step": 143 |
| }, |
| { |
| "epoch": 2.7069767441860466, |
| "grad_norm": 0.026179924146814967, |
| "learning_rate": 6.598389126745209e-07, |
| "loss": 0.0007, |
| "step": 144 |
| }, |
| { |
| "epoch": 2.7069767441860466, |
| "eval_loss": 0.003121032379567623, |
| "eval_runtime": 7.8794, |
| "eval_samples_per_second": 4.442, |
| "eval_steps_per_second": 0.381, |
| "step": 144 |
| }, |
| { |
| "epoch": 2.7255813953488373, |
| "grad_norm": 0.011982444545489648, |
| "learning_rate": 5.756191716628556e-07, |
| "loss": 0.0013, |
| "step": 145 |
| }, |
| { |
| "epoch": 2.744186046511628, |
| "grad_norm": 0.03330732222099739, |
| "learning_rate": 4.969886568641757e-07, |
| "loss": 0.0032, |
| "step": 146 |
| }, |
| { |
| "epoch": 2.7627906976744185, |
| "grad_norm": 0.023682492055883297, |
| "learning_rate": 4.2399400091594154e-07, |
| "loss": 0.0013, |
| "step": 147 |
| }, |
| { |
| "epoch": 2.781395348837209, |
| "grad_norm": 0.03136060527051506, |
| "learning_rate": 3.566784940514145e-07, |
| "loss": 0.0034, |
| "step": 148 |
| }, |
| { |
| "epoch": 2.8, |
| "grad_norm": 0.029859235605094774, |
| "learning_rate": 2.9508205842594727e-07, |
| "loss": 0.0028, |
| "step": 149 |
| }, |
| { |
| "epoch": 2.818604651162791, |
| "grad_norm": 0.014794714241809361, |
| "learning_rate": 2.392412244407294e-07, |
| "loss": 0.0019, |
| "step": 150 |
| }, |
| { |
| "epoch": 2.8372093023255816, |
| "grad_norm": 0.027214565729087804, |
| "learning_rate": 1.8918910907805733e-07, |
| "loss": 0.0044, |
| "step": 151 |
| }, |
| { |
| "epoch": 2.855813953488372, |
| "grad_norm": 0.05360236347003852, |
| "learning_rate": 1.4495539626097289e-07, |
| "loss": 0.0094, |
| "step": 152 |
| }, |
| { |
| "epoch": 2.874418604651163, |
| "grad_norm": 0.03066827362384079, |
| "learning_rate": 1.0656631924889749e-07, |
| "loss": 0.0022, |
| "step": 153 |
| }, |
| { |
| "epoch": 2.8930232558139535, |
| "grad_norm": 0.03513526613176401, |
| "learning_rate": 7.404464507973608e-08, |
| "loss": 0.0036, |
| "step": 154 |
| }, |
| { |
| "epoch": 2.911627906976744, |
| "grad_norm": 0.021797504007715043, |
| "learning_rate": 4.740966106764222e-08, |
| "loss": 0.0019, |
| "step": 155 |
| }, |
| { |
| "epoch": 2.9302325581395348, |
| "grad_norm": 0.03160297982575043, |
| "learning_rate": 2.667716336448356e-08, |
| "loss": 0.0048, |
| "step": 156 |
| }, |
| { |
| "epoch": 2.948837209302326, |
| "grad_norm": 0.06032839745772416, |
| "learning_rate": 1.1859447591769934e-08, |
| "loss": 0.0033, |
| "step": 157 |
| }, |
| { |
| "epoch": 2.967441860465116, |
| "grad_norm": 0.03226812753869109, |
| "learning_rate": 2.9653015486064143e-09, |
| "loss": 0.0059, |
| "step": 158 |
| }, |
| { |
| "epoch": 2.986046511627907, |
| "grad_norm": 0.052085978194672505, |
| "learning_rate": 0.0, |
| "loss": 0.0034, |
| "step": 159 |
| } |
| ], |
| "logging_steps": 1, |
| "max_steps": 159, |
| "num_input_tokens_seen": 0, |
| "num_train_epochs": 3, |
| "save_steps": 18, |
| "stateful_callbacks": { |
| "TrainerControl": { |
| "args": { |
| "should_epoch_stop": false, |
| "should_evaluate": false, |
| "should_log": false, |
| "should_save": true, |
| "should_training_stop": true |
| }, |
| "attributes": {} |
| } |
| }, |
| "total_flos": 1.5035543056369582e+18, |
| "train_batch_size": 4, |
| "trial_name": null, |
| "trial_params": null |
| } |