guipenedo (HF Staff) and hynky (HF Staff) committed
Commit 9cfabe2 · verified · 0 Parent(s)

Super-squash branch 'main' using huggingface_hub


Co-authored-by: hynky <[email protected]>

This view is limited to 50 files because it contains too many changes.

Files changed (50):
  1. .gitattributes +59 -0
  2. README.md +514 -0
  3. data/afr_Latn/train/000_00000.parquet +3 -0
  4. data/als_Latn/train/000_00000.parquet +3 -0
  5. data/als_Latn/train/000_00001.parquet +3 -0
  6. data/anp_Deva/train/000_00000.parquet +3 -0
  7. data/anp_Deva/train/000_00001.parquet +3 -0
  8. data/arb_Arab/train/000_00000.parquet +3 -0
  9. data/arb_Arab/train/000_00001.parquet +3 -0
  10. data/arb_Arab/train/000_00002.parquet +3 -0
  11. data/arb_Arab/train/000_00003.parquet +3 -0
  12. data/arb_Arab/train/000_00004.parquet +3 -0
  13. data/arb_Arab/train/000_00005.parquet +3 -0
  14. data/arb_Arab/train/000_00006.parquet +3 -0
  15. data/arb_Arab/train/000_00007.parquet +3 -0
  16. data/azj_Latn/train/000_00000.parquet +3 -0
  17. data/azj_Latn/train/000_00001.parquet +3 -0
  18. data/bcc_Arab/train/000_00000.parquet +3 -0
  19. data/ben_Beng/train/000_00000.parquet +3 -0
  20. data/ben_Beng/train/000_00001.parquet +3 -0
  21. data/ben_Beng/train/000_00002.parquet +3 -0
  22. data/bos_Latn/train/000_00000.parquet +3 -0
  23. data/bos_Latn/train/000_00001.parquet +3 -0
  24. data/bos_Latn/train/000_00002.parquet +3 -0
  25. data/bul_Cyrl/train/000_00000.parquet +3 -0
  26. data/bul_Cyrl/train/000_00001.parquet +3 -0
  27. data/bul_Cyrl/train/000_00002.parquet +3 -0
  28. data/bul_Cyrl/train/000_00003.parquet +3 -0
  29. data/cat_Latn/train/000_00000.parquet +3 -0
  30. data/cat_Latn/train/000_00001.parquet +3 -0
  31. data/cat_Latn/train/000_00002.parquet +3 -0
  32. data/cat_Latn/train/000_00003.parquet +3 -0
  33. data/ces_Latn/train/000_00000.parquet +3 -0
  34. data/ces_Latn/train/000_00001.parquet +3 -0
  35. data/ces_Latn/train/000_00002.parquet +3 -0
  36. data/ces_Latn/train/000_00003.parquet +3 -0
  37. data/ces_Latn/train/000_00004.parquet +3 -0
  38. data/ces_Latn/train/000_00005.parquet +3 -0
  39. data/ces_Latn/train/000_00006.parquet +3 -0
  40. data/ces_Latn/train/000_00007.parquet +3 -0
  41. data/cmn_Hani/train/000_00000.parquet +3 -0
  42. data/cmn_Hani/train/000_00001.parquet +3 -0
  43. data/cmn_Hani/train/000_00002.parquet +3 -0
  44. data/cmn_Hani/train/000_00003.parquet +3 -0
  45. data/cmn_Hani/train/000_00004.parquet +3 -0
  46. data/cmn_Hani/train/000_00005.parquet +3 -0
  47. data/cmn_Hani/train/000_00006.parquet +3 -0
  48. data/cmn_Hani/train/000_00007.parquet +3 -0
  49. data/cmn_Hani/train/000_00008.parquet +3 -0
  50. data/cym_Latn/train/000_00000.parquet +3 -0
.gitattributes ADDED
@@ -0,0 +1,59 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.lz4 filter=lfs diff=lfs merge=lfs -text
+ *.mds filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ # Audio files - uncompressed
+ *.pcm filter=lfs diff=lfs merge=lfs -text
+ *.sam filter=lfs diff=lfs merge=lfs -text
+ *.raw filter=lfs diff=lfs merge=lfs -text
+ # Audio files - compressed
+ *.aac filter=lfs diff=lfs merge=lfs -text
+ *.flac filter=lfs diff=lfs merge=lfs -text
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
+ *.ogg filter=lfs diff=lfs merge=lfs -text
+ *.wav filter=lfs diff=lfs merge=lfs -text
+ # Image files - uncompressed
+ *.bmp filter=lfs diff=lfs merge=lfs -text
+ *.gif filter=lfs diff=lfs merge=lfs -text
+ *.png filter=lfs diff=lfs merge=lfs -text
+ *.tiff filter=lfs diff=lfs merge=lfs -text
+ # Image files - compressed
+ *.jpg filter=lfs diff=lfs merge=lfs -text
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
+ *.webp filter=lfs diff=lfs merge=lfs -text
+ # Video files - compressed
+ *.mp4 filter=lfs diff=lfs merge=lfs -text
+ *.webm filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,514 @@
+ ---
+ license: odc-by
+ task_categories:
+ - text-generation
+ language:
+ - en
+ - de
+ - ja
+ - fr
+ - es
+ - it
+ - ru
+ - pt
+ - pl
+ - nl
+ - cs
+ - zh
+ - ro
+ - sv
+ - hu
+ - sk
+ - uk
+ - th
+ - da
+ - id
+ - el
+ - fi
+ - ca
+ - tr
+ - dag
+ - hr
+ - fa
+ - bg
+ - nb
+ - kiu
+ - ar
+ - vi
+ - sr
+ - ko
+ - sl
+ - lt
+ - hi
+ - he
+ - bs
+ - ms
+ - et
+ - lv
+ - bn
+ - frp
+ - is
+ - glk
+ - eu
+ - gl
+ - sq
+ - mk
+ - mr
+ - ne
+ - ka
+ - la
+ - pcm
+ - mt
+ - cy
+ - vec
+ - hy
+ - nrm
+ - wuu
+ - anp
+ - bcc
+ - ur
+ - af
+ - az
+ - ta
+ - kk
+ - nn
+ pretty_name: FinePDFs-Edu
+ size_categories:
+ - n>1T
+ configs:
+ - config_name: eng_Latn
+   default: true
+   data_files:
+   - split: train
+     path: data/eng_Latn/train/*
+ - config_name: deu_Latn
+   data_files:
+   - split: train
+     path: data/deu_Latn/train/*
+ - config_name: jpn_Jpan
+   data_files:
+   - split: train
+     path: data/jpn_Jpan/train/*
+ - config_name: fra_Latn
+   data_files:
+   - split: train
+     path: data/fra_Latn/train/*
+ - config_name: spa_Latn
+   data_files:
+   - split: train
+     path: data/spa_Latn/train/*
+ - config_name: ita_Latn
+   data_files:
+   - split: train
+     path: data/ita_Latn/train/*
+ - config_name: rus_Cyrl
+   data_files:
+   - split: train
+     path: data/rus_Cyrl/train/*
+ - config_name: por_Latn
+   data_files:
+   - split: train
+     path: data/por_Latn/train/*
+ - config_name: pol_Latn
+   data_files:
+   - split: train
+     path: data/pol_Latn/train/*
+ - config_name: nld_Latn
+   data_files:
+   - split: train
+     path: data/nld_Latn/train/*
+ - config_name: ces_Latn
+   data_files:
+   - split: train
+     path: data/ces_Latn/train/*
+ - config_name: cmn_Hani
+   data_files:
+   - split: train
+     path: data/cmn_Hani/train/*
+ - config_name: ron_Latn
+   data_files:
+   - split: train
+     path: data/ron_Latn/train/*
+ - config_name: swe_Latn
+   data_files:
+   - split: train
+     path: data/swe_Latn/train/*
+ - config_name: hun_Latn
+   data_files:
+   - split: train
+     path: data/hun_Latn/train/*
+ - config_name: slk_Latn
+   data_files:
+   - split: train
+     path: data/slk_Latn/train/*
+ - config_name: ukr_Cyrl
+   data_files:
+   - split: train
+     path: data/ukr_Cyrl/train/*
+ - config_name: tha_Thai
+   data_files:
+   - split: train
+     path: data/tha_Thai/train/*
+ - config_name: dan_Latn
+   data_files:
+   - split: train
+     path: data/dan_Latn/train/*
+ - config_name: ind_Latn
+   data_files:
+   - split: train
+     path: data/ind_Latn/train/*
+ - config_name: ell_Grek
+   data_files:
+   - split: train
+     path: data/ell_Grek/train/*
+ - config_name: fin_Latn
+   data_files:
+   - split: train
+     path: data/fin_Latn/train/*
+ - config_name: cat_Latn
+   data_files:
+   - split: train
+     path: data/cat_Latn/train/*
+ - config_name: tur_Latn
+   data_files:
+   - split: train
+     path: data/tur_Latn/train/*
+ - config_name: dag_Latn
+   data_files:
+   - split: train
+     path: data/dag_Latn/train/*
+ - config_name: hrv_Latn
+   data_files:
+   - split: train
+     path: data/hrv_Latn/train/*
+ - config_name: fas_Arab
+   data_files:
+   - split: train
+     path: data/fas_Arab/train/*
+ - config_name: bul_Cyrl
+   data_files:
+   - split: train
+     path: data/bul_Cyrl/train/*
+ - config_name: nob_Latn
+   data_files:
+   - split: train
+     path: data/nob_Latn/train/*
+ - config_name: kiu_Latn
+   data_files:
+   - split: train
+     path: data/kiu_Latn/train/*
+ - config_name: arb_Arab
+   data_files:
+   - split: train
+     path: data/arb_Arab/train/*
+ - config_name: vie_Latn
+   data_files:
+   - split: train
+     path: data/vie_Latn/train/*
+ - config_name: srp_Cyrl
+   data_files:
+   - split: train
+     path: data/srp_Cyrl/train/*
+ - config_name: kor_Hang
+   data_files:
+   - split: train
+     path: data/kor_Hang/train/*
+ - config_name: slv_Latn
+   data_files:
+   - split: train
+     path: data/slv_Latn/train/*
+ - config_name: lit_Latn
+   data_files:
+   - split: train
+     path: data/lit_Latn/train/*
+ - config_name: hin_Deva
+   data_files:
+   - split: train
+     path: data/hin_Deva/train/*
+ - config_name: heb_Hebr
+   data_files:
+   - split: train
+     path: data/heb_Hebr/train/*
+ - config_name: bos_Latn
+   data_files:
+   - split: train
+     path: data/bos_Latn/train/*
+ - config_name: zsm_Latn
+   data_files:
+   - split: train
+     path: data/zsm_Latn/train/*
+ - config_name: ekk_Latn
+   data_files:
+   - split: train
+     path: data/ekk_Latn/train/*
+ - config_name: lvs_Latn
+   data_files:
+   - split: train
+     path: data/lvs_Latn/train/*
+ - config_name: ben_Beng
+   data_files:
+   - split: train
+     path: data/ben_Beng/train/*
+ - config_name: frp_Latn
+   data_files:
+   - split: train
+     path: data/frp_Latn/train/*
+ - config_name: isl_Latn
+   data_files:
+   - split: train
+     path: data/isl_Latn/train/*
+ - config_name: glk_Arab
+   data_files:
+   - split: train
+     path: data/glk_Arab/train/*
+ - config_name: eus_Latn
+   data_files:
+   - split: train
+     path: data/eus_Latn/train/*
+ - config_name: glg_Latn
+   data_files:
+   - split: train
+     path: data/glg_Latn/train/*
+ - config_name: als_Latn
+   data_files:
+   - split: train
+     path: data/als_Latn/train/*
+ - config_name: mkd_Cyrl
+   data_files:
+   - split: train
+     path: data/mkd_Cyrl/train/*
+ - config_name: mar_Deva
+   data_files:
+   - split: train
+     path: data/mar_Deva/train/*
+ - config_name: npi_Deva
+   data_files:
+   - split: train
+     path: data/npi_Deva/train/*
+ - config_name: kat_Geor
+   data_files:
+   - split: train
+     path: data/kat_Geor/train/*
+ - config_name: lat_Latn
+   data_files:
+   - split: train
+     path: data/lat_Latn/train/*
+ - config_name: pcm_Latn
+   data_files:
+   - split: train
+     path: data/pcm_Latn/train/*
+ - config_name: mlt_Latn
+   data_files:
+   - split: train
+     path: data/mlt_Latn/train/*
+ - config_name: cym_Latn
+   data_files:
+   - split: train
+     path: data/cym_Latn/train/*
+ - config_name: vec_Latn
+   data_files:
+   - split: train
+     path: data/vec_Latn/train/*
+ - config_name: hye_Armn
+   data_files:
+   - split: train
+     path: data/hye_Armn/train/*
+ - config_name: nrm_Latn
+   data_files:
+   - split: train
+     path: data/nrm_Latn/train/*
+ - config_name: wuu_Hani
+   data_files:
+   - split: train
+     path: data/wuu_Hani/train/*
+ - config_name: anp_Deva
+   data_files:
+   - split: train
+     path: data/anp_Deva/train/*
+ - config_name: bcc_Arab
+   data_files:
+   - split: train
+     path: data/bcc_Arab/train/*
+ - config_name: urd_Arab
+   data_files:
+   - split: train
+     path: data/urd_Arab/train/*
+ - config_name: afr_Latn
+   data_files:
+   - split: train
+     path: data/afr_Latn/train/*
+ - config_name: azj_Latn
+   data_files:
+   - split: train
+     path: data/azj_Latn/train/*
+ - config_name: tam_Taml
+   data_files:
+   - split: train
+     path: data/tam_Taml/train/*
+ - config_name: kaz_Cyrl
+   data_files:
+   - split: train
+     path: data/kaz_Cyrl/train/*
+ - config_name: nno_Latn
+   data_files:
+   - split: train
+     path: data/nno_Latn/train/*
+ ---
+ 
+ # 📚 FinePDFs-Edu
+ 
+ ![FinePDFs](https://cdn-uploads.huggingface.co/production/uploads/626ede24d2fa9e7d598c8709/dgGeCo6yfZvThn-Fc6Q8k.png)
+ 
+ > 350B+ highly educational tokens from PDFs 📄
+ 
+ ## What is it?
+ 
+ The 📚 FinePDFs-Edu dataset consists of **350B+ tokens** of educational PDFs, filtered from the 📄 [FinePDFs](https://huggingface.co/datasets/HuggingFaceFW/finepdfs) dataset and covering 69 languages.
+ 
+ FinePDFs-Edu was created using the recipe of [FineWeb-Edu](https://huggingface.co/datasets/HuggingFaceFW/fineweb-edu): we developed an [educational quality classifier](https://huggingface.co/HuggingFaceFW/finepdfs_edu_classifier_eng_Latn) using annotations generated by Qwen3-235B-A22B-Instruct-2507, for each of the 69 languages present in this dataset.
+ We then used these classifiers to retain only the most educational documents. FinePDFs-Edu outperforms FinePDFs on popular benchmarks and shows the power of classifiers trained on synthetic data.
+ 
+ The [Dataset Curation](https://huggingface.co/datasets/HuggingFaceFW/finepdfs_edu#dataset-curation) section details the process for creating the dataset.
+ While the dataset might seem an order of magnitude smaller than FineWeb-Edu, unlike its web ancestor it is globally deduplicated!
+ 
+ ![datasets_comparison_edu](https://cdn-uploads.huggingface.co/production/uploads/626ede24d2fa9e7d598c8709/ivVKeFDP2J2MAyQL9s4xy.png)
+ 
+ ## What is being released?
+ 
+ Along with the dataset, which includes all filtered CommonCrawl dumps from `CC-MAIN-2013-20` to `CC-MAIN-2025-08`, we also release:
+ - The [educational classifier](https://huggingface.co/HuggingFaceFW/finepdfs_edu_classifier_eng_Latn) used for the filtering (one for each language)
+ - The [dataset](https://huggingface.co/datasets/HuggingFaceFW/finepdfs_eng_Latn_labeled) with educational (and 3 other) labels by Qwen3-235B-A22B-Instruct-2507 for English.
+ - The [dataset](https://huggingface.co/datasets/HuggingFaceFW/finepdfs_fw_edu_labeled) with educational labels by Qwen3-235B-A22B-Instruct-2507 for 69 languages beyond English.
+ - The [code](https://github.com/huggingface/finepdfs) for training the classifiers and running inference.
+ 
+ ## How to download and use 📄 FinePDFs-Edu
+ 
+ See the configs above for the `subset` of the language you want to download.
+ 
+ We currently do not provide smaller `sample` versions, but by setting `limit` or using `streaming=True` you can easily fetch a sample of the data. If there is interest from the community, we might upload smaller sampled versions later on.
+ 
+ ### Using 🏭 [`datatrove`](https://github.com/huggingface/datatrove/)
+ 
+ ```python
+ from datatrove.pipeline.readers import ParquetReader
+ 
+ # limit determines how many documents will be streamed (remove for all)
+ # this will fetch the Portuguese filtered data
+ data_reader = ParquetReader("hf://datasets/HuggingFaceFW/finepdfs-edu/data/por_Latn/train", limit=1000)
+ for document in data_reader():
+     # do something with document
+     print(document)
+ 
+ ###############################
+ # OR for a processing pipeline:
+ ###############################
+ 
+ from datatrove.executor import LocalPipelineExecutor
+ from datatrove.pipeline.readers import ParquetReader
+ from datatrove.pipeline.filters import LambdaFilter
+ from datatrove.pipeline.writers import JsonlWriter
+ 
+ pipeline_exec = LocalPipelineExecutor(
+     pipeline=[
+         ParquetReader("hf://datasets/HuggingFaceFW/finepdfs-edu/data/por_Latn/train", limit=1000),
+         LambdaFilter(lambda doc: "hugging" in doc.text),
+         JsonlWriter("some-output-path")
+     ],
+     tasks=10
+ )
+ pipeline_exec.run()
+ ```
+ 
+ ### Using `huggingface_hub`
+ 
+ ```python
+ from huggingface_hub import snapshot_download
+ folder = snapshot_download(
+     "HuggingFaceFW/finepdfs-edu",
+     repo_type="dataset",
+     local_dir="./finepdfs-edu/",
+     # download the Czech filtered data
+     allow_patterns=["data/ces_Latn/train/*"])
+ ```
+ 
+ For faster downloads, make sure to install `hf_transfer` (`pip install huggingface_hub[hf_transfer]`) and set the environment variable `HF_HUB_ENABLE_HF_TRANSFER=1`.
+
+ ### Using `datasets`
+ ```python
+ from datasets import load_dataset
+ # get Croatian data
+ fw = load_dataset("HuggingFaceFW/finepdfs-edu", name="hrv_Latn", split="train", streaming=True)
+ ```
+
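+ Since every config can be streamed, a small sample can be materialized without downloading full shards. A minimal sketch (the 1,000-document cutoff is arbitrary):
+ 
+ ```python
+ from itertools import islice
+ from datasets import load_dataset
+ 
+ # Stream the Croatian config and keep only the first 1,000 documents.
+ fw = load_dataset("HuggingFaceFW/finepdfs-edu", name="hrv_Latn", split="train", streaming=True)
+ sample = list(islice(fw, 1000))
+ ```
+ 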
+ Similar to the original FinePDFs, this dataset contains a high proportion of language-switching samples; we therefore recommend using the [filtering function](https://huggingface.co/datasets/HuggingFaceFW/finepdfs#code-switching) if this is not desired.
+ 
+ ## Dataset curation
+ We used the same approach as FineWeb-Edu, with minimal adjustments to the prompt. To scale to languages beyond English, we trained a separate classifier for each language.
+ 
+ ### Educational Scoring
+ 
+ We used [Qwen3-235B-A22B-Instruct-2507](https://huggingface.co/Qwen/Qwen3-235B-A22B-Instruct-2507) to score approximately 300,000 FinePDFs samples for educational quality on a 0–5 scale. The final prompt used for scoring is available [here](https://huggingface.co/HuggingFaceFW/finepdfs_edu_classifier_eng_Latn/blob/main/prompt.txt).
+ 
+ After experimenting with several prompt variants, we found that the **FineWeb-Edu** prompt yielded the most consistent and reliable results. As in FineWeb-Edu, we observed that highly technical or graduate-level content did not correlate well with the benchmarks we track. However, unlike in FineWeb-Edu, the overall average score was noticeably lower: if we had used a fixed threshold of `score = 3`, only about 2% of samples would have been retained.
+ To address this, we instead selected the **top 10%** of samples based on their education score.
+ 
+ | Threshold | Drop Rate |
+ | :-------: | :-------: |
+ | 1 | 0.3028 |
+ | 2 | 0.9451 |
+ | 3 | 0.9802 |
+ | 4 | 0.9906 |
+ | 5 | 0.9987 |
+
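+ The top-10% selection corresponds to thresholding at the 90th percentile of the score distribution. A minimal sketch with synthetic scores (not the real annotations):
+ 
+ ```python
+ import numpy as np
+ 
+ # Hypothetical per-document edu scores for one language.
+ scores = np.random.default_rng(0).normal(loc=1.2, scale=0.9, size=300_000)
+ 
+ # Keep the top 10%: the cutoff is the 90th percentile of the scores.
+ threshold = np.quantile(scores, 0.90)
+ kept = scores >= threshold
+ print(f"threshold={threshold:.3f}, retained={kept.mean():.1%}")
+ ```
+ 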
+ We also replaced the teacher model to improve multilingual coverage and take advantage of the better inference efficiency offered by Mixture-of-Experts (MoE) architectures. To identify a suitable model, we aimed for one that was most *"Claude-like"*, i.e., whose scoring behavior most closely matched **Claude Sonnet-4**. We compared models using mean squared error (MSE) on a 10k-sample development set and found that **Qwen3-235B-A22B-Instruct-2507** was both among the most Claude-like and highly efficient, processing up to **14 chunks/sec on a single H100 GPU**.
+ 
+ | Model | MSE (vs. Sonnet-4) |
+ | :-------------------------------------------- | -----------------: |
+ | Qwen_Qwen3-235B-A22B-Instruct-2507 | **0.398** |
+ | Qwen_Qwen3-235B-A22B-Thinking-2507 | 0.812 |
+ | Qwen_Qwen3-30B-A3B-Instruct-2507 | 0.364 |
+ | Qwen_Qwen3-30B-A3B-Thinking-2507 | 0.925 |
+ | google_gemma-3-27b-it | 2.727 |
+ | meta-llama_Llama-3.3-70B-Instruct | 0.553 |
+ | meta-llama_Llama-4-Maverick-17B-128E-Instruct | 0.707 |
+ | meta-llama_Llama-4-Scout-17B-16E-Instruct | 1.177 |
+ | mistralai_Magistral-Small-2507 | 0.717 |
+ | zai-org_GLM-4.5-Air-FP8 | 0.510 |
+
+ For long documents, we take the first 2,048 tokens from the top of the document. If the document exceeds 10,000 characters, we also take the last 2,048 tokens and compute the final score as `max(top_score, bottom_score)`.
+
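+ A minimal sketch of this scheme, where `tokenize`, `detokenize`, and `score_chunk` are hypothetical stand-ins for the tokenizer and annotator calls (not the production pipeline):
+ 
+ ```python
+ def score_document(text, tokenize, detokenize, score_chunk,
+                    max_tokens=2048, long_doc_chars=10_000):
+     tokens = tokenize(text)
+     # Always score the first 2,048 tokens from the top of the document.
+     top_score = score_chunk(detokenize(tokens[:max_tokens]))
+     if len(text) <= long_doc_chars:
+         return top_score
+     # Documents over 10,000 characters also get their last 2,048 tokens scored;
+     # the final score is the better of the two.
+     bottom_score = score_chunk(detokenize(tokens[-max_tokens:]))
+     return max(top_score, bottom_score)
+ ```
+ 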
+ ### Classifier Training
+ 
+ We fine-tuned a BERT-like regression model on these annotations, based on [answerdotai/ModernBERT-large](https://huggingface.co/answerdotai/ModernBERT-large) for English and [jhu-clsp/mmBERT-base](https://huggingface.co/jhu-clsp/mmBERT-base) for other languages. Both models achieved the best F1 performance among the options we evaluated while supporting FlashAttention 2 (FA2), which allowed us to label over 220 samples per second on an H100 GPU.
+ 
+ For each model, we unfroze both the classifier head and the last four transformer layers. To address severe class imbalance, we rebalanced the training data.
+ 
+ The resulting classifiers are available at:
+ `https://huggingface.co/HuggingFaceFW/finepdfs_edu_classifier_{lang}`
+
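+ A minimal sketch of this partial-unfreezing setup (a single-output regression head on ModernBERT; attribute names follow the current `transformers` implementation and may differ across versions):
+ 
+ ```python
+ from transformers import AutoModelForSequenceClassification
+ 
+ model = AutoModelForSequenceClassification.from_pretrained(
+     "answerdotai/ModernBERT-large",
+     num_labels=1,                 # single-output regression on the edu score
+     problem_type="regression",
+ )
+ 
+ # Freeze everything, then unfreeze the classifier head and the last four layers.
+ for param in model.parameters():
+     param.requires_grad = False
+ for layer in model.model.layers[-4:]:
+     for param in layer.parameters():
+         param.requires_grad = True
+ for param in model.classifier.parameters():
+     param.requires_grad = True
+ ```
+ 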
+ ### Filtering and results
+ 
+ We then built 📚 FinePDFs-Edu by filtering out, for each language, the 90% of samples with the lowest edu score. Our ablations demonstrated that this refined dataset surpasses 📄 FinePDFs and all other open web datasets, with remarkable improvements on educational benchmarks such as MMLU and ARC.
+ You will find all the ablation models and datasets in [this collection](https://huggingface.co/collections/HuggingFaceFW/finepdfs).
+ 
+ ## Considerations for Using the Data
+ See: [FinePDFs](https://huggingface.co/datasets/HuggingFaceFW/finepdfs).
+ 
+ ## Additional Information
+ 
+ ### Licensing Information
+ 
+ The dataset is released under the **Open Data Commons Attribution License (ODC-By) v1.0** [license](https://opendatacommons.org/licenses/by/1-0/). The use of this dataset is also subject to [CommonCrawl's Terms of Use](https://commoncrawl.org/terms-of-use).
+ 
+ ## Citation Information
+ ```
+ @misc{kydlicek2025finepdfs,
+       title={FinePDFs},
+       author={Hynek Kydl{\'\i}{\v{c}}ek and Guilherme Penedo and Leandro von Werra},
+       year={2025},
+       publisher = {Hugging Face},
+       journal = {Hugging Face repository},
+       howpublished = {\url{https://huggingface.co/datasets/HuggingFaceFW/finepdfs_edu}}
+ }
+ ```
data/afr_Latn/train/000_00000.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:02f351fa65b79f5c15e1240b9dc1b2dba07c2bb31c49648195dbc3e510160d66
+ size 116011837
data/als_Latn/train/000_00000.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3ca7c98fac31373a995faf66114160d1754b0a827adcda4deb7e84cad4ab931f
+ size 449330518
data/als_Latn/train/000_00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:13aca1efd94e4f384b4a870691372bd539654c501a5dc8224ddf97ef131d57c5
+ size 217606773
data/anp_Deva/train/000_00000.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:408e4cf57dca1aa0eb0cde16a17e26ef71573489d4b18d5b34594a7436d81a21
+ size 199282154
data/anp_Deva/train/000_00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:17b5d0163561c9a85d12abfdddb6c52a94437a518221cf9bd347f16256ce4567
+ size 96297094
data/arb_Arab/train/000_00000.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:284247eab7c62c2739cda949efe8dde671e77f5e4b47ab0f760bc65a20333b39
+ size 1095984018
data/arb_Arab/train/000_00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9be505979cc7de9f68f3bef32ee707bbb8133b515a5d26e07be2cd512cdc952f
+ size 956050689
data/arb_Arab/train/000_00002.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bb33d2b7ac651745dc1642aead5708d704e5fb9976c3b52ebca10bb5a0a3a37f
+ size 961543198
data/arb_Arab/train/000_00003.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bba73316f1c1d454737007b2bc6cf4f2522e2958513fc9a4a3f49afc5f368b03
+ size 965240812
data/arb_Arab/train/000_00004.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:79951f2cacc2b8f3cffee3077c0e4e3498fa0f4804b16a6f4b029260ed0cec4c
+ size 1034665363
data/arb_Arab/train/000_00005.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ea16cf5dc4a4019487eb982dadb52c7adecc4895ab92d8835bcc2c3255f57f8a
+ size 943692029
data/arb_Arab/train/000_00006.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6addcf8c8f906a937cc454dc916f1986dc26353496a8eae8699bced43bf724de
+ size 915109663
data/arb_Arab/train/000_00007.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c6f76b3cea79f472036a3f0a16302c94d5df3ef76b03c9e679fce0271cc5033e
+ size 712573536
data/azj_Latn/train/000_00000.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f621c7237c4658b5a9fb415ced47b8804117ab9e5f7beb498e2962d8c8fcb7c7
+ size 407852089
data/azj_Latn/train/000_00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:84f78bf17a35e60fb4d430499b4d82b97d4682ea0d02b07a7f10dcb1097e241b
+ size 430897770
data/bcc_Arab/train/000_00000.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ec359e05632abac62958d836e69c32cc7d58bc5c4f9a8991024f38ae1b037236
+ size 179530944
data/ben_Beng/train/000_00000.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eaa9bfbee754df11b9bf6cbaa27f1ab1cac4e9b16d9468e97cd60fc90fc35324
+ size 990859145
data/ben_Beng/train/000_00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0ac75076598417b2c091764e73a3a1b2c06695420328c09452dc66127300423c
+ size 994227640
data/ben_Beng/train/000_00002.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fe9759fd9e38e5522019582208efb733059de67b484f087708a99fe055a180d1
+ size 980579127
data/bos_Latn/train/000_00000.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ac1acdda463825cb2349c2e7c3096fd64efefcadfed079bcc5bc693a26d7fa41
+ size 801601218
data/bos_Latn/train/000_00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e21ce51ef22bb188409997e63cbd1f8438d45ff01ecd02ec23d00d13fc8c0b6d
+ size 732788652
data/bos_Latn/train/000_00002.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:551e6a525e1b4b482cdd4b270d22a249be330d20bc78d0c191730a4e9bd281b4
+ size 534899945
data/bul_Cyrl/train/000_00000.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9f72ab44c070f36d38707a9d12ba2c646f3446a607c21792e1848fe4c2cc990c
+ size 896553074
data/bul_Cyrl/train/000_00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ec269221cd79c7c55b5c8493a1f99580a066ff33f60bda70565763f6783e8bee
+ size 880928167
data/bul_Cyrl/train/000_00002.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b2d54662939824fcfe6d601ef2e807cc98758fa6123d77909e786d728e5796ae
+ size 864810098
data/bul_Cyrl/train/000_00003.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:be3fe6f6a0dcc30a5cad3e226987c59413990adcd8bf2e3342baba0f472ee5f1
+ size 582376250
data/cat_Latn/train/000_00000.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bc5f8626d6e65e2a49279d86584338d7342c4f92853b804a5ad2f2203ba4e4ab
+ size 715953886
data/cat_Latn/train/000_00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7103a4f1712d926c068fa845b872097ab882f9bd47e0c89ecfba579358ca3c72
+ size 534466995
data/cat_Latn/train/000_00002.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d7fbd279075bb94053d41a12b4ea092a9c9afcef641a80c2670b2d5fcf920e0d
+ size 517020441
data/cat_Latn/train/000_00003.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1b48c02835be3b433342931f01e81f54e4512ff0d000ac549818e4e193729a73
+ size 522137892
data/ces_Latn/train/000_00000.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d679888f33c481069756a3dbc212006b940ae0463c71b165718dd23d2937a514
+ size 1290580368
data/ces_Latn/train/000_00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a34698e945b3e4a54243d9150de27f726ca17b554cbd6c7705290771a721b4c5
+ size 1312037532
data/ces_Latn/train/000_00002.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:db47fde9943c2c71b3d3975b564b6aabf534f3c4692d9f2d7280153e071ff432
+ size 1264615923
data/ces_Latn/train/000_00003.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7f343ad925346f219141a652c3f0ec0d43e91a86ef388bdae4dc01099ed61d86
+ size 1273799881
data/ces_Latn/train/000_00004.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fb6e15c0024c34960b312c4faaa57286832acea2391fd25e92ba30feec7af9a3
+ size 1269083862
data/ces_Latn/train/000_00005.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3f20f55991a998db7d6eaa529210486ed48b8015cab2b12ec9a5d70f40564b2d
+ size 1265819327
data/ces_Latn/train/000_00006.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c1afb43de3424f0fec876c83d2a6a09666cfea2b594fcdfc0eebe38d88bc8acb
+ size 1262494137
data/ces_Latn/train/000_00007.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:071e28d46c6f496472f4e3030c85917268abfa712fffc9786d304639c99f918c
+ size 959282175
data/cmn_Hani/train/000_00000.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:afe0b41789b743872b11531f697a5dac3711655304197285061f4634b23eedd3
+ size 842772896
data/cmn_Hani/train/000_00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bca7550a436a3ce224b027327ed332fdc768b1d84ecfc7c5e168857d817e5ad7
+ size 882279872
data/cmn_Hani/train/000_00002.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7e5e7f7f8c2a4eb4cb334bdf49603bb3e9b4ad690017779333bd4302a630ac66
+ size 848199570
data/cmn_Hani/train/000_00003.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9b25857f9eb86bf16aef0857a39ae4e7623d9b12aa6ad4b90a52b4de3f4eb11a
+ size 819614416
data/cmn_Hani/train/000_00004.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a81bd4137706d0573782f7480986c58a498311fde201087d1594a3cb18db610a
+ size 881028834
data/cmn_Hani/train/000_00005.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:70d6071716021c818051a98dcf48596639d50fa5dd08fffb474aaa2b67f31a1a
+ size 830577454
data/cmn_Hani/train/000_00006.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:93e0d1df47246a4781d938766ab32db1f37c3d94f79c718a1aebb43dd338b888
+ size 788821594
data/cmn_Hani/train/000_00007.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2d6df45bdf1550061edebca00d60a9615f6665d15c7660c7827f59a3bd2b893c
+ size 632007692
data/cmn_Hani/train/000_00008.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d0ebba7bd845875f47c6b020027eecd575c74372b9ed42a5176c2a28ff66e850
+ size 645458559
data/cym_Latn/train/000_00000.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d00a9bc8c9ccfb28e85736bcfec7ebf70a319b13d944ace5112216ca858c2efd
+ size 130239230