jobs-git yury-zyphra commited on
Commit
466f478
·
verified ·
0 Parent(s):

Duplicate from Zyphra/Zyda-2

Browse files

Co-authored-by: Yury Tokpanov <yury-zyphra@users.noreply.huggingface.co>

This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +58 -0
  2. README.md +170 -0
  3. data/dclm_crossdeduped/global-shard_01_of_10/part.0.parquet +3 -0
  4. data/dclm_crossdeduped/global-shard_01_of_10/part.1.parquet +3 -0
  5. data/dclm_crossdeduped/global-shard_01_of_10/part.10.parquet +3 -0
  6. data/dclm_crossdeduped/global-shard_01_of_10/part.100.parquet +3 -0
  7. data/dclm_crossdeduped/global-shard_01_of_10/part.1000.parquet +3 -0
  8. data/dclm_crossdeduped/global-shard_01_of_10/part.1001.parquet +3 -0
  9. data/dclm_crossdeduped/global-shard_01_of_10/part.1002.parquet +3 -0
  10. data/dclm_crossdeduped/global-shard_01_of_10/part.1003.parquet +3 -0
  11. data/dclm_crossdeduped/global-shard_01_of_10/part.1004.parquet +3 -0
  12. data/dclm_crossdeduped/global-shard_01_of_10/part.1005.parquet +3 -0
  13. data/dclm_crossdeduped/global-shard_01_of_10/part.1006.parquet +3 -0
  14. data/dclm_crossdeduped/global-shard_01_of_10/part.1007.parquet +3 -0
  15. data/dclm_crossdeduped/global-shard_01_of_10/part.1008.parquet +3 -0
  16. data/dclm_crossdeduped/global-shard_01_of_10/part.1009.parquet +3 -0
  17. data/dclm_crossdeduped/global-shard_01_of_10/part.101.parquet +3 -0
  18. data/dclm_crossdeduped/global-shard_01_of_10/part.1010.parquet +3 -0
  19. data/dclm_crossdeduped/global-shard_01_of_10/part.1011.parquet +3 -0
  20. data/dclm_crossdeduped/global-shard_01_of_10/part.1012.parquet +3 -0
  21. data/dclm_crossdeduped/global-shard_01_of_10/part.1013.parquet +3 -0
  22. data/dclm_crossdeduped/global-shard_01_of_10/part.1014.parquet +3 -0
  23. data/dclm_crossdeduped/global-shard_01_of_10/part.1015.parquet +3 -0
  24. data/dclm_crossdeduped/global-shard_01_of_10/part.1016.parquet +3 -0
  25. data/dclm_crossdeduped/global-shard_01_of_10/part.1017.parquet +3 -0
  26. data/dclm_crossdeduped/global-shard_01_of_10/part.1018.parquet +3 -0
  27. data/dclm_crossdeduped/global-shard_01_of_10/part.1019.parquet +3 -0
  28. data/dclm_crossdeduped/global-shard_01_of_10/part.102.parquet +3 -0
  29. data/dclm_crossdeduped/global-shard_01_of_10/part.1020.parquet +3 -0
  30. data/dclm_crossdeduped/global-shard_01_of_10/part.1021.parquet +3 -0
  31. data/dclm_crossdeduped/global-shard_01_of_10/part.1022.parquet +3 -0
  32. data/dclm_crossdeduped/global-shard_01_of_10/part.1023.parquet +3 -0
  33. data/dclm_crossdeduped/global-shard_01_of_10/part.1024.parquet +3 -0
  34. data/dclm_crossdeduped/global-shard_01_of_10/part.1025.parquet +3 -0
  35. data/dclm_crossdeduped/global-shard_01_of_10/part.1026.parquet +3 -0
  36. data/dclm_crossdeduped/global-shard_01_of_10/part.1027.parquet +3 -0
  37. data/dclm_crossdeduped/global-shard_01_of_10/part.1028.parquet +3 -0
  38. data/dclm_crossdeduped/global-shard_01_of_10/part.1029.parquet +3 -0
  39. data/dclm_crossdeduped/global-shard_01_of_10/part.103.parquet +3 -0
  40. data/dclm_crossdeduped/global-shard_01_of_10/part.1030.parquet +3 -0
  41. data/dclm_crossdeduped/global-shard_01_of_10/part.1031.parquet +3 -0
  42. data/dclm_crossdeduped/global-shard_01_of_10/part.1032.parquet +3 -0
  43. data/dclm_crossdeduped/global-shard_01_of_10/part.1033.parquet +3 -0
  44. data/dclm_crossdeduped/global-shard_01_of_10/part.1034.parquet +3 -0
  45. data/dclm_crossdeduped/global-shard_01_of_10/part.1035.parquet +3 -0
  46. data/dclm_crossdeduped/global-shard_01_of_10/part.1036.parquet +3 -0
  47. data/dclm_crossdeduped/global-shard_01_of_10/part.1037.parquet +3 -0
  48. data/dclm_crossdeduped/global-shard_01_of_10/part.1038.parquet +3 -0
  49. data/dclm_crossdeduped/global-shard_01_of_10/part.1039.parquet +3 -0
  50. data/dclm_crossdeduped/global-shard_01_of_10/part.104.parquet +3 -0
.gitattributes ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.lz4 filter=lfs diff=lfs merge=lfs -text
12
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
13
+ *.model filter=lfs diff=lfs merge=lfs -text
14
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
15
+ *.npy filter=lfs diff=lfs merge=lfs -text
16
+ *.npz filter=lfs diff=lfs merge=lfs -text
17
+ *.onnx filter=lfs diff=lfs merge=lfs -text
18
+ *.ot filter=lfs diff=lfs merge=lfs -text
19
+ *.parquet filter=lfs diff=lfs merge=lfs -text
20
+ *.pb filter=lfs diff=lfs merge=lfs -text
21
+ *.pickle filter=lfs diff=lfs merge=lfs -text
22
+ *.pkl filter=lfs diff=lfs merge=lfs -text
23
+ *.pt filter=lfs diff=lfs merge=lfs -text
24
+ *.pth filter=lfs diff=lfs merge=lfs -text
25
+ *.rar filter=lfs diff=lfs merge=lfs -text
26
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
27
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
29
+ *.tar filter=lfs diff=lfs merge=lfs -text
30
+ *.tflite filter=lfs diff=lfs merge=lfs -text
31
+ *.tgz filter=lfs diff=lfs merge=lfs -text
32
+ *.wasm filter=lfs diff=lfs merge=lfs -text
33
+ *.xz filter=lfs diff=lfs merge=lfs -text
34
+ *.zip filter=lfs diff=lfs merge=lfs -text
35
+ *.zst filter=lfs diff=lfs merge=lfs -text
36
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
37
+ # Audio files - uncompressed
38
+ *.pcm filter=lfs diff=lfs merge=lfs -text
39
+ *.sam filter=lfs diff=lfs merge=lfs -text
40
+ *.raw filter=lfs diff=lfs merge=lfs -text
41
+ # Audio files - compressed
42
+ *.aac filter=lfs diff=lfs merge=lfs -text
43
+ *.flac filter=lfs diff=lfs merge=lfs -text
44
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
45
+ *.ogg filter=lfs diff=lfs merge=lfs -text
46
+ *.wav filter=lfs diff=lfs merge=lfs -text
47
+ # Image files - uncompressed
48
+ *.bmp filter=lfs diff=lfs merge=lfs -text
49
+ *.gif filter=lfs diff=lfs merge=lfs -text
50
+ *.png filter=lfs diff=lfs merge=lfs -text
51
+ *.tiff filter=lfs diff=lfs merge=lfs -text
52
+ # Image files - compressed
53
+ *.jpg filter=lfs diff=lfs merge=lfs -text
54
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
55
+ *.webp filter=lfs diff=lfs merge=lfs -text
56
+ # Video files - compressed
57
+ *.mp4 filter=lfs diff=lfs merge=lfs -text
58
+ *.webm filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,170 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: odc-by
3
+ pretty_name: Zyda-2
4
+ task_categories:
5
+ - text-generation
6
+ language:
7
+ - en
8
+ size_categories:
9
+ - n>1T
10
+ configs:
11
+ - config_name: default
12
+ data_files:
13
+ - split: train
14
+ path: data/*/*/*
15
+ - config_name: sample-100BT
16
+ data_files:
17
+ - split: train
18
+ path: sample/100BT/*/*
19
+ - config_name: dclm_crossdeduped
20
+ data_files:
21
+ - split: train
22
+ path: data/dclm_crossdeduped/*/*
23
+ - config_name: zyda_crossdeduped-filtered
24
+ data_files:
25
+ - split: train
26
+ path: data/zyda_crossdeduped-filtered/*/*
27
+ - config_name: dolma-cc_crossdeduped-filtered
28
+ data_files:
29
+ - split: train
30
+ path: data/dolma-cc_crossdeduped-filtered/*
31
+ - config_name: fwe3
32
+ data_files:
33
+ - split: train
34
+ path: data/fwe3/*/*
35
+ ---
36
+
37
+ # Zyda-2
38
+
39
+ <!-- Provide a quick summary of the dataset. -->
40
+
41
+ Zyda-2 is a 5 trillion token language modeling dataset created by collecting open and high quality datasets, combining them, and applying cross-deduplication and model-based quality filtering. Zyda-2 comprises diverse sources of web data, highly educational content, math, code, and scientific papers.
42
+
43
+ To construct Zyda-2, we took the best open-source datasets available: [Zyda](https://huggingface.co/datasets/Zyphra/Zyda), [FineWeb](https://huggingface.co/datasets/HuggingFaceFW/fineweb), [DCLM](https://huggingface.co/datasets/mlfoundations/dclm-baseline-1.0), and [Dolma](https://huggingface.co/datasets/allenai/dolma). Models trained on Zyda-2 significantly outperform identical models trained on the Pile, RefinedWeb, FineWeb, FineWeb-Edu, and DCLM. Due to our post-processing deduplication, filtering, and weighting pipeline, Zyda-2 outperforms all its constituent datasets in resulting model quality.
44
+
45
+ An early version of Zyda-2 was used as the primary dataset for phase 1 pretraining of our Zamba2 [series](https://huggingface.co/Zyphra/Zamba2-7B) [of](https://huggingface.co/Zyphra/Zamba2-2.7B) [models](https://huggingface.co/Zyphra/Zamba2-1.2B) which perform extremely strongly on a per-token basis and are often state-of-the-art for their size, testifying to the strength of Zyda-2 as a pretraining dataset.
46
+
47
+ According to our evaluations, Zyda-2 is the most performant per-token open dataset available. Zyda-2 excels at educational and natural language reasoning content. For code performance, we recommend mixing it with a pure code dataset such as [Starcoder](https://huggingface.co/bigcode/starcoder).
48
+
49
+
50
+ <center>
51
+ <img src="https://cdn-uploads.huggingface.co/production/uploads/65455aca468722e935103b17/-nxHBcU38QJ-MNdKXPiYS.png" width="600" alt="Zyda-2 evaluation scores">
52
+ </center>
53
+
54
+
55
+ For more information, please see our [technical blog](https://www.zyphra.com/post/building-zyda-2).
56
+
57
+ ## How to download
58
+ We preserved the schemas of original component datasets, meaning that every component has its own schema. For that reason attempting to download the whole dataset using `datasets.load_dataset()` will fail during the stage of generating a split. If you attempt to stream the default config, it will also fail.
59
+
60
+ To download the whole dataset we recommend either cloning the repository or, if you must use `datasets.load_dataset()`, downloading individual components separately.
61
+
62
+ Only `nemo_id` and `text` are common columns between the components. Select those for every component first, and only then interleave the datasets with optimal weights (see example at the bottom of this section).
63
+
64
+ Example command to clone the repository using huggingface-cli: `huggingface-cli download Zyphra/Zyda-2 --repo-type dataset`
65
+
66
+ Commands to download individual components:
67
+ - DCLM: `ds_dclm = datasets.load_dataset("Zyphra/Zyda-2", name="dclm_crossdeduped", split="train")`
68
+ - Zyda: `ds_zyda = datasets.load_dataset("Zyphra/Zyda-2", name="zyda_crossdeduped-filtered", split="train")`
69
+ - Dolma-CC: `ds_dolma = datasets.load_dataset("Zyphra/Zyda-2", name="dolma-cc_crossdeduped-filtered", split="train")`
70
+ - Fineweb-Edu: `ds_fwe = datasets.load_dataset("Zyphra/Zyda-2", name="fwe3", split="train")`
71
+
72
+ In this repository we provide raw results of cross deduplication and filtering. To achieve the best possible performance, one will need to use appropriate weights during training.
73
+ We found the following optimal weights by number of tokens (in the sense of weights in the resultant dataset): DCLM - 4.0, FWE3 - 4.0, Zyda - 0.16, Dolma-CC - 0.24.
74
+
75
+ Below you will find an example of how to get proper dataset object.
76
+ It demonstrates how to select only `nemo_id` and `text` columns, and then interleave the datasets with probabilities computed from the weights above.
77
+ One needs to be careful with weights normalization, as `interleave_datasets()` returns documents, while our weights are token-wise. We provide precomputed document-wise weights in the example below.
78
+ To stream the dataset, add `streaming=True` to the `load_dataset()` commands.
79
+
80
+ ```
81
+ common_columns = ["nemo_id", "text"]
82
+ ds_dclm = ds_dclm.select_columns(common_columns)
83
+ ds_zyda = ds_zyda.select_columns(common_columns)
84
+ ds_dolma = ds_dolma.select_columns(common_columns)
85
+ ds_fwe = ds_fwe.select_columns(common_columns)
86
+ norm_weights = [0.4038, 0.0316, 0.0585, 0.5061]
87
+ ds = datasets.interleave_datasets([ds_dclm, ds_zyda, ds_dolma, ds_fwe], probabilities=norm_weights, stopping_strategy="all_exhausted")
88
+ ```
89
+
90
+ ### (Smaller) sample version
91
+ Along with the configs above, you can also download a smaller version of the dataset with the following config:
92
+ - `sample-100BT`: a subset randomly sampled from the whole dataset of around 100B gpt-neox tokens (252GB, 91.2M documents).
93
+
94
+ This sample only has common columns `nemo_id` and `text`. In addition, it was sampled according to optimal weights, so you can start using it directly.
95
+
96
+ `ds_sample = datasets.load_dataset("Zyphra/Zyda-2", name="sample-100BT", split="train")`
97
+
98
+ ## Breakdown by component
99
+
100
+ | Component | Download size (parquet, GBs) | Documents (millions) | gpt-neox tokens (billions) |
101
+ | --- | --- | --- | --- |
102
+ | dclm-crossdeduped | 8,469.4 | 2,590.5 | 3,348.942 |
103
+ | zyda-crossdeduped-filtered | 452.4 | 247.7 | 163.6 |
104
+ | dolma_cc-crossdeduped-filtered | 668.2 | 445.6 | 238.4 |
105
+ | fwe3 | 3,490.5 | 1,279.1 | 1,319.2 |
106
+ | Total | 13,080.5 | 4,562.8 | 5,070.2 |
107
+
108
+ ### Dataset Description
109
+
110
+ <!-- Provide a longer summary of what this dataset is. -->
111
+
112
+ - **Curated by:** Zyphra
113
+ - **Language(s) (NLP):** Primarily English
114
+ - **License:** Open Data Commons License
115
+
116
+
117
+ ## Dataset Structure
118
+
119
+ <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. -->
120
+
121
+ Each component has its own individual schema. Please consult their respective sources for exact information.
122
+
123
+ However, in all components the document text is in the `text` column, and the unique document id is in the `nemo_id` column.
124
+
125
+ Our Zyda-1 and Dolma-CC versions also have two additional columns corresponding to prediction of Nvidia's quality model (https://huggingface.co/nvidia/quality-classifier-deberta): `quality_prob` and `quality_pred`.
126
+
127
+ ### Source Data
128
+
129
+ Zyda-2 is comprised of four high quality open-source datasets:
130
+
131
+ Zyda-1: https://huggingface.co/datasets/Zyphra/Zyda
132
+
133
+ Dolma-CC v1.7: https://huggingface.co/datasets/allenai/dolma
134
+
135
+ DCLM-baseline: https://huggingface.co/datasets/mlfoundations/dclm-baseline-1.0
136
+
137
+ FineWeb-Edu-score2: https://huggingface.co/datasets/HuggingFaceFW/fineweb-edu-score-2
138
+
139
+ <center>
140
+ <img src="https://cdn-uploads.huggingface.co/production/uploads/65c05e75c084467acab2f84a/GQenkNxzyM65M4eR2YZcV.png" width="600" alt="Zyda-2 dataset composition">
141
+ </center>
142
+
143
+ #### Personal and Sensitive Information
144
+
145
+ As a language modeling dataset, it likely contains PII which has not been filtered out of the component datasets and which may have been missed by our own filters.
146
+
147
+ ## Bias, Risks, and Limitations
148
+
149
+ As a dataset comprised of open web scrapes, it is likely that it contains biased and toxic content.
150
+
151
+ ## Licensing Information
152
+
153
+ We are releasing this dataset under the terms of [ODC-BY](https://opendatacommons.org/licenses/by/1-0/). By using this dataset, you are also bound by any license agreements and terms of use of the original data sources.
154
+
155
+ ## Citation
156
+
157
+ If you use our dataset to train a model, please cite us at:
158
+
159
+ ```
160
+ @misc{zyphra_nvidia_2024,
161
+ author = {Yury Tokpanov and Paolo Glorioso and Ayush Dattagupta and Vibhu Jawa and Ryan Wolf and Vikranth Jeyakumar and Arham Mehta and Quentin Anthony and Beren Millidge},
162
+ title = {Building {Zyda-2}, a 5 {Trillion} {Token} {High-Quality} {Dataset}, with {NVIDIA} {NeMo} {Curator}},
163
+ url = {https://www.zyphra.com/post/building-zyda-2},
164
+ publisher = {Zyphra},
165
+ year = {2024},
166
+ month = {October},
167
+ day = {15}
168
+ }
169
+ ```
170
+
data/dclm_crossdeduped/global-shard_01_of_10/part.0.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a0f754eea66e74a607905495d1ab0b200a38c952ebb5c680c8400b5129d81f76
3
+ size 89097060
data/dclm_crossdeduped/global-shard_01_of_10/part.1.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d0e7870cbbac8489627ba34caf4aecb24dde7c711c767f8bcc955de72a92c5c9
3
+ size 90316648
data/dclm_crossdeduped/global-shard_01_of_10/part.10.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a4c5662cf89203a77fab2248337e59f45a2aa5e6fd23502f30548cfe7410e3bd
3
+ size 119334504
data/dclm_crossdeduped/global-shard_01_of_10/part.100.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fa86f0a96cdc0a7b397aa3cc93d4e78aeacdc352ac28b9e852af955197ec6c28
3
+ size 132071593
data/dclm_crossdeduped/global-shard_01_of_10/part.1000.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c9827402388257261a8e40e8d097fcff92253254d80c12eaa511f0ac046c52d9
3
+ size 129635356
data/dclm_crossdeduped/global-shard_01_of_10/part.1001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:64324965580f7e2c586bf4ee9fa9610751a9f051c56cbcd81fc3ee309cef5bfe
3
+ size 135734771
data/dclm_crossdeduped/global-shard_01_of_10/part.1002.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7d3bef5fd986aa2077333df85958f84a884902b72ec16da2cede6f3fdb233499
3
+ size 136431182
data/dclm_crossdeduped/global-shard_01_of_10/part.1003.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:52785811cb8e72d892592974ed3680e213384a205cdf5b2971758c70b1856f12
3
+ size 135813300
data/dclm_crossdeduped/global-shard_01_of_10/part.1004.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f1c3a8adb6d727550847e7252b8ef8de1a5f921baf7198c6b97665a2b1b4f0cf
3
+ size 162213816
data/dclm_crossdeduped/global-shard_01_of_10/part.1005.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4b1909d4aae688952cc082b3766157e7fa0e33a8ac9f46b6e2d00d26c5f3a9d5
3
+ size 161643850
data/dclm_crossdeduped/global-shard_01_of_10/part.1006.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:28920ee6cf1e6007ff594a8f7d5bce6f6d4469311bed9be3d86271fbc45159bf
3
+ size 137365717
data/dclm_crossdeduped/global-shard_01_of_10/part.1007.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6d20e1e9c8da482b498fee2d4c5110977e5f395f89d36116cd6d468d8cc2c5c3
3
+ size 137966324
data/dclm_crossdeduped/global-shard_01_of_10/part.1008.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fa53c8a608d98773bd73f9888b3797ef4ee0cc98b5ddf4746f4a61ea65c59ad3
3
+ size 95283322
data/dclm_crossdeduped/global-shard_01_of_10/part.1009.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c9826df7a0d9e84911714646e4c3a7c33282921ee9af39d40aaec4c753863b0a
3
+ size 99249990
data/dclm_crossdeduped/global-shard_01_of_10/part.101.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c1e6668771c6743ffa08919a8f378647fa3be7c1e8be18bd46a9a609c5b0444a
3
+ size 130379624
data/dclm_crossdeduped/global-shard_01_of_10/part.1010.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cd51cb8918033ba86dbce935ce1b0562cf101e3e8e55842680c66eb24b31264d
3
+ size 98152821
data/dclm_crossdeduped/global-shard_01_of_10/part.1011.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8cceb005781262759c2c9f3873206a9a2039b2d4de40db19191f16f71346124c
3
+ size 152886040
data/dclm_crossdeduped/global-shard_01_of_10/part.1012.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fad8a999c56b81ed301d505e8aa9e062ceaa4832e50c3a83ec2c0f46556b1cf8
3
+ size 155901872
data/dclm_crossdeduped/global-shard_01_of_10/part.1013.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:47ce935e17dfbd0765d2edbe6b862f94382e92f7488a8009c49d844c8162208e
3
+ size 122681381
data/dclm_crossdeduped/global-shard_01_of_10/part.1014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1d6a340a37f1bafdea1554f4a22630b1b3fdf0888526ba1edb9cd6d568eab414
3
+ size 122990455
data/dclm_crossdeduped/global-shard_01_of_10/part.1015.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7d775b9c04dd657700560889e23b28e5b35280d614344b7ec91f7d9c8fb0bc8b
3
+ size 124660841
data/dclm_crossdeduped/global-shard_01_of_10/part.1016.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:597cbb51f91071d7ec41ab881ae4087dce008bdab0ef4ecc5e7bbea38d294fee
3
+ size 138345659
data/dclm_crossdeduped/global-shard_01_of_10/part.1017.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:594fc231cabf5fd1647983447bbf8d21c5ca620617b0018d4a7de4345a75278b
3
+ size 135476222
data/dclm_crossdeduped/global-shard_01_of_10/part.1018.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6dbf9d75664873c04644f399d6c341f43f22a9513eec7dac447fbf881ddf4ff4
3
+ size 118482208
data/dclm_crossdeduped/global-shard_01_of_10/part.1019.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ea5b60449a7d8a6df6149bfc65816219c26ff2227726ba9b1cbea2c88db0a855
3
+ size 117933543
data/dclm_crossdeduped/global-shard_01_of_10/part.102.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b355d550664ca2752d0b1e68c0ddc49630626b282ef07fb0282b1b0f500ad909
3
+ size 145033248
data/dclm_crossdeduped/global-shard_01_of_10/part.1020.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b56b5fc4273222cd585ed3922934238bff2fe8b5bfac5e0f897e30cce5f9b900
3
+ size 118054607
data/dclm_crossdeduped/global-shard_01_of_10/part.1021.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0efa819d961c0512640ee640042503610ff007322760cf63704de38f9016cbaf
3
+ size 91970093
data/dclm_crossdeduped/global-shard_01_of_10/part.1022.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ae4566e76267fdd9d8b13f40294b9599d014745d86c7b19fd788854cf157bb9c
3
+ size 91445496
data/dclm_crossdeduped/global-shard_01_of_10/part.1023.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:17bd430e7ed008fb955476a0f3fc25aac1513d4943758f083f6d9dbf86a8ce34
3
+ size 91343626
data/dclm_crossdeduped/global-shard_01_of_10/part.1024.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a2373e0131a267b5f58a45da7f57a49ec8e9206110a2c7cd8a6d643042d850b7
3
+ size 124796844
data/dclm_crossdeduped/global-shard_01_of_10/part.1025.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6d7a95d6b693aed243f15e628c3597f7987a1f1f11de8b3e8ae4202d67c7479f
3
+ size 130104026
data/dclm_crossdeduped/global-shard_01_of_10/part.1026.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c7e892b07593fe4d287a52418556eafa1851e418a31c03fff4e3a587b0e24875
3
+ size 148064531
data/dclm_crossdeduped/global-shard_01_of_10/part.1027.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f60295297a6c6bca71c9094a0271c9b1f058be898d0861517d05a764699ccc48
3
+ size 146383632
data/dclm_crossdeduped/global-shard_01_of_10/part.1028.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cd6987a294e115272995d506defe2743cb7d6034de2f7a0c77fda1ee73d86349
3
+ size 142716510
data/dclm_crossdeduped/global-shard_01_of_10/part.1029.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a5eef7bf091d062ad17f3ae3170c8e2fccaa832e25708a3888782ae0effd983a
3
+ size 145755350
data/dclm_crossdeduped/global-shard_01_of_10/part.103.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b121d066880bbdbafa1fdd82ff12cb3bc61529acc69938f8fef58fee9d0b0db9
3
+ size 142770034
data/dclm_crossdeduped/global-shard_01_of_10/part.1030.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2caccd39d34d3719206906993b1bd168c55e6a12ea0ecc77152ebf8c34b332c6
3
+ size 110897074
data/dclm_crossdeduped/global-shard_01_of_10/part.1031.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7b971f0ef0823fd46044c51ffbfc07dbb9763472422ab4ccb1d623f63f470e93
3
+ size 114418860
data/dclm_crossdeduped/global-shard_01_of_10/part.1032.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aa48a3c14934b819a9dc479be50258e4e706f55fb4d8d08b8adf94c6ed9b6ff5
3
+ size 113850977
data/dclm_crossdeduped/global-shard_01_of_10/part.1033.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:813dd89d29bd55cf70b8699fd39bc9afa25e18df231938aff731ccf06a8e4779
3
+ size 138080217
data/dclm_crossdeduped/global-shard_01_of_10/part.1034.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2963c42e05879884ecf7bae800dc2783045512de6fb327f6abe2676bee908649
3
+ size 137095098
data/dclm_crossdeduped/global-shard_01_of_10/part.1035.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5da1b4cde65aa2515c644769e1ed0e8e06f8d92e9921b7f9aaa652bc94385334
3
+ size 140513196
data/dclm_crossdeduped/global-shard_01_of_10/part.1036.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2adf0edc0225b8355e93e6dcaec7b48db5a5511fdb0b665f08040cfb48b27d52
3
+ size 141312675
data/dclm_crossdeduped/global-shard_01_of_10/part.1037.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c5d5f83e48f0eef9f21a2d386b2bec93c2d946b6cbc1e9261d927ea40b1be748
3
+ size 119073699
data/dclm_crossdeduped/global-shard_01_of_10/part.1038.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:753608d4b4c619cf4dbbde6e71559073c9968005b3e8787d95ffb2dd7c46d3d6
3
+ size 119188017
data/dclm_crossdeduped/global-shard_01_of_10/part.1039.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bfc1f2a5eaa1786a0d3ce29db57751f3970d3e6e82a8c82636aa61078a644296
3
+ size 117995619
data/dclm_crossdeduped/global-shard_01_of_10/part.104.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:229709414d345093daffce8accc462d3869f11e14f3d098380b095f2cbad6dae
3
+ size 138018962