Update README.md
## How to Load the Dataset

This section provides examples of how to load the `EssentialAI/eai-taxonomy-med-w-dclm-100b-sample` dataset using different Python libraries and frameworks.

### Using Hugging Face Datasets (Standard Method)
The simplest way to load the dataset is using the Hugging Face `datasets` library:

```python
from datasets import load_dataset

# Load the entire dataset
dataset = load_dataset("EssentialAI/eai-taxonomy-med-w-dclm-100b-sample")

# View dataset structure
print(dataset)
```
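Once loaded, individual records can be inspected with standard `datasets` indexing; the sketch below assumes the default `train` split (the same split used in the streaming example further down):

```python
from datasets import load_dataset

dataset = load_dataset("EssentialAI/eai-taxonomy-med-w-dclm-100b-sample")

# List the available splits, then peek at the first record of "train"
print(list(dataset.keys()))
print(dataset["train"][0])
```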
You can also load the dataset in streaming mode to avoid downloading the entire dataset up front:

```python
from datasets import load_dataset

# Load in streaming mode
dataset = load_dataset("EssentialAI/eai-taxonomy-med-w-dclm-100b-sample", streaming=True)
data_stream = dataset["train"]

# Iterate through examples
for example in data_stream:
    print(example)
    break  # remove the break to keep iterating
```
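If you only need a small sample from the stream, `itertools.islice` bounds how much is fetched; a minimal sketch using the same `train` split:

```python
from itertools import islice

from datasets import load_dataset

data_stream = load_dataset(
    "EssentialAI/eai-taxonomy-med-w-dclm-100b-sample", streaming=True
)["train"]

# Pull only the first five records over the network
for example in islice(data_stream, 5):
    print(example)
```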
### Using PySpark

The dataset can also be processed at scale with PySpark through the `pyspark_huggingface` data source:

```python
import pyspark_huggingface
from pyspark.sql import SparkSession

# Initialize Spark session
spark = SparkSession.builder.appName("EAI-Taxonomy-Med-w-DCLM").getOrCreate()

# Load the dataset using the "huggingface" data source
df = spark.read.format("huggingface").load("EssentialAI/eai-taxonomy-med-w-dclm-100b-sample")

# Basic dataset exploration
print(f"Dataset shape: {df.count()} rows, {len(df.columns)} columns")
df.printSchema()

# Load only a subset of columns to reduce transfer
df_subset = (
    spark.read.format("huggingface")
    .option("columns", '["column1", "column2"]')  # Replace with actual column names
    .load("EssentialAI/eai-taxonomy-med-w-dclm-100b-sample")
)

# Run SQL queries on the dataset
df.createOrReplaceTempView("eai_taxonomy_med_w_dclm_dataset")
result = spark.sql("""
    SELECT COUNT(*) as total_examples
    FROM eai_taxonomy_med_w_dclm_dataset
""")
result.show()
```
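From here, ordinary Spark writers apply if you want to persist a slice for downstream jobs. Continuing from the `df` created above, a small sketch; the output path is only illustrative:

```python
# Take a small sample and write it out as Parquet;
# adjust the path for your environment
sample = df.limit(1000)
sample.write.mode("overwrite").parquet("/tmp/eai_taxonomy_sample")
```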
### Using Daft

Daft provides a modern DataFrame library optimized for machine learning workloads:

```python
import daft

# Load the entire dataset
df = daft.read_parquet("hf://datasets/EssentialAI/eai-taxonomy-med-w-dclm-100b-sample")

# Basic exploration
print("Dataset schema:")
print(df.schema())
```
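Because Daft evaluates lazily, a quick preview doesn't require scanning the whole dataset; a minimal sketch:

```python
import daft

df = daft.read_parquet("hf://datasets/EssentialAI/eai-taxonomy-med-w-dclm-100b-sample")

# Only the rows needed for the preview are materialized
df.limit(5).show()
```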
If the dataset requires authentication, pass a Hugging Face token through Daft's `IOConfig`:

```python
import daft
from daft.io import IOConfig, HTTPConfig

io_config = IOConfig(http=HTTPConfig(bearer_token="your_token"))
df = daft.read_parquet("hf://datasets/EssentialAI/eai-taxonomy-med-w-dclm-100b-sample", io_config=io_config)
```

### Installation Requirements