Update README.md
Browse files
README.md
CHANGED
|
@@ -416,7 +416,7 @@ Domain and content type classification probabilities:
|
|
| 416 |
|
| 417 |
## How to Load the Dataset
|
| 418 |
|
| 419 |
-
This section provides examples of how to load the `Research-EAI/eai-taxonomy-math-w-fm` dataset using different Python libraries and frameworks.
|
| 420 |
|
| 421 |
### Using Hugging Face Datasets (Standard Method)
|
| 422 |
|
|
@@ -426,7 +426,7 @@ The simplest way to load the dataset is using the Hugging Face `datasets` librar
|
|
| 426 |
from datasets import load_dataset
|
| 427 |
|
| 428 |
# Load the entire dataset
|
| 429 |
-
dataset = load_dataset("Research-EAI/eai-taxonomy-math-w-fm")
|
| 430 |
|
| 431 |
# View dataset structure
|
| 432 |
print(dataset)
|
|
@@ -439,7 +439,7 @@ You can also load the dataset in streaming mode to avoid downloading the entire
|
|
| 439 |
from datasets import load_dataset
|
| 440 |
|
| 441 |
# Load in streaming mode
|
| 442 |
-
dataset = load_dataset("Research-EAI/eai-taxonomy-math-w-fm", streaming=True)
|
| 443 |
data_stream = dataset["train"]
|
| 444 |
|
| 445 |
# Iterate through examples
|
|
@@ -462,7 +462,7 @@ from pyspark.sql import SparkSession
|
|
| 462 |
spark = SparkSession.builder.appName("EAI-Taxonomy-Math").getOrCreate()
|
| 463 |
|
| 464 |
# Load the dataset using the "huggingface" data source
|
| 465 |
-
df = spark.read.format("huggingface").load("Research-EAI/eai-taxonomy-math-w-fm")
|
| 466 |
|
| 467 |
# Basic dataset exploration
|
| 468 |
print(f"Dataset shape: {df.count()} rows, {len(df.columns)} columns")
|
|
@@ -473,7 +473,7 @@ df.printSchema()
|
|
| 473 |
df_subset = (
|
| 474 |
spark.read.format("huggingface")
|
| 475 |
.option("columns", '["column1", "column2"]') # Replace with actual column names
|
| 476 |
-
.load("Research-EAI/eai-taxonomy-math-w-fm")
|
| 477 |
)
|
| 478 |
|
| 479 |
# Run SQL queries on the dataset
|
|
@@ -493,7 +493,7 @@ Daft provides a modern DataFrame library optimized for machine learning workload
|
|
| 493 |
import daft
|
| 494 |
|
| 495 |
# Load the entire dataset
|
| 496 |
-
df = daft.read_parquet("hf://datasets/Research-EAI/eai-taxonomy-math-w-fm")
|
| 497 |
|
| 498 |
# Basic exploration
|
| 499 |
print("Dataset schema:")
|
|
@@ -507,16 +507,10 @@ If you need to access private datasets or use authentication:
|
|
| 507 |
|
| 508 |
```python
|
| 509 |
import daft
|
| 510 |
-
import os
|
| 511 |
|
| 512 |
-
|
| 513 |
-
|
| 514 |
-
|
| 515 |
-
# Load with authentication
|
| 516 |
-
df = daft.read_parquet(
|
| 517 |
-
"hf://datasets/Research-EAI/eai-taxonomy-math-w-fm",
|
| 518 |
-
hf_token=os.environ["HF_TOKEN"]
|
| 519 |
-
)
|
| 520 |
```
|
| 521 |
|
| 522 |
### Installation Requirements
|
|
|
|
| 416 |
|
| 417 |
## How to Load the Dataset
|
| 418 |
|
| 419 |
+
This section provides examples of how to load the `EssentialAI/eai-taxonomy-math-w-fm` dataset using different Python libraries and frameworks.
|
| 420 |
|
| 421 |
### Using Hugging Face Datasets (Standard Method)
|
| 422 |
|
|
|
|
| 426 |
from datasets import load_dataset
|
| 427 |
|
| 428 |
# Load the entire dataset
|
| 429 |
+
dataset = load_dataset("EssentialAI/eai-taxonomy-math-w-fm")
|
| 430 |
|
| 431 |
# View dataset structure
|
| 432 |
print(dataset)
|
|
|
|
| 439 |
from datasets import load_dataset
|
| 440 |
|
| 441 |
# Load in streaming mode
|
| 442 |
+
dataset = load_dataset("EssentialAI/eai-taxonomy-math-w-fm", streaming=True)
|
| 443 |
data_stream = dataset["train"]
|
| 444 |
|
| 445 |
# Iterate through examples
|
|
|
|
| 462 |
spark = SparkSession.builder.appName("EAI-Taxonomy-Math").getOrCreate()
|
| 463 |
|
| 464 |
# Load the dataset using the "huggingface" data source
|
| 465 |
+
df = spark.read.format("huggingface").load("EssentialAI/eai-taxonomy-math-w-fm")
|
| 466 |
|
| 467 |
# Basic dataset exploration
|
| 468 |
print(f"Dataset shape: {df.count()} rows, {len(df.columns)} columns")
|
|
|
|
| 473 |
df_subset = (
|
| 474 |
spark.read.format("huggingface")
|
| 475 |
.option("columns", '["column1", "column2"]') # Replace with actual column names
|
| 476 |
+
.load("EssentialAI/eai-taxonomy-math-w-fm")
|
| 477 |
)
|
| 478 |
|
| 479 |
# Run SQL queries on the dataset
|
|
|
|
| 493 |
import daft
|
| 494 |
|
| 495 |
# Load the entire dataset
|
| 496 |
+
df = daft.read_parquet("hf://datasets/EssentialAI/eai-taxonomy-math-w-fm")
|
| 497 |
|
| 498 |
# Basic exploration
|
| 499 |
print("Dataset schema:")
|
|
|
|
| 507 |
|
| 508 |
```python
|
| 509 |
import daft
|
| 510 |
+
from daft.io import IOConfig, HTTPConfig
|
| 511 |
|
| 512 |
+
io_config = IOConfig(http=HTTPConfig(bearer_token="your_token"))
|
| 513 |
+
df = daft.read_parquet("hf://datasets/EssentialAI/eai-taxonomy-math-w-fm", io_config=io_config)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 514 |
```
|
| 515 |
|
| 516 |
### Installation Requirements
|