{ "cells": [ { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "avZX1qmK9EgR", "outputId": "811315c5-b8da-4403-c6cc-9f281c78cc43" }, "outputs": [], "source": [ "!pip3 install bibtexparser polars datasets -q" ] }, { "cell_type": "code", "execution_count": 2, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "qGzu2KkloHHQ", "outputId": "56864cca-8abd-481c-b47b-d503694ad024" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "--2025-05-16 16:05:50-- https://aclanthology.org/anthology+abstracts.bib.gz\n", "Resolving aclanthology.org (aclanthology.org)... 157.245.140.42\n", "Connecting to aclanthology.org (aclanthology.org)|157.245.140.42|:443... connected.\n", "HTTP request sent, awaiting response... 200 OK\n", "Length: 31139200 (30M) [application/x-gzip]\n", "Saving to: ‘anthology+abstracts.bib.gz’\n", "\n", "anthology+abstracts 100%[===================>] 29.70M 134MB/s in 0.2s \n", "\n", "2025-05-16 16:05:50 (134 MB/s) - ‘anthology+abstracts.bib.gz’ saved [31139200/31139200]\n", "\n" ] } ], "source": [ "!wget https://aclanthology.org/anthology+abstracts.bib.gz" ] }, { "cell_type": "code", "execution_count": 3, "metadata": { "id": "vAJ_j-VwoMOk" }, "outputs": [], "source": [ "!gunzip anthology+abstracts.bib.gz" ] }, { "cell_type": "code", "execution_count": 4, "metadata": { "id": "bPP3eGBTp1PC" }, "outputs": [], "source": [ "import bibtexparser\n", "import json\n", "\n", "def bib_to_json(bib_file_path, json_file_path=None):\n", " # Read the BIB file\n", " with open(bib_file_path, 'r', encoding='utf-8') as bibtex_file:\n", " bib_database = bibtexparser.load(bibtex_file)\n", "\n", " # Convert to JSON\n", " json_data = json.dumps(bib_database.entries, indent=4, ensure_ascii=False)\n", "\n", " # Write to file if path is provided\n", " if json_file_path:\n", " with open(json_file_path, 'w', encoding='utf-8') as json_file:\n", " json_file.write(json_data)\n", " print(f\"Converted BIB to JSON and saved to {json_file_path}\")\n", "\n", " return json_data" ] }, { "cell_type": "code", "execution_count": 5, "metadata": { "id": "sJglwtdc4HHw" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Converted BIB to JSON and saved to acl_abstracts.json\n", "First few characters of the JSON: [\n", " {\n", " \"url\": \"https://aclanthology.org/2025.wraicogs-1.0/\",\n", " \"publisher\": \"International Committee on Computational Linguistics\",\n", " \"address\": \"Abu Dhabi, UAE\",\n", " \"year\": \"2025\",\n", " \"month\": \"January\",\n", " \"editor\": \"Zock, Michael and\\nInui, Kentaro and\\nYuan, Zheng\",\n", " \"title\": \"Proceedings of the First Workshop on Writing Aids at the Crossroads of AI, Cognitive Science and NLP (WRAICOGS 2025)\",\n", " \"ENTRYTYPE\": \"proceedings\",\n", " \"ID\": \"wraicogs-ws-2025-1\"\n", " },\n", " {\n", " \"abstract\": \"Large Language Models (LLMs) have been used to generate texts in response to different writing tasks: reports, essays, story telling. However, language models do not have a metarepresentation of the text writing process, nor inherent communication learning needs, comparable to those of young human students. This paper introduces a fine-grained linguistic and textual analysis of multilingual Small Language Models' (SLMs) writing. 
With our method,\n" ] } ], "source": [ "result = bib_to_json(\"anthology+abstracts.bib\", \"acl_abstracts.json\")\n", "print(\"First few characters of the JSON:\", result[:1000])" ] }, { "cell_type": "markdown", "metadata": { "id": "r02pllfCQ_a6" }, "source": [ "# Convert to a dataframe" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "VSqwn6ll_lIa" }, "outputs": [], "source": [ "import polars as pl\n", "pl.Config.set_fmt_str_lengths(100)" ] }, { "cell_type": "code", "execution_count": 10, "metadata": { "id": "B32wuA2d9MeV" }, "outputs": [], "source": [ "df = pl.read_json(\"acl_abstracts.json\",\n", " schema={\n", " \"ENTRYTYPE\": pl.Utf8,\n", " \"ID\": pl.Utf8,\n", " \"year\": pl.Utf8,\n", " \"title\": pl.Utf8,\n", " \"abstract\": pl.Utf8,\n", " }\n", " )" ] }, { "cell_type": "code", "execution_count": 11, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 360 }, "id": "HL47l6Rs_AkX", "outputId": "b40bb2a3-a1ee-4950-8a8c-4e124493ef24" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "60497\n" ] }, { "data": { "text/html": [ "
| ID (str) | year (i64) | title (str) | abstract (str) |
|---|---|---|---|
| "buhnila-etal-2025-chain" | 2025 | "Chain-of-MetaWriting: Linguistic and Textual Analysis of How Small Language Models Write Young Stude… | "Large Language Models (LLMs) have been used to generate texts in response to different writing tasks… |
| "shi-penn-2025-semantic" | 2025 | "Semantic Masking in a Needle-in-a-haystack Test for Evaluating Large Language Model Long-Text Capabi… | "In this paper, we introduce the concept of Semantic Masking, where semantically coherent surrounding… |
| "khallaf-etal-2025-reading" | 2025 | "Reading Between the Lines: A dataset and a study on why some texts are tougher than others" | "Our research aims at better understanding what makes a text difficult to read for specific audiences… |
| "jourdan-etal-2025-pararev" | 2025 | "ParaRev : Building a dataset for Scientific Paragraph Revision annotated with revision instruction" | "Revision is a crucial step in scientific writing, where authors refine their work to improve clarity… |
| "maggi-vitaletti-2025-towards" | 2025 | "Towards an operative definition of creative writing: a preliminary assessment of creativeness in AI … | "Nowadays, AI is present in all our activities. This pervasive presence is perceived as a threat by m… |