Scripts used to build each configuration of this dataset: GitHub stars, PyPI downloads, models tagged with trl, and issues and comments for the huggingface/trl repository.

Stars

import requests
from datetime import datetime
from datasets import Dataset
import pyarrow as pa
import os

def get_stargazers(owner, repo, token):
    # Initialize the page number and the list of stargazers
    page = 1
    stargazers = []
    while True:
        # Construct the URL for the stargazers with pagination
        stargazers_url = f"https://api.github.com/repos/{owner}/{repo}/stargazers?page={page}&per_page=100"

        # Send the request to GitHub API with appropriate headers
        headers = {"Accept": "application/vnd.github.v3.star+json", "Authorization": "token " + token}
        response = requests.get(stargazers_url, headers=headers)

        if response.status_code != 200:
            raise Exception(f"Failed to fetch stargazers with status code {response.status_code}: {response.text}")

        stargazers_page = response.json()

        if not stargazers_page:  # Exit the loop if there are no more stargazers to process
            break

        stargazers.extend(stargazers_page)
        page += 1  # Move to the next page

    return stargazers

token = os.environ.get("GITHUB_PAT")
stargazers = get_stargazers("huggingface", "trl", token)
# Convert the list of stargazer records into a dict of columns
stargazers = {key: [stargazer[key] for stargazer in stargazers] for key in stargazers[0].keys()}
dataset = Dataset.from_dict(stargazers)

def clean(example):
    # Parse the ISO 8601 timestamp and cast it to a UTC Arrow scalar so the column is stored as a timestamp
    starred_at = datetime.strptime(example["starred_at"], "%Y-%m-%dT%H:%M:%SZ")
    starred_at = pa.scalar(starred_at, type=pa.timestamp("s", tz="UTC"))
    return {"starred_at": starred_at, "user": example["user"]["login"]}

dataset = dataset.map(clean, remove_columns=dataset.column_names)
dataset.push_to_hub("qgallouedec/trl-metrics", config_name="stargazers")
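
To check the result, a configuration can be loaded back from the Hub. A minimal sketch (the same pattern applies to the other configurations pushed below):

from datasets import load_dataset

stargazers = load_dataset("qgallouedec/trl-metrics", "stargazers", split="train")
print(stargazers[0])  # {"starred_at": ..., "user": ...}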

PyPI downloads

from datasets import Dataset
from google.cloud import bigquery
import os

os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "propane-tree-432413-4c3e2b5e6b3c.json"

# Initialize a BigQuery client
client = bigquery.Client()

# Define your query
query = """
#standardSQL
WITH daily_downloads AS (
  SELECT
    DATE(timestamp) AS day,
    COUNT(*) AS num_downloads
  FROM
    `bigquery-public-data.pypi.file_downloads`
  WHERE
    file.project = 'trl'
    -- Filter for the last 54 months
    AND DATE(timestamp) BETWEEN DATE_SUB(CURRENT_DATE(), INTERVAL 54 MONTH) AND CURRENT_DATE()
  GROUP BY
    day
)
SELECT
  day,
  num_downloads
FROM
  daily_downloads
ORDER BY
  day DESC
"""

# Execute the query
query_job = client.query(query)

# Fetch the results
results = query_job.result()

# Convert the results to a pandas DataFrame and then to a Dataset
df = results.to_dataframe()
dataset = Dataset.from_pandas(df)

dataset.push_to_hub("qgallouedec/trl-metrics", config_name="pypi_downloads")
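
As a quick sanity check, the daily counts can be summed after loading the pushed configuration; a minimal sketch:

from datasets import load_dataset

downloads = load_dataset("qgallouedec/trl-metrics", "pypi_downloads", split="train")
print(sum(downloads["num_downloads"]))  # total downloads over the queried window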

Models tagged

from huggingface_hub import HfApi
from datasets import Dataset

api = HfApi()
models = api.list_models(tags="trl")
dataset_list = [
    {"id": model.id, "created_at": model.created_at, "likes": model.likes, "downloads": model.downloads, "tags": model.tags}
    for model in models
]
# Convert the list of model records into a dict of columns
dataset_dict = {key: [d[key] for d in dataset_list] for key in dataset_list[0].keys()}
dataset = Dataset.from_dict(dataset_dict)
dataset.push_to_hub("qgallouedec/trl-metrics", config_name="models")
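
To inspect the result, the models can be ranked by downloads after loading the configuration; a minimal sketch (the download count may be missing for some models, hence the fallback to 0):

from datasets import load_dataset

models = load_dataset("qgallouedec/trl-metrics", "models", split="train")
for model in sorted(models, key=lambda m: m["downloads"] or 0, reverse=True)[:5]:
    print(model["id"], model["downloads"])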

Issues and comments

import requests
from datetime import datetime
import os
from datasets import Dataset
from tqdm import tqdm

token = os.environ.get("GITHUB_PAT")

def get_full_response(url, headers, params=None):
    page = 1
    output = []
    params = params or {}
    while True:
        params = {**params, "page": page, "per_page": 100}
        response = requests.get(url, headers=headers, params=params)

        if response.status_code != 200:
            raise Exception(f"Failed to fetch issues: {response.text}")

        batch = response.json()
        if len(batch) == 0:
            break
        output.extend(batch)
        page += 1
    return output

# GitHub API URL for issues (closed and open)
issues_url = f"https://api.github.com/repos/huggingface/trl/issues"

# Set up headers for authentication
headers = {"Authorization": f"token {token}", "Accept": "application/vnd.github.v3+json"}

# Make the request (note: the GitHub issues endpoint also returns pull requests)
issues = get_full_response(issues_url, headers, params={"state": "all"})

issues_dataset_dict = {
    "number": [],
    "title": [],
    "user": [],
    "state": [],
    "created_at": [],
    "closed_at": [],
    "comments_count": [],
}
comments_dataset_dict = {
    "user": [],
    "created_at": [],
    "body": [],
    "issue_number": [],
}
for issue in tqdm(issues):
    # Extract relevant information
    issue_number = issue["number"]
    title = issue["title"]
    created_at = datetime.strptime(issue["created_at"], "%Y-%m-%dT%H:%M:%SZ")
    comments_count = issue["comments"]
    comments_url = issue["comments_url"]

    comments = get_full_response(comments_url, headers=headers)
    for comment in comments:
        comments_dataset_dict["user"].append(comment["user"]["login"])
        comments_dataset_dict["created_at"].append(datetime.strptime(comment["created_at"], "%Y-%m-%dT%H:%M:%SZ"))
        comments_dataset_dict["body"].append(comment["body"])
        comments_dataset_dict["issue_number"].append(issue_number)

    issues_dataset_dict["number"].append(issue_number)
    issues_dataset_dict["title"].append(title)
    issues_dataset_dict["user"].append(issue["user"]["login"])
    issues_dataset_dict["state"].append(issue["state"])
    issues_dataset_dict["created_at"].append(datetime.strptime(issue["created_at"], "%Y-%m-%dT%H:%M:%SZ"))
    issues_dataset_dict["closed_at"].append(datetime.strptime(issue["closed_at"], "%Y-%m-%dT%H:%M:%SZ") if issue["closed_at"] else None)
    issues_dataset_dict["comments_count"].append(comments_count)

issues_dataset = Dataset.from_dict(issues_dataset_dict)
comments_dataset = Dataset.from_dict(comments_dataset_dict)

issues_dataset.push_to_hub("qgallouedec/trl-metrics", config_name="issues")
comments_dataset.push_to_hub("qgallouedec/trl-metrics", config_name="issue_comments")
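
The two configurations can be combined to surface the most-commented issues; a minimal sketch using pandas:

from datasets import load_dataset

issues = load_dataset("qgallouedec/trl-metrics", "issues", split="train").to_pandas()
comments = load_dataset("qgallouedec/trl-metrics", "issue_comments", split="train").to_pandas()

# Count comments per issue and attach the issue titles
counts = comments.groupby("issue_number").size().reset_index(name="n_comments")
top = issues.merge(counts, left_on="number", right_on="issue_number").sort_values("n_comments", ascending=False)
print(top[["number", "title", "n_comments"]].head())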