[Misc] Add SPDX-License-Identifier headers to python source files (#12628)

- **Add SPDX license headers to python source files**
- **Check for SPDX headers using pre-commit**

commit 9d7ef44c3cfb72ca4c32e1c677d99259d10d4745
Author: Russell Bryant <rbryant@redhat.com>
Date:   Fri Jan 31 14:18:24 2025 -0500

    Add SPDX license headers to python source files
    
    This commit adds SPDX license headers to python source files as
    recommended to the project by the Linux Foundation. These headers
    provide a concise way that is both human and machine readable for
    communicating license information for each source file. It helps
    avoid any ambiguity about the license of the code and can also be
    easily used by tools to help manage license compliance.

    The Linux Foundation runs license scans against the codebase to help
    ensure we are in compliance with the licenses of the code we use,
    including dependencies. Having these headers in place helps that
    tool do its job.
    
    More information can be found on the SPDX site:
    
    - https://spdx.dev/learn/handling-license-info/
    
    Signed-off-by: Russell Bryant <rbryant@redhat.com>
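
Not part of the change itself, but as a minimal sketch of the "machine readable" point above: a compliance tool only needs to scan the first few lines of each file for the identifier. The helper name and regular expression below are illustrative assumptions, not taken from any existing project tooling.

import re
from pathlib import Path
from typing import Optional

# Matches a header comment such as "# SPDX-License-Identifier: Apache-2.0".
_SPDX_RE = re.compile(r"#\s*SPDX-License-Identifier:\s*(\S+)")

def spdx_identifier(path: Path) -> Optional[str]:
    # The header sits at the top of the file, so only the first few lines are read.
    for line in path.read_text(encoding="utf-8").splitlines()[:5]:
        match = _SPDX_RE.match(line)
        if match:
            return match.group(1)
    return None

# Example: report python files that do not declare a license.
for path in Path(".").rglob("*.py"):
    if spdx_identifier(path) is None:
        print(f"missing SPDX header: {path}")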

commit 5a1cf1cb3b80759131c73f6a9dddebccac039dea
Author: Russell Bryant <rbryant@redhat.com>
Date:   Fri Jan 31 14:36:32 2025 -0500

    Check for SPDX headers using pre-commit
    
    Signed-off-by: Russell Bryant <rbryant@redhat.com>

---------

Signed-off-by: Russell Bryant <rbryant@redhat.com>
Russell Bryant authored on 2025-02-02 14:58:18 -05:00; committed by GitHub.
commit e489ad7a21 (parent f256ebe4df)
1012 changed files with 1884 additions and 2 deletions

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import zipfile

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
import argparse
import os

View File

@@ -1,3 +1,4 @@
# SPDX-License-Identifier: Apache-2.0
"""
LM eval harness on model to compare vs HF baseline computed offline.
Configs are found in configs/$MODEL.yaml

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
import json
import os
from pathlib import Path

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
import argparse
from transformers import AutoTokenizer

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
import argparse
import json
from pathlib import Path

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
from lmdeploy.serve.openai.api_client import APIClient
api_client = APIClient("http://localhost:8000")

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
import datetime
import json
import os

View File

@@ -97,10 +97,14 @@ repos:
    language: system
    verbose: true
    stages: [commit-msg]
  - id: check-spdx-header
    name: Check SPDX headers
    entry: python tools/check_spdx_header.py
    language: python
    types: [python]
  - id: suggestion
    name: Suggestion
    entry: bash -c 'echo "To bypass pre-commit hooks, add --no-verify to git commit."'
    language: system
    verbose: true
    pass_filenames: false
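
The hook's entry point, tools/check_spdx_header.py, is among the files not shown in this excerpt, so the following is only a hypothetical sketch of what such a checker might look like; the structure and names are assumptions, not the contents of the real script. pre-commit passes the staged python filenames as arguments, and the hook fails the commit if any of them does not start with the expected header.

# Hypothetical sketch only; the actual tools/check_spdx_header.py may differ.
import sys

EXPECTED = "# SPDX-License-Identifier: Apache-2.0"

def has_header(filename: str) -> bool:
    # Only the first line matters; the header is expected on line 1.
    with open(filename, encoding="utf-8") as f:
        first_line = f.readline().rstrip("\n")
    return first_line.startswith(EXPECTED)

def main() -> int:
    missing = [name for name in sys.argv[1:] if not has_header(name)]
    for name in missing:
        print(f"{name}: missing SPDX-License-Identifier header")
    return 1 if missing else 0

if __name__ == "__main__":
    sys.exit(main())

Because the hook declares language: python and types: [python], pre-commit runs this entry only on staged .py files, so a file without the header blocks the commit unless --no-verify is used, as the suggestion hook in the config above notes.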

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
import json
import os
import sys

View File

@@ -1,3 +1,4 @@
# SPDX-License-Identifier: Apache-2.0
"""Benchmark guided decoding throughput."""
import argparse
import dataclasses

View File

@@ -1,3 +1,4 @@
# SPDX-License-Identifier: Apache-2.0
"""Benchmark the latency of processing a single batch of requests."""
import argparse
import dataclasses

View File

@@ -1,3 +1,4 @@
# SPDX-License-Identifier: Apache-2.0
"""
Offline benchmark to test the long document QA throughput.

View File

@@ -1,3 +1,4 @@
# SPDX-License-Identifier: Apache-2.0
"""
Benchmark the efficiency of prefix caching.

View File

@@ -1,3 +1,4 @@
# SPDX-License-Identifier: Apache-2.0
"""Benchmark offline prioritization."""
import argparse
import dataclasses

View File

@@ -1,3 +1,4 @@
# SPDX-License-Identifier: Apache-2.0
r"""Benchmark online serving throughput.
On the server side, run one of the following commands:

View File

@@ -1,3 +1,4 @@
# SPDX-License-Identifier: Apache-2.0
r"""Benchmark online serving throughput with guided decoding.
On the server side, run one of the following commands:

View File

@@ -1,3 +1,4 @@
# SPDX-License-Identifier: Apache-2.0
"""Benchmark offline inference throughput."""
import argparse
import dataclasses

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
import argparse
import copy
import itertools

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
# Cutlass bench utils
from typing import Iterable, Tuple

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
import argparse
import copy
import itertools

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
# Weight Shapes are in the format
# ([K, N], TP_SPLIT_DIM)
# Example:

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
import os
import aiohttp

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
import asyncio
import itertools

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
import json
import matplotlib.pyplot as plt

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
import pickle as pkl
import time
from dataclasses import dataclass

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
import os
import sys
from typing import Optional

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
import time
import torch

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
import argparse
import copy
import json

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
import argparse
import copy
import itertools

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
from typing import List
import torch

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
import argparse
import time
from datetime import datetime

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
import random
import time
from typing import List, Optional

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
import time
import torch

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
import itertools
from typing import Optional, Tuple, Union

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
from itertools import accumulate
from typing import List, Optional

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
WEIGHT_SHAPES = {
    "ideal": [[4 * 256 * 32, 256 * 32]],
    "mistralai/Mistral-7B-v0.1/TP1": [

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
import math
import pickle
import re

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
import dataclasses
from typing import Any, Callable, Iterable, Optional

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
# Weight Shapes are in the format
# ([K, N], TP_SPLIT_DIM)
# Example:

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
import cProfile
import pstats

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
#!/usr/bin/env python3
#

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
# ruff: noqa
# code borrowed from https://github.com/pytorch/pytorch/blob/main/torch/utils/collect_env.py

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
import enum
from typing import Dict, Union

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
import itertools
import math
import os

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
import itertools
import re
from dataclasses import dataclass, field

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
from vllm import LLM, SamplingParams
from vllm.utils import FlexibleArgumentParser

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
from vllm import LLM, SamplingParams
# Sample prompts.

View File

@@ -1,3 +1,4 @@
# SPDX-License-Identifier: Apache-2.0
"""
This example shows how to use vLLM for running offline inference
with the correct prompt format on audio language models.

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
from vllm import LLM, SamplingParams
# Sample prompts.

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
from vllm import LLM
# Sample prompts.

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
from vllm import LLM, SamplingParams
llm = LLM(model="meta-llama/Meta-Llama-3-8B-Instruct")

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
# ruff: noqa
import json
import random

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
from vllm import LLM
# Sample prompts.

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
from dataclasses import asdict
from vllm import LLM, SamplingParams

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
from vllm import LLM, SamplingParams
# Sample prompts.

View File

@@ -1,3 +1,4 @@
# SPDX-License-Identifier: Apache-2.0
"""
This example shows how to use Ray Data for running offline batch inference
distributively on a multi-nodes cluster.

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
from vllm import LLM
# Sample prompts.

View File

@@ -1,3 +1,4 @@
# SPDX-License-Identifier: Apache-2.0
'''
Demonstrate prompting of text-to-text
encoder/decoder models, specifically BART

View File

@@ -1,3 +1,4 @@
# SPDX-License-Identifier: Apache-2.0
'''
Demonstrate prompting of text-to-text
encoder/decoder models, specifically Florence-2

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
from huggingface_hub import hf_hub_download
from vllm import LLM, SamplingParams

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
import argparse
from typing import List, Tuple

View File

@@ -1,3 +1,4 @@
# SPDX-License-Identifier: Apache-2.0
"""
This example shows how to use LoRA with different quantization techniques
for offline inference.

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
import gc
import time
from typing import List

View File

@@ -1,3 +1,4 @@
# SPDX-License-Identifier: Apache-2.0
"""
This example shows how to use the multi-LoRA functionality
for offline inference.

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
from vllm import LLM, SamplingParams
# Sample prompts.

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
import os
from vllm import LLM, SamplingParams

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
# ruff: noqa
import argparse

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
from vllm import LLM, SamplingParams
from vllm.distributed import cleanup_dist_env_and_memory

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
import inspect
import json
import os

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
import argparse
import dataclasses
import os

View File

@@ -1,3 +1,4 @@
# SPDX-License-Identifier: Apache-2.0
"""
a simple demonstration of RLHF with vLLM, inspired by
the OpenRLHF framework https://github.com/OpenRLHF/OpenRLHF .

View File

@@ -1,3 +1,4 @@
# SPDX-License-Identifier: Apache-2.0
"""
Saves each worker's model state dict directly to a checkpoint, which enables a
fast load path for large tensor-parallel models where each worker only needs to

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
from vllm import LLM
# Sample prompts.

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
import os
import time

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
from enum import Enum
from pydantic import BaseModel

View File

@@ -1,3 +1,4 @@
# SPDX-License-Identifier: Apache-2.0
"""
experimental support for tensor-parallel inference with torchrun,
see https://github.com/vllm-project/vllm/issues/11400 for

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
from vllm import LLM, SamplingParams
prompts = [

View File

@@ -1,3 +1,4 @@
# SPDX-License-Identifier: Apache-2.0
"""
This example shows how to use vLLM for running offline inference with
the correct prompt format on vision language models for text generation.

View File

@@ -1,3 +1,4 @@
# SPDX-License-Identifier: Apache-2.0
"""
This example shows how to use vLLM for running offline inference with
the correct prompt format on vision language models for multimodal embedding.

View File

@@ -1,3 +1,4 @@
# SPDX-License-Identifier: Apache-2.0
"""
This example shows how to use vLLM for running offline inference with
multi-image input on vision language models for text generation,

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
import time
from vllm import LLM, SamplingParams

View File

@@ -1,3 +1,4 @@
# SPDX-License-Identifier: Apache-2.0
"""Example Python client for `vllm.entrypoints.api_server`
NOTE: The API server is used only for demonstration and simple performance
benchmarks. It is not intended for production use.

View File

@@ -1,3 +1,4 @@
# SPDX-License-Identifier: Apache-2.0
"""
Example of using the OpenAI entrypoint's rerank API which is compatible with
the Cohere SDK: https://github.com/cohere-ai/cohere-python

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
import argparse
import gradio as gr

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
import argparse
import json

View File

@@ -1,3 +1,4 @@
# SPDX-License-Identifier: Apache-2.0
"""
Example of using the OpenAI entrypoint's rerank API which is compatible with
Jina and Cohere https://jina.ai/reranker

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
from openai import OpenAI
# Modify OpenAI's API key and API base to use vLLM's API server.

View File

@@ -1,3 +1,4 @@
# SPDX-License-Identifier: Apache-2.0
"""An example showing how to use vLLM to serve multimodal models
and run online serving with OpenAI client.

View File

@@ -1,3 +1,4 @@
# SPDX-License-Identifier: Apache-2.0
"""
Set up this example by starting a vLLM OpenAI-compatible server with tool call
options enabled. For example:

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
from enum import Enum
from openai import OpenAI

View File

@@ -1,3 +1,4 @@
# SPDX-License-Identifier: Apache-2.0
"""
An example shows how to generate chat completions from reasoning models
like DeepSeekR1.

View File

@@ -1,3 +1,4 @@
# SPDX-License-Identifier: Apache-2.0
"""
An example shows how to generate chat completions from reasoning models
like DeepSeekR1.

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
import argparse
import base64
import io

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
from openai import OpenAI
# Modify OpenAI's API key and API base to use vLLM's API server.

View File

@@ -1,3 +1,4 @@
# SPDX-License-Identifier: Apache-2.0
"""
Example online usage of Score API.

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
from openai import OpenAI
# Modify OpenAI's API key and API base to use vLLM's API server.

View File

@@ -1,3 +1,4 @@
# SPDX-License-Identifier: Apache-2.0
"""
Example online usage of Pooling API.

View File

@@ -1,3 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
import requests
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import (
    OTLPSpanExporter)

Some files were not shown because too many files have changed in this diff.