| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths 6-201) | class_span (dict) | source (stringlengths 21-2.38M) | target (stringlengths 1-96) |
|---|---|---|---|---|---|
python
|
getsentry__sentry
|
tests/sentry/auth/test_system.py
|
{"start": 253, "end": 821}
|
class ____(TestCase):
def test_is_system_auth(self) -> None:
token = SystemToken()
assert is_system_auth(token)
assert not is_system_auth({})
@django_db_all
@control_silo_test
def test_system_token_option() -> None:
from sentry import options
options.delete("sentry:system-token")
try:
get_system_token()
assert (
options.get_last_update_channel("sentry:system-token")
== options.UpdateChannel.APPLICATION
)
finally:
options.delete("sentry:system-token")
|
TestSystemAuth
|
python
|
getsentry__sentry
|
src/sentry/insights/__init__.py
|
{"start": 82, "end": 3033}
|
class ____(NamedTuple):
"""Interface for detecting relevant Insights modules for a span.
Spans in transactions have a different schema than spans from Kafka. Going through this interface
makes the classification schema-agnostic.
"""
op: str | None
category: str | None
description: str | None
transaction_op: str | None
@classmethod
def from_span_v1(cls, span: dict[str, Any]) -> "FilterSpan":
"""Get relevant fields from a span as they appear in transaction payloads."""
return cls(
op=span.get("op"),
category=span.get("sentry_tags", {}).get("category"),
description=span.get("description"),
transaction_op=span.get("sentry_tags", {}).get("transaction.op"),
)
@classmethod
def from_span_attributes(cls, attributes: dict[str, Any]) -> "FilterSpan":
"""Get relevant fields from `span.attributes`."""
return cls(
op=(attributes.get("sentry.op") or {}).get("value"),
category=(attributes.get("sentry.category") or {}).get("value"),
description=(attributes.get("sentry.description") or {}).get("value"),
transaction_op=(attributes.get("sentry.transaction_op") or {}).get("value"),
)
def is_http(span: FilterSpan) -> bool:
return span.category == "http" and span.op == "http.client"
def is_db(span: FilterSpan) -> bool:
return span.category == "db" and span.description is not None
def is_assets(span: FilterSpan) -> bool:
return span.op in ["resource.script", "resource.css", "resource.font", "resource.img"]
def is_app_start(span: FilterSpan) -> bool:
return span.op is not None and span.op.startswith("app.start.")
def is_screen_load(span: FilterSpan) -> bool:
return span.transaction_op == "ui.load"
def is_vital(span: FilterSpan) -> bool:
return span.transaction_op == "pageload"
def is_cache(span: FilterSpan) -> bool:
return span.op in ["cache.get_item", "cache.get", "cache.put"]
def is_queue(span: FilterSpan) -> bool:
return span.op in ["queue.process", "queue.publish"]
def is_agents(span: FilterSpan) -> bool:
return span.op is not None and span.op.startswith("gen_ai.")
def is_mcp(span: FilterSpan) -> bool:
return span.op is not None and span.op.startswith("mcp.")
INSIGHT_MODULE_FILTERS = {
InsightModules.HTTP: is_http,
InsightModules.DB: is_db,
InsightModules.ASSETS: is_assets,
InsightModules.APP_START: is_app_start,
InsightModules.SCREEN_LOAD: is_screen_load,
InsightModules.VITAL: is_vital,
InsightModules.CACHE: is_cache,
InsightModules.QUEUE: is_queue,
InsightModules.AGENTS: is_agents,
InsightModules.MCP: is_mcp,
}
def modules(spans: list[FilterSpan]) -> set[InsightModules]:
return {
module
for module, is_module in INSIGHT_MODULE_FILTERS.items()
if any(is_module(span) for span in spans)
}
|
FilterSpan
|
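A short usage sketch of the row above (the span payloads are hypothetical; the `InsightModules` enum is assumed to be imported from the same package): build a `FilterSpan` from each schema, then classify.

```python
# Hypothetical payloads shaped like the two schemas handled above.
transaction_span = {
    "op": "db.query",
    "description": "SELECT * FROM users",
    "sentry_tags": {"category": "db", "transaction.op": "pageload"},
}
kafka_attributes = {
    "sentry.op": {"value": "cache.get"},
    "sentry.category": {"value": "cache"},
    "sentry.description": {"value": "get user:1"},
}

spans = [
    FilterSpan.from_span_v1(transaction_span),
    FilterSpan.from_span_attributes(kafka_attributes),
]
# The DB and VITAL filters match the first span, CACHE matches the second.
assert modules(spans) == {InsightModules.DB, InsightModules.VITAL, InsightModules.CACHE}
```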
python
|
run-llama__llama_index
|
llama-index-integrations/readers/llama-index-readers-patentsview/llama_index/readers/patentsview/base.py
|
{"start": 251, "end": 1454}
|
class ____(BaseReader):
"""
Patentsview reader.
Reads patent abstracts.
"""
def __init__(self) -> None:
"""Initialize with request body."""
self.json = {"q": {"patent_id": None}, "f": ["patent_abstract"]}
def load_data(self, patent_number: List[str]) -> List[Document]:
"""
Load patent abstract given list of patent numbers.
Args:
patent_number: List[str]: List of patent numbers, e.g., 8848839.
Returns:
List[Document]: A list of Document objects, each including the abstract for a patent.
"""
if not patent_number:
raise ValueError("Please input patent number")
self.json["q"]["patent_id"] = patent_number
response = requests.post(BASE_URL, json=self.json)
if response.status_code == 200:
data = response.json()
patents = data.get("patents", [])
results = []
for patent in patents:
results.append(Document(text=patent["patent_abstract"]))
else:
raise Exception(f"Request failed with status code: {response.status_code}")
return results
|
PatentsviewReader
|
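A minimal usage sketch for the reader above (the second patent number is hypothetical; `BASE_URL` is assumed to be the PatentsView search endpoint defined elsewhere in the module):

```python
reader = PatentsviewReader()
# 8848839 is the example from the docstring; the second number is illustrative only.
documents = reader.load_data(patent_number=["8848839", "10000000"])
for doc in documents:
    print(doc.text[:80])  # first 80 characters of each abstract
```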
python
|
doocs__leetcode
|
solution/1600-1699/1632.Rank Transform of a Matrix/Solution.py
|
{"start": 0, "end": 614}
|
class ____:
def __init__(self, n):
self.p = list(range(n))
self.size = [1] * n
def find(self, x):
if self.p[x] != x:
self.p[x] = self.find(self.p[x])
return self.p[x]
def union(self, a, b):
pa, pb = self.find(a), self.find(b)
if pa != pb:
if self.size[pa] > self.size[pb]:
self.p[pb] = pa
self.size[pa] += self.size[pb]
else:
self.p[pa] = pb
self.size[pb] += self.size[pa]
def reset(self, x):
self.p[x] = x
self.size[x] = 1
|
UnionFind
|
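A quick sanity sketch of the structure above; `reset` is what lets the rank-transform solution reuse one union-find across value groups instead of reallocating it:

```python
uf = UnionFind(5)
uf.union(0, 1)
uf.union(1, 2)
assert uf.find(0) == uf.find(2)   # 0, 1, 2 now share one representative
assert uf.find(3) != uf.find(0)   # 3 is still a singleton
uf.reset(0)                       # detach the leaf 0 back into its own set
assert uf.find(0) == 0
```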
python
|
spyder-ide__spyder
|
external-deps/spyder-kernels/spyder_kernels/console/outstream.py
|
{"start": 434, "end": 2587}
|
class ____(OutStream):
"""Subclass of OutStream that represents a TTY."""
def __init__(self, session, pub_thread, name, pipe=None, echo=None, *,
watchfd=True):
super().__init__(session, pub_thread, name, pipe,
echo=echo, watchfd=watchfd, isatty=True)
def _flush(self):
"""This is where the actual send happens.
_flush should generally be called in the IO thread,
unless the thread has been destroyed (e.g. forked subprocess).
NOTE: Overridden method to be able to filter messages.
See spyder-ide/spyder#22181
"""
self._flush_pending = False
self._subprocess_flush_pending = False
if self.echo is not None:
try:
self.echo.flush()
except OSError as e:
if self.echo is not sys.__stderr__:
print(f"Flush failed: {e}", file=sys.__stderr__)
for parent, data in self._flush_buffers():
# Messages that will not be printed to the console. This allows us
# to deal with issues such as spyder-ide/spyder#22181
filter_messages = ["Parent poll failed."]
if data and not any(
[message in data for message in filter_messages]
):
# FIXME: this disables Session's fork-safe check,
# since pub_thread is itself fork-safe.
# There should be a better way to do this.
self.session.pid = os.getpid()
content = {"name": self.name, "text": data}
msg = self.session.msg("stream", content, parent=parent)
# Each transform either returns a new
# message or None. If None is returned,
# the message has been 'used' and we return.
for hook in self._hooks:
msg = hook(msg)
if msg is None:
return
self.session.send(
self.pub_thread,
msg,
ident=self.topic,
)
|
TTYOutStream
|
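The `_flush` override above keeps the hook protocol inherited from `OutStream`: each hook receives the message and returns either a (possibly modified) message or `None` to swallow it. A minimal sketch of such a hook, assuming ipykernel's `register_hook` API on a stream instance:

```python
def drop_noisy_warnings(msg):
    """Swallow stream output containing a known-noisy warning (hypothetical filter)."""
    if "Parent poll failed." in msg["content"].get("text", ""):
        return None  # returning None marks the message as 'used'; it is never sent
    return msg

# stream.register_hook(drop_noisy_warnings)  # wiring is assumed, not shown in the row above
```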
python
|
scipy__scipy
|
scipy/signal/tests/test_upfirdn.py
|
{"start": 2493, "end": 4879}
|
class ____:
"""Test _UpFIRDn object"""
def __init__(self, up, down, h, x_dtype, *, xp=None):
if xp is None:
xp = np_compat
self.up = up
self.down = down
self.h = np.atleast_1d(h)
self.x_dtype = x_dtype
self.rng = np.random.RandomState(17)
self.xp = xp
def __call__(self):
# tiny signal
self.scrub(np.ones(1, self.x_dtype))
# ones
self.scrub(np.ones(10, self.x_dtype))
# randn
x = self.rng.randn(10).astype(self.x_dtype)
if self.x_dtype in (np.complex64, np.complex128):
x += 1j * self.rng.randn(10)
self.scrub(x)
# ramp
self.scrub(np.arange(10).astype(self.x_dtype))
# 3D, random
if is_cupy(self.xp):
# ndim > 2 is unsupported in CuPy.
return
size = (2, 3, 5)
x = self.rng.randn(*size).astype(self.x_dtype)
if self.x_dtype in (np.complex64, np.complex128):
x += 1j * self.rng.randn(*size)
for axis in range(len(size)):
self.scrub(x, axis=axis)
x = x[:, ::2, 1::3].T
for axis in range(len(size)):
self.scrub(x, axis=axis)
def scrub(self, x, axis=-1):
xp = self.xp
yr = np.apply_along_axis(upfirdn_naive, axis, x,
self.h, self.up, self.down)
want_len = _output_len(len(self.h), x.shape[axis], self.up, self.down)
assert yr.shape[axis] == want_len
y = upfirdn(xp.asarray(self.h), xp.asarray(x), self.up, self.down,
axis=axis)
assert y.shape[axis] == want_len
assert y.shape == yr.shape
dtypes = (self.h.dtype, x.dtype)
if all(d == np.complex64 for d in dtypes):
assert y.dtype == xp.complex64
elif np.complex64 in dtypes and np.float32 in dtypes:
assert y.dtype == xp.complex64
elif all(d == np.float32 for d in dtypes):
assert y.dtype == xp.float32
elif np.complex128 in dtypes or np.complex64 in dtypes:
assert y.dtype == xp.complex128
else:
assert y.dtype == xp.float64
yr = xp.asarray(yr, dtype=y.dtype)
xp_assert_close(yr, y)
_UPFIRDN_TYPES = ("int64", "float32", "complex64", "float64", "complex128")
@make_xp_test_case(upfirdn)
|
UpFIRDnCase
|
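The harness above leans on two helpers defined elsewhere in the test module, `upfirdn_naive` and `_output_len`. A reference sketch of both, assuming scipy's documented upfirdn semantics (zero-stuff by `up`, FIR-filter with `h`, keep every `down`-th sample, trim to the canonical length):

```python
import numpy as np

def _output_len(len_h, in_len, up, down):
    # ceil(((in_len - 1) * up + len_h) / down), written with integer arithmetic
    return ((in_len - 1) * up + len_h - 1) // down + 1

def upfirdn_naive(x, h, up=1, down=1):
    """O(n*m) reference: upsample, convolve, downsample."""
    h = np.asarray(h)
    upsampled = np.zeros(len(x) * up, dtype=x.dtype)
    upsampled[::up] = x
    return np.convolve(h, upsampled)[::down][: _output_len(len(h), len(x), up, down)]
```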
python
|
weaviate__weaviate-python-client
|
weaviate/exceptions.py
|
{"start": 4430, "end": 4601}
|
class ____(WeaviateBaseError):
"""Occurs when an HTTP request unexpectedly returns an empty response."""
EmptyResponseException = EmptyResponseError
|
EmptyResponseError
|
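The module-level assignment above is a deprecation alias: code that still catches `EmptyResponseException` keeps working because both names refer to the same class object. A tiny sketch:

```python
try:
    raise EmptyResponseError("request returned an empty response")  # hypothetical raise site
except EmptyResponseException as err:  # legacy name, identical class
    assert type(err) is EmptyResponseError
```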
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/event/attr.py
|
{"start": 19680, "end": 21549}
|
class ____(_CompoundListener[_ET]):
__slots__ = "parent_dispatch", "name", "local", "parent_listeners"
parent_dispatch: _DispatchCommon[_ET]
name: str
local: _InstanceLevelDispatch[_ET]
parent_listeners: Collection[_ListenerFnType]
def __init__(
self,
parent_dispatch: _DispatchCommon[_ET],
name: str,
local: _EmptyListener[_ET],
):
self._exec_once = False
self._exec_w_sync_once = False
self._exec_once_mutex = None
self.parent_dispatch = parent_dispatch
self.name = name
self.local = local
self.parent_listeners = self.local
if not typing.TYPE_CHECKING:
# first error, I don't really understand:
# Signature of "listeners" incompatible with
# supertype "_CompoundListener" [override]
# the name / return type are exactly the same
# second error is getattr isn't typed, the cast() here
# adds too much method overhead
@property
def listeners(self) -> Collection[_ListenerFnType]:
return getattr(self.parent_dispatch, self.name)
def _adjust_fn_spec(
self, fn: _ListenerFnType, named: bool
) -> _ListenerFnType:
return self.local._adjust_fn_spec(fn, named)
def for_modify(self, obj: _DispatchCommon[_ET]) -> _JoinedListener[_ET]:
self.local = self.parent_listeners = self.local.for_modify(obj)
return self
def insert(self, event_key: _EventKey[_ET], propagate: bool) -> None:
self.local.insert(event_key, propagate)
def append(self, event_key: _EventKey[_ET], propagate: bool) -> None:
self.local.append(event_key, propagate)
def remove(self, event_key: _EventKey[_ET]) -> None:
self.local.remove(event_key)
def clear(self) -> None:
raise NotImplementedError()
|
_JoinedListener
|
python
|
apache__airflow
|
devel-common/src/docs/build_docs.py
|
{"start": 6335, "end": 30533}
|
class ____(NamedTuple):
"""Result of spellcheck."""
package_name: str
log_file_name: Path
spelling_errors: list[SpellingError]
build_errors: list[DocBuildError]
def perform_docs_build_for_single_package(build_specification: BuildSpecification) -> BuildDocsResult:
"""Performs single package docs build."""
builder = AirflowDocsBuilder(package_name=build_specification.package_name)
builder.is_autobuild = build_specification.is_autobuild
console.print(
f"[bright_blue]{build_specification.package_name:60}:[/] Building documentation"
+ (" (autobuild)" if build_specification.is_autobuild else "")
)
result = BuildDocsResult(
package_name=build_specification.package_name,
errors=builder.build_sphinx_docs(
verbose=build_specification.verbose,
),
log_file_name=builder.log_build_filename,
)
return result
def perform_spell_check_for_single_package(build_specification: BuildSpecification) -> SpellCheckResult:
"""Performs single package spell check."""
builder = AirflowDocsBuilder(package_name=build_specification.package_name)
builder.is_autobuild = build_specification.is_autobuild
console.print(f"[bright_blue]{build_specification.package_name:60}:[/] Checking spelling started")
spelling_errors, build_errors = builder.check_spelling(
verbose=build_specification.verbose,
)
result = SpellCheckResult(
package_name=build_specification.package_name,
spelling_errors=spelling_errors,
build_errors=build_errors,
log_file_name=builder.log_spelling_filename,
)
console.print(f"[bright_blue]{build_specification.package_name:60}:[/] Checking spelling completed")
return result
def build_docs_for_packages(
packages_to_build: list[str],
is_autobuild: bool,
docs_only: bool,
spellcheck_only: bool,
jobs: int,
verbose: bool,
) -> tuple[dict[str, list[DocBuildError]], dict[str, list[SpellingError]]]:
"""Builds documentation for all packages and combines errors."""
all_build_errors: dict[str, list[DocBuildError]] = defaultdict(list)
all_spelling_errors: dict[str, list[SpellingError]] = defaultdict(list)
with with_group("Cleaning documentation files"):
for package_name in packages_to_build:
console.print(f"[bright_blue]{package_name:60}:[/] Cleaning files")
builder = AirflowDocsBuilder(package_name=package_name)
builder.is_autobuild = is_autobuild
builder.clean_files()
if jobs > 1 and len(packages_to_build) > 1:
run_in_parallel(
all_build_errors=all_build_errors,
all_spelling_errors=all_spelling_errors,
packages_to_build=packages_to_build,
docs_only=docs_only,
jobs=jobs,
spellcheck_only=spellcheck_only,
verbose=verbose,
)
else:
run_sequentially(
all_build_errors=all_build_errors,
all_spelling_errors=all_spelling_errors,
packages_to_build=packages_to_build,
is_autobuild=is_autobuild,
docs_only=docs_only,
spellcheck_only=spellcheck_only,
verbose=verbose,
)
return all_build_errors, all_spelling_errors
def run_sequentially(
all_build_errors,
all_spelling_errors,
is_autobuild,
packages_to_build,
docs_only,
spellcheck_only,
verbose,
):
"""Run both - spellcheck and docs build sequentially without multiprocessing"""
if not spellcheck_only:
for package_name in packages_to_build:
build_result = perform_docs_build_for_single_package(
build_specification=BuildSpecification(
package_name=package_name,
is_autobuild=is_autobuild,
verbose=verbose,
)
)
if build_result.errors:
all_build_errors[package_name].extend(build_result.errors)
print_build_output(build_result)
if not docs_only:
for package_name in packages_to_build:
spellcheck_result = perform_spell_check_for_single_package(
build_specification=BuildSpecification(
package_name=package_name,
is_autobuild=is_autobuild,
verbose=verbose,
)
)
if spellcheck_result.spelling_errors:
all_spelling_errors[package_name].extend(spellcheck_result.spelling_errors)
if spellcheck_only:
all_build_errors[package_name].extend(spellcheck_result.build_errors)
print_spelling_output(spellcheck_result)
def run_in_parallel(
all_build_errors: dict[str, list[DocBuildError]],
all_spelling_errors: dict[str, list[SpellingError]],
packages_to_build: list[str],
docs_only: bool,
jobs: int,
spellcheck_only: bool,
verbose: bool,
):
"""Run both - spellcheck and docs build sequentially without multiprocessing"""
with multiprocessing.Pool(processes=jobs) as pool:
if not spellcheck_only:
run_docs_build_in_parallel(
all_build_errors=all_build_errors,
packages_to_build=packages_to_build,
verbose=verbose,
pool=pool,
)
if not docs_only:
run_spell_check_in_parallel(
all_spelling_errors=all_spelling_errors,
all_build_errors=all_build_errors,
packages_to_build=packages_to_build,
verbose=verbose,
pool=pool,
)
def print_build_output(result: BuildDocsResult):
"""Prints output of docs build job."""
with with_group(f"{TEXT_RED}Output for documentation build {result.package_name}{TEXT_RESET}"):
console.print()
console.print(f"[bright_blue]{result.package_name:60}: " + "#" * 80)
with open(result.log_file_name) as output:
for line in output.read().splitlines():
console.print(f"{result.package_name:60} {line}")
console.print(f"[bright_blue]{result.package_name:60}: " + "#" * 80)
def run_docs_build_in_parallel(
all_build_errors: dict[str, list[DocBuildError]],
packages_to_build: list[str],
verbose: bool,
pool: Any, # Cannot use multiprocessing types here: https://github.com/python/typeshed/issues/4266
):
"""Runs documentation building in parallel."""
doc_build_specifications: list[BuildSpecification] = []
with with_group("Scheduling documentation to build"):
for package_name in packages_to_build:
console.print(f"[bright_blue]{package_name:60}:[/] Scheduling documentation to build")
doc_build_specifications.append(
BuildSpecification(
is_autobuild=False,
package_name=package_name,
verbose=verbose,
)
)
with with_group("Running docs building"):
console.print()
result_list = pool.map(perform_docs_build_for_single_package, doc_build_specifications)
for result in result_list:
if result.errors:
all_build_errors[result.package_name].extend(result.errors)
print_build_output(result)
def print_spelling_output(result: SpellCheckResult):
"""Prints output of spell check job."""
with with_group(f"{TEXT_RED}Output for spelling check: {result.package_name}{TEXT_RESET}"):
console.print()
console.print(f"[bright_blue]{result.package_name:60}: " + "#" * 80)
with open(result.log_file_name) as output:
for line in output.read().splitlines():
console.print(f"{result.package_name:60} {line}")
console.print(f"[bright_blue]{result.package_name:60}: " + "#" * 80)
console.print()
def run_spell_check_in_parallel(
all_spelling_errors: dict[str, list[SpellingError]],
all_build_errors: dict[str, list[DocBuildError]],
packages_to_build: list[str],
verbose: bool,
pool,
):
"""Runs spell check in parallel."""
spell_check_specifications: list[BuildSpecification] = []
with with_group("Scheduling spell checking of documentation"):
for package_name in packages_to_build:
console.print(f"[bright_blue]{package_name:60}:[/] Scheduling spellchecking")
spell_check_specifications.append(
BuildSpecification(package_name=package_name, is_autobuild=False, verbose=verbose)
)
with with_group("Running spell checking of documentation"):
console.print()
result_list = pool.map(perform_spell_check_for_single_package, spell_check_specifications)
for result in result_list:
if result.spelling_errors:
all_spelling_errors[result.package_name].extend(result.spelling_errors)
all_build_errors[result.package_name].extend(result.build_errors)
print_spelling_output(result)
def display_packages_summary(
build_errors: dict[str, list[DocBuildError]], spelling_errors: dict[str, list[SpellingError]]
):
"""Displays a summary that contains information on the number of errors in each packages"""
packages_names = {*build_errors.keys(), *spelling_errors.keys()}
tabular_data = [
{
"Package name": f"[bright_blue]{package_name}[/]",
"Count of doc build errors": len(build_errors.get(package_name, [])),
"Count of spelling errors": len(spelling_errors.get(package_name, [])),
}
for package_name in sorted(packages_names, key=lambda k: k or "")
]
console.print("#" * 20, " Packages errors summary ", "#" * 20)
console.print(tabulate(tabular_data=tabular_data, headers="keys"))
console.print("#" * 50)
def print_build_errors_and_exit(
build_errors: dict[str, list[DocBuildError]],
spelling_errors: dict[str, list[SpellingError]],
spellcheck_only: bool,
) -> None:
"""Prints build errors and exists."""
if build_errors or spelling_errors:
if build_errors:
if spellcheck_only:
console.print("f[warning]There were some build errors remaining.")
console.print()
else:
display_errors_summary(build_errors)
console.print()
if spelling_errors:
display_spelling_error_summary(spelling_errors)
console.print()
console.print("The documentation has errors.")
display_packages_summary(build_errors, spelling_errors)
console.print()
console.print(CHANNEL_INVITATION)
sys.exit(1)
else:
console.print("[green]Documentation build is successful[/]")
def do_list_packages():
available_packages = get_available_packages()
console.print(
"\nAvailable packages (in parenthesis the short form of the"
" package is shown if it has a short form):\n"
)
for package in available_packages:
short_package_name = get_short_form(package)
if short_package_name:
console.print(f"[bright_blue]{package} ({short_package_name}) [/] ")
else:
console.print(f"[bright_blue]{package} [/] ")
console.print()
def is_command_available(command):
"""Check if a command is available in the system's PATH."""
return shutil.which(command) is not None
click.rich_click.OPTION_GROUPS = {
"build-docs": [
{
"name": "Build scope (default is to build docs and spellcheck)",
"options": ["--docs-only", "--spellcheck-only"],
},
{
"name": "Type of build",
"options": ["--autobuild", "--one-pass-only"],
},
{
"name": "Cleaning inventories",
"options": ["--clean-build", "--refresh-airflow-inventories"],
},
{
"name": "Filtering options",
"options": [
"--package-filter",
"--list-packages",
],
},
{
"name": "Miscellaneous options",
"options": [
"--include-commits",
"--jobs",
"--verbose",
],
},
],
}
@click.command(name="build-docs")
@click.option(
"--autobuild",
is_flag=True,
help="Starts server, serving the build docs (sphinx-autobuild) rebuilding and "
"refreshing the docs when they change on the disk. "
"Implies --verbose, --docs-only and --one-pass-only",
)
@click.option("--one-pass-only", is_flag=True, help="Do not attempt multiple builds on error")
@click.option(
"--package-filter",
"package_filters",
multiple=True,
help="Filter(s) to use more than one can be specified. You can use glob pattern matching the full "
"package name, for example `apache-airflow-providers-*`. "
"Useful when you want to select several similarly named packages together. "
"When no filtering is specified and the command is run inside one of the distribution packages with docs, "
"only that package is selected to build. "
"If the command is run in the root of the Airflow repo, all packages are selected to be built.",
)
@click.option("--docs-only", is_flag=True, help="Only build documentation")
@click.option("--spellcheck-only", is_flag=True, help="Only perform spellchecking")
@click.option(
"--include-commits", help="Include commits in the documentation.", envvar="INCLUDE_COMMITS", is_flag=True
)
@click.option(
"-j",
"--jobs",
default=os.cpu_count(),
show_default=True,
type=int,
help="Number of parallel processes that will be spawned to build the docs. If not set, default - equal "
"to the number of CPU cores - will be used. If set to 1, the build will be done sequentially.",
)
@click.option(
"--list-packages",
is_flag=True,
help="Lists all available packages. You can use it to check the names of the packages you want to build.",
)
@click.option(
"--clean-build",
is_flag=True,
help="Cleans the build directory before building the documentation and removes all inventory "
"cache (including external inventories).",
)
@click.option(
"--refresh-airflow-inventories",
is_flag=True,
help="When set, only airflow package inventories will be refreshed, regardless "
"if they are already downloaded. With `--clean-build` - everything is cleaned..",
)
@click.option(
"-v",
"--verbose",
is_flag=True,
help="Increases the verbosity of the script i.e. always displays a full log of "
"the build process, not just when it encounters errors",
)
@click.argument(
"packages",
nargs=-1,
type=BetterChoice(get_available_packages(short_form=True)),
)
def build_docs(
autobuild,
one_pass_only,
package_filters,
clean_build,
docs_only,
spellcheck_only,
include_commits,
jobs,
list_packages,
refresh_airflow_inventories,
verbose,
packages,
):
"""Builds documentation and runs spell checking for all distribution packages of airflow.."""
if list_packages:
do_list_packages()
sys.exit(0)
command = "sphinx-autobuild" if autobuild else "sphinx-build"
if not is_command_available(command):
console.print(
f"\n[red]Command '{command}' is not available. "
"Please use `--group docs` after the `uv run` when running the command:[/]\n"
)
console.print("uv run --group docs build-docs ...\n")
sys.exit(1)
available_packages = get_available_packages()
filters_to_add = []
for package_name in packages:
if package_name in available_packages:
filters_to_add.append(package_name)
else:
long_form = get_long_form(package_name)
if long_form:
filters_to_add.append(long_form)
else:
console.print("[red]Bad package specified as argument[/]:", package_name)
sys.exit(1)
if filters_to_add:
package_filters = tuple(set(package_filters).union(set(filters_to_add)))
packages_to_build = find_packages_to_build(available_packages, package_filters)
for package_name in packages_to_build:
builder = AirflowDocsBuilder(package_name=package_name)
api_dir = builder._api_dir
# Remove previously generated API docs, but only when the directory is non-empty.
if api_dir.exists() and any(api_dir.iterdir()):
shutil.rmtree(api_dir)
with with_group("Fetching inventories"):
# Inventories that could not be retrieved should be built first. This may mean this is a
# new package.
packages_without_inventories = fetch_inventories(
clean_build=clean_build, refresh_airflow_inventories=refresh_airflow_inventories
)
normal_packages, priority_packages = partition(
lambda d: d in packages_without_inventories, packages_to_build
)
normal_packages, priority_packages = list(normal_packages), list(priority_packages)
if len(packages_to_build) > 1 and autobuild:
console.print("[red]You cannot use more than 1 package with --autobuild. Quitting.[/]")
sys.exit(1)
if autobuild:
console.print(
"[yellow]Autobuild mode is enabled. Forcing --docs-only, --one-pass-only and --verbose[/]"
)
docs_only = True
verbose = True
one_pass_only = True
if len(packages_to_build) == 1:
console.print(
"[yellow]Building one package. Forcing --one-pass-only and --jobs to 1 as only one pass is needed."
)
one_pass_only = True
jobs = 1
with with_group(
f"Documentation will be built for {len(packages_to_build)} package(s)"
+ (f"with up to {jobs} parallel jobs," if jobs > 1 else "")
):
for pkg_no, pkg in enumerate(packages_to_build, start=1):
console.print(f"{pkg_no}. {pkg}")
os.environ["INCLUDE_COMMITS"] = "true" if include_commits else "false"
all_build_errors: dict[str | None, list[DocBuildError]] = {}
all_spelling_errors: dict[str | None, list[SpellingError]] = {}
if priority_packages:
# Build priority packages
package_build_errors, package_spelling_errors = build_docs_for_packages(
packages_to_build=priority_packages,
is_autobuild=autobuild,
docs_only=docs_only,
spellcheck_only=spellcheck_only,
jobs=jobs,
verbose=verbose,
)
if package_build_errors:
all_build_errors.update(package_build_errors)
if package_spelling_errors:
all_spelling_errors.update(package_spelling_errors)
# Build normal packages
# If only one inventory is missing, the remaining packages are correct. If we are missing
# two or more inventories, it is better to try to build for all packages as the previous packages
# may have failed as well.
package_build_errors, package_spelling_errors = build_docs_for_packages(
packages_to_build=packages_to_build if len(priority_packages) > 1 else normal_packages,
is_autobuild=autobuild,
docs_only=docs_only,
spellcheck_only=spellcheck_only,
jobs=jobs,
verbose=verbose,
)
if package_build_errors:
all_build_errors.update(package_build_errors)
if package_spelling_errors:
all_spelling_errors.update(package_spelling_errors)
if not one_pass_only:
# Build documentation for some packages again if it can help them.
package_build_errors = retry_building_docs_if_needed(
all_build_errors=all_build_errors,
all_spelling_errors=all_spelling_errors,
autobuild=autobuild,
docs_only=docs_only,
jobs=jobs,
verbose=verbose,
package_build_errors=package_build_errors,
originally_built_packages=packages_to_build,
# If spellchecking fails, we need to rebuild all packages first in case some references
# are broken between packages
rebuild_all_packages=spellcheck_only,
)
# And try again in case one change spans across three-level dependencies.
package_build_errors = retry_building_docs_if_needed(
all_build_errors=all_build_errors,
all_spelling_errors=all_spelling_errors,
autobuild=autobuild,
docs_only=docs_only,
jobs=jobs,
verbose=verbose,
package_build_errors=package_build_errors,
originally_built_packages=packages_to_build,
# In the 3rd pass we only rebuild packages that failed in the 2nd pass
# no matter if we do spellcheck-only build
rebuild_all_packages=False,
)
if spellcheck_only:
# And in case of spellcheck-only, we add a 4th pass to account for the A->B->C case.
# For spellcheck-only builds, the first pass does not solve any of the dependency
# issues; they only start getting solved in the 2nd pass, so we might need one more pass.
package_build_errors = retry_building_docs_if_needed(
all_build_errors=all_build_errors,
all_spelling_errors=all_spelling_errors,
autobuild=autobuild,
docs_only=docs_only,
jobs=jobs,
verbose=verbose,
package_build_errors=package_build_errors,
originally_built_packages=packages_to_build,
# In the 4th pass we only rebuild packages that failed in the 3rd pass
# no matter if we do spellcheck-only build
rebuild_all_packages=False,
)
dev_index_generator.generate_index(f"{GENERATED_PATH}/_build/index.html")
if not package_filters:
_promote_new_flags()
print_build_errors_and_exit(
all_build_errors,
all_spelling_errors,
spellcheck_only,
)
def retry_building_docs_if_needed(
all_build_errors: dict[str, list[DocBuildError]],
all_spelling_errors: dict[str, list[SpellingError]],
autobuild: bool,
docs_only: bool,
jobs: int,
verbose: bool,
package_build_errors: dict[str, list[DocBuildError]],
originally_built_packages: list[str],
rebuild_all_packages: bool,
) -> dict[str, list[DocBuildError]]:
to_retry_packages = [
package_name
for package_name, errors in package_build_errors.items()
if any(any((m in e.message) for m in ERRORS_ELIGIBLE_TO_REBUILD) for e in errors)
]
if not to_retry_packages:
console.print("[green]No packages to retry. No more passes are needed.[/]")
return package_build_errors
console.print("[warning] Some packages failed to build due to dependencies. We need another pass.[/]")
# if we are rebuilding all packages, we need to retry all packages
# even if there is one package to rebuild only
if rebuild_all_packages:
console.print("[warning]Rebuilding all originally built package as this is the first build pass:[/]")
to_retry_packages = originally_built_packages
console.print(f"[bright_blue]Packages to rebuild: {to_retry_packages}[/]")
for package_name in to_retry_packages:
if package_name in all_build_errors:
del all_build_errors[package_name]
if package_name in all_spelling_errors:
del all_spelling_errors[package_name]
package_build_errors, package_spelling_errors = build_docs_for_packages(
packages_to_build=to_retry_packages,
is_autobuild=autobuild,
docs_only=docs_only,
spellcheck_only=False,
jobs=jobs,
verbose=verbose,
)
if package_build_errors:
all_build_errors.update(package_build_errors)
if package_spelling_errors:
all_spelling_errors.update(package_spelling_errors)
return package_build_errors
if __name__ == "__main__":
build_docs()
|
SpellCheckResult
|
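The build flow above splits packages whose inventories could not be fetched into a priority list via `partition`. A minimal sketch assuming `more_itertools`-style semantics (items failing the predicate come first, items passing it second):

```python
from more_itertools import partition

packages_to_build = ["apache-airflow", "helm-chart", "docker-stack"]
packages_without_inventories = {"helm-chart"}  # hypothetical fetch result

normal, priority = partition(
    lambda d: d in packages_without_inventories, packages_to_build
)
assert list(priority) == ["helm-chart"]  # built first; likely a new package
assert list(normal) == ["apache-airflow", "docker-stack"]
```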
python
|
pytorch__pytorch
|
test/quantization/core/test_workflow_ops.py
|
{"start": 11540, "end": 53276}
|
class ____(TestCase):
@given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
X=hu.tensor(shapes=hu.array_shapes(1, 5,),
qparams=hu.qparams(dtypes=torch.quint8)))
def test_forward_per_tensor(self, device, X):
r"""Tests the forward path of the FakeQuantizePerTensorAffine op.
"""
np.random.seed(NP_RANDOM_SEED)
X, (scale, zero_point, torch_type) = X
quant_min = torch.iinfo(torch_type).min
quant_max = torch.iinfo(torch_type).max
X = to_tensor(X, device)
Y = _fake_quantize_per_tensor_affine_reference(X.cpu(), scale, zero_point, quant_min, quant_max)
Y_prime = torch.fake_quantize_per_tensor_affine(
X, scale, zero_point, quant_min, quant_max)
np.testing.assert_allclose(Y, Y_prime.cpu(), rtol=tolerance, atol=tolerance)
@given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
X=hu.tensor(shapes=hu.array_shapes(1, 5,),
qparams=hu.qparams(dtypes=torch.quint8)))
@unittest.skip("temporarily disable the test")
def test_backward_per_tensor(self, device, X):
r"""Tests the backward method.
"""
np.random.seed(NP_RANDOM_SEED)
X, (scale, zero_point, torch_type) = X
quant_min = torch.iinfo(torch_type).min
quant_max = torch.iinfo(torch_type).max
X = to_tensor(X, device)
X.requires_grad_()
Y = _fake_quantize_per_tensor_affine_reference(X.cpu(), scale, zero_point, quant_min, quant_max)
Y_prime = torch.fake_quantize_per_tensor_affine(
X, scale, zero_point, quant_min, quant_max)
dout = torch.rand_like(X, dtype=torch.float).to(device)
dX = _fake_quantize_per_tensor_affine_grad_reference(
dout, X, scale, zero_point, quant_min, quant_max)
Y_prime.backward(dout)
np.testing.assert_allclose(dX.cpu(), X.grad.cpu().detach().numpy(), rtol=tolerance, atol=tolerance)
def test_forward_backward_per_tensor_with_amp(self):
net = nn.Sequential(nn.Conv2d(1, 1, 3))
net.qconfig = torch.ao.quantization.get_default_qat_qconfig('fbgemm')
net_prep = torch.ao.quantization.prepare_qat(net)
with torch.cuda.amp.autocast():
x = torch.randn(4, 1, 5, 5)
out = net_prep(x).sum()
out.backward()
self.assertTrue(net_prep[0].weight.grad is not None)
def test_forward_per_tensor_half_precision_numerics(self):
scale = .1
zero = 0
maxi = 255
mini = 0
for _ in range(20):
X1 = torch.randn(5, 5).to(torch.float16)
Y1 = torch.fake_quantize_per_tensor_affine(X1, scale, zero, mini, maxi)
Y1r = _fake_quantize_per_tensor_affine_reference(X1, scale, zero, mini, maxi)
self.assertEqual(Y1, Y1r, rtol=tolerance, atol=tolerance)
# to force overflow
X2 = torch.tensor(2**15 + .01).to(torch.float16)
Y2 = torch.fake_quantize_per_tensor_affine(X2, scale, zero, mini, maxi)
Y2r = _fake_quantize_per_tensor_affine_reference(X2, scale, zero, mini, maxi)
self.assertEqual(Y2, Y2r, rtol=tolerance, atol=tolerance)
scale = 10
# to force underflow
X3 = torch.tensor(2**-24).to(torch.float16)
Y3 = torch.fake_quantize_per_tensor_affine(X3, scale, zero, mini, maxi)
Y3r = _fake_quantize_per_tensor_affine_reference(X3, scale, zero, mini, maxi)
self.assertEqual(Y3, Y3r, rtol=tolerance, atol=tolerance)
def _test_forward_per_tensor_cachemask_impl(self, device):
float_types = (torch.float32, torch.float16, torch.float64, torch.bfloat16)
torch_types = (torch.qint8, torch.quint8)
Xs = (torch.randn(4, 8, device=device), torch.randn(4, 16, device=device)[:, ::2])
tensor_qparams = (True, False)
for float_type, torch_type, X, tensor_qparam in itertools.product(float_types, torch_types, Xs, tensor_qparams):
# pick the scale + zp so that some values get clipped
X = X.to(float_type)
obs = torch.ao.quantization.MinMaxObserver(torch_type)
obs.to(device)
obs(X * 0.75)
scale, zero_point = obs.calculate_qparams()
quant_min, quant_max = obs.quant_min, obs.quant_max
if not tensor_qparam:
scale, zero_point = float(scale), int(zero_point)
Y_test = torch.fake_quantize_per_tensor_affine(
X, scale, zero_point, quant_min, quant_max)
Y_ref = _fake_quantize_per_tensor_affine_reference(
X, scale, zero_point, quant_min, quant_max).to(device)
self.assertEqual(Y_test, Y_ref, rtol=tolerance, atol=tolerance)
self.assertTrue(Y_test.dtype == float_type)
def test_forward_per_tensor_cachemask_cpu(self):
device = torch.device('cpu')
self._test_forward_per_tensor_cachemask_impl(device)
@unittest.skipIf(not TEST_CUDA, "GPU is not available.")
def test_forward_per_tensor_cachemask_cuda(self):
device = torch.device('cuda')
self._test_forward_per_tensor_cachemask_impl(device)
def _test_backward_per_tensor_cachemask_impl(self, device):
float_types = (torch.float32, torch.float16, torch.float64)
torch_types = (torch.qint8, torch.quint8)
tensor_qparams = (True, False)
for float_type, torch_type, tensor_qparam in itertools.product(float_types, torch_types, tensor_qparams):
X = torch.randn(4, 8).to(device).to(float_type)
X.requires_grad_()
# pick the scale + zp so that some values get clipped
obs = torch.ao.quantization.MinMaxObserver(torch_type)
obs.to(device)
obs(X * 0.75)
scale, zero_point = obs.calculate_qparams()
if not tensor_qparam:
scale, zero_point = float(scale), int(zero_point)
quant_min, quant_max = obs.quant_min, obs.quant_max
# forward pass
Y_test = torch.fake_quantize_per_tensor_affine(
X, scale, zero_point, quant_min, quant_max)
Y_ref = _fake_quantize_per_tensor_affine_reference(
X, scale, zero_point, quant_min, quant_max).to(device)
self.assertEqual(Y_test, Y_ref, rtol=tolerance, atol=tolerance)
# backward pass
dout = torch.rand_like(X, dtype=torch.float).to(device)
dX = _fake_quantize_per_tensor_affine_grad_reference(
dout, X, scale, zero_point, quant_min, quant_max)
Y_test.backward(dout)
self.assertEqual(dX, X.grad)
self.assertTrue(X.grad.dtype == float_type)
def test_backward_per_tensor_cachemask_cpu(self):
device = torch.device('cpu')
self._test_backward_per_tensor_cachemask_impl(device)
@unittest.skipIf(not TEST_CUDA, "GPU is not available.")
def test_backward_per_tensor_cachemask_cuda(self):
device = torch.device('cuda')
self._test_backward_per_tensor_cachemask_impl(device)
def _test_learnable_forward_per_tensor(self, X, device, scale_base, zero_point_base):
X_base = torch.tensor(X).to(device)
for n_bits in (4, 8):
quant_min, quant_max = 0, 2 ** n_bits - 1
X = X_base.clone().float()
scale_base = scale_base.to(device).float()
zero_point_base = zero_point_base.to(dtype=torch.int32, device=device)
scale = scale_base.clone()
zero_point = zero_point_base.clamp(quant_min, quant_max)
Y = _fake_quantize_per_tensor_affine_reference(
X, scale, zero_point, quant_min, quant_max).to(device)
for grad_factor in [0.1, 1.0, 10.0]:
Y_prime = torch._fake_quantize_learnable_per_tensor_affine(
X, scale, zero_point, quant_min, quant_max, grad_factor).to(device)
self.assertTrue(
torch.allclose(Y, Y_prime, rtol=tolerance, atol=tolerance),
"Expected kernel forward function to have results match the reference forward function")
@given(X=hu.tensor(shapes=hu.array_shapes(1, 5,),
elements=hu.floats(-1e3, 1e3, allow_nan=False, allow_infinity=False),
qparams=hu.qparams(dtypes=torch.quint8)))
@unittest.skip(
"this is broken without changes to any relevant code, "
"we need to remove hypothesis testing in CI")
def test_learnable_forward_per_tensor_cpu(self, X):
X, (_, _, _) = X
scale_base = torch.normal(mean=0, std=1, size=(1,)).clamp(1e-4, 100)
zero_point_base = torch.normal(mean=0, std=128, size=(1,))
self._test_learnable_forward_per_tensor(
X, 'cpu', scale_base, zero_point_base)
@given(X=hu.tensor(shapes=hu.array_shapes(1, 5,),
elements=hu.floats(-1e3, 1e3, allow_nan=False, allow_infinity=False),
qparams=hu.qparams(dtypes=torch.quint8)))
@unittest.skipIf(not TEST_CUDA, "GPU is not available.")
def test_learnable_forward_per_tensor_cuda(self, X):
X, (_, _, _) = X
scale_base = torch.normal(mean=0, std=1, size=(1,)).clamp(1e-4, 100)
zero_point_base = torch.normal(mean=0, std=128, size=(1,))
self._test_learnable_forward_per_tensor(
X, 'cuda', scale_base, zero_point_base)
def _test_learnable_backward_per_tensor(self, X, device, scale_base, zero_point_base, dtype=torch.float32):
r"""Tests the backward method with additional backprop support for scale and zero point.
"""
X_base = torch.tensor(X).to(device)
for n_bits in (4, 8):
quant_min, quant_max = 0, 2 ** n_bits - 1
X = X_base.clone().to(device)
X.requires_grad_()
scale_base = scale_base.to(device)
zero_point_base = zero_point_base.to(device)
scale = scale_base.clone()
scale.requires_grad_()
zero_point = zero_point_base.clone().clamp(quant_min, quant_max)
zero_point.requires_grad_()
for grad_factor in [0.1, 1.0, 10.0]:
Y_prime = torch._fake_quantize_learnable_per_tensor_affine(
X, scale, zero_point, quant_min, quant_max, grad_factor).to(device)
dout = torch.rand_like(X, dtype=torch.float).to(device)
dX, dScale, dZeroPoint = _fake_quantize_learnable_per_tensor_affine_grad_reference(
dout, X, scale, zero_point, quant_min, quant_max, device, dtype)
Y_prime.backward(dout)
expected_dX = dX.to(device).detach()
actual_dX = X.grad.to(device).detach()
expected_dScale = dScale.to(device).detach()
actual_dScale = scale.grad.to(device).detach()
expected_dZeroPoint = dZeroPoint.to(device).detach()
actual_dZeroPoint = zero_point.grad.to(device).detach()
self.assertTrue(
torch.allclose(
expected_dX, actual_dX, rtol=tolerance, atol=tolerance),
"Expected dX to match X.grad")
self.assertTrue(
torch.allclose(
expected_dScale * grad_factor, actual_dScale, rtol=tolerance, atol=tolerance),
"Expected dScale to match scale.grad")
self.assertTrue(
torch.allclose(
expected_dZeroPoint * grad_factor, actual_dZeroPoint, rtol=tolerance, atol=tolerance),
"Expected dZeroPoint to match zero_point.grad")
X.grad.data.zero_()
scale.grad.data.zero_()
zero_point.grad.data.zero_()
@given(X=hu.tensor(shapes=hu.array_shapes(1, 5,),
elements=hu.floats(-1e3, 1e3, allow_nan=False, allow_infinity=False),
qparams=hu.qparams(dtypes=torch.quint8)))
def test_learnable_backward_per_tensor_cpu(self, X):
torch.random.manual_seed(NP_RANDOM_SEED)
X, (_, _, _) = X
scale_base = torch.normal(mean=0, std=1, size=(1,)).clamp(1e-4, 100)
zero_point_base = torch.normal(mean=0, std=128, size=(1,))
self._test_learnable_backward_per_tensor(
X, 'cpu', scale_base, zero_point_base)
@unittest.skipIf(not TEST_CUDA, "GPU is not available.")
def test_learnable_backward_per_tensor_cuda(self):
# setting seed to avoid increasing tolerance due to cases where
# difference in Python vs CPP downcasting causes tensor mismatches
# e.g. 27.87704 vs 27.8408 before downcasting, 27.7500 vs 27.8750 after downcasting for Python vs CPP op
torch.random.manual_seed(12)
x_shape = (2, 1)
for dtype in [torch.bfloat16, torch.float32]:
X_base = torch.randn(x_shape, dtype=dtype, device='cuda')
scale_base = torch.normal(mean=0, std=1, size=(1,)).clamp(1e-4, 100).to(dtype=dtype)
zero_point_base = torch.normal(mean=0, std=128, size=(1,)).to(dtype=dtype)
self._test_learnable_backward_per_tensor(
X_base, 'cuda', scale_base, zero_point_base, dtype)
@given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
X=hu.tensor(shapes=hu.array_shapes(1, 5,),
qparams=hu.qparams(dtypes=[torch.quint8])),
)
def test_fq_module_per_tensor(self, device, X):
np.random.seed(NP_RANDOM_SEED)
X, (scale, zero_point, torch_type) = X
quant_min = torch.iinfo(torch_type).min
quant_max = torch.iinfo(torch_type).max
X = to_tensor(X, device)
X.requires_grad_()
fq_module = torch.ao.quantization.default_fake_quant().to(device)
Y_prime = fq_module(X)
assert fq_module.scale is not None
assert fq_module.zero_point is not None
Y = _fake_quantize_per_tensor_affine_reference(X, fq_module.scale, fq_module.zero_point, quant_min, quant_max)
np.testing.assert_allclose(Y.cpu().detach().numpy(), Y_prime.cpu().detach().numpy(), rtol=tolerance, atol=tolerance)
# Test backward
dout = torch.rand_like(X, dtype=torch.float, device=device)
Y_prime.backward(dout)
dX = _fake_quantize_per_tensor_affine_grad_reference(dout, X, fq_module.scale, fq_module.zero_point, quant_min, quant_max)
np.testing.assert_allclose(dX.cpu().numpy(), X.grad.cpu().detach().numpy(), rtol=tolerance, atol=tolerance)
@given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
X=hu.tensor(shapes=hu.array_shapes(1, 5,),
qparams=hu.qparams(dtypes=torch.quint8)))
def test_fixed_qparams_fq_module(self, device, X):
X, (scale, zero_point, torch_type) = X
X = to_tensor(X, device)
fq_module = default_fixed_qparams_range_0to1_fake_quant()
fq_module.to(device)
fixed_scale = fq_module.scale.clone()
fixed_zero_point = fq_module.zero_point.clone()
# run fq module and make sure the quantization parameters do not change
torch.ao.quantization.enable_observer(fq_module)
fq_module(X)
self.assertEqual(fixed_scale, fq_module.scale)
self.assertEqual(fixed_zero_point, fq_module.zero_point)
def test_fq_serializable_per_tensor(self):
observer = default_observer
quant_min = 0
quant_max = 127
for FakeQuantizeClass in [FakeQuantize, _LearnableFakeQuantize]:
fq_module = FakeQuantizeClass(observer, quant_min, quant_max)
X = torch.tensor([-5, -3.5, -2, 0, 3, 5, 7], dtype=torch.float32)
y_ref = fq_module(X)
state_dict = fq_module.state_dict()
self.assertEqual(state_dict['scale'], 0.094488)
self.assertEqual(state_dict['zero_point'], 53)
b = io.BytesIO()
torch.save(state_dict, b)
for weights_only in [True, False]:
b.seek(0)
loaded_dict = torch.load(b, weights_only=weights_only)
loaded_fq_module = FakeQuantizeClass(observer, quant_min, quant_max)
loaded_fq_module.load_state_dict(loaded_dict)
for key in state_dict:
self.assertEqual(state_dict[key], loaded_fq_module.state_dict()[key])
self.assertEqual(loaded_fq_module.calculate_qparams(), fq_module.calculate_qparams())
def test_fake_quant_control(self):
for fq_module in [torch.ao.quantization.default_fake_quant(),
_LearnableFakeQuantize.with_args(observer=MovingAverageMinMaxObserver, quant_min=0,
quant_max=255,
dtype=torch.quint8, qscheme=torch.per_tensor_affine,
reduce_range=True)()]:
torch.manual_seed(42)
X = torch.rand(20, 10, dtype=torch.float32)
# Output of fake quant is not identical to input
Y = fq_module(X)
self.assertNotEqual(Y, X)
if type(fq_module) is _LearnableFakeQuantize:
fq_module.toggle_fake_quant(False)
else:
torch.ao.quantization.disable_fake_quant(fq_module)
X = torch.rand(20, 10, dtype=torch.float32)
Y = fq_module(X)
# Fake quant is disabled, output is identical to input
self.assertEqual(Y, X)
# Explicit copy at this point in time, because FakeQuant keeps internal
# state in mutable buffers.
scale = fq_module.scale.detach().clone()
zero_point = fq_module.zero_point.detach().clone()
if type(fq_module) is _LearnableFakeQuantize:
fq_module.toggle_observer_update(False)
fq_module.toggle_fake_quant(True)
else:
torch.ao.quantization.disable_observer(fq_module)
torch.ao.quantization.enable_fake_quant(fq_module)
X = 10.0 * torch.rand(20, 10, dtype=torch.float32) - 5.0
Y = fq_module(X)
self.assertNotEqual(Y, X)
# Observer is disabled, scale and zero-point do not change
self.assertEqual(fq_module.scale, scale)
self.assertEqual(fq_module.zero_point, zero_point)
if type(fq_module) is _LearnableFakeQuantize:
fq_module.toggle_observer_update(True)
else:
torch.ao.quantization.enable_observer(fq_module)
Y = fq_module(X)
self.assertNotEqual(Y, X)
# Observer is enabled, scale and zero-point are different
self.assertNotEqual(fq_module.scale, scale)
self.assertNotEqual(fq_module.zero_point, zero_point)
def test_fake_quant_preserves_qparam_shapes_for_activations(self):
class Model(nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = nn.Linear(4, 4)
def forward(self, x):
x = self.linear(x)
return x
m = Model()
m.qconfig = torch.ao.quantization.get_default_qat_qconfig('fbgemm')
torch.ao.quantization.prepare_qat(m, inplace=True)
scale_shape_before = m.linear.activation_post_process.scale.shape
zero_point_shape_before = m.linear.activation_post_process.zero_point.shape
x = torch.rand(4, 4, 4, 4)
m(x)
scale_shape_after = m.linear.activation_post_process.scale.shape
zero_point_shape_after = m.linear.activation_post_process.zero_point.shape
self.assertEqual(
scale_shape_before, scale_shape_after,
msg="FakeQuant scale shape must stay consistent")
self.assertEqual(
zero_point_shape_before, zero_point_shape_after,
msg="FakeQuant zero_point shape must stay consistent")
def fake_quant_scriptable(self):
observer = default_observer
quant_min = 0
quant_max = 255
for FakeQuantizeClass in [FakeQuantize, _LearnableFakeQuantize]:
fq_module = FakeQuantizeClass(observer, quant_min, quant_max)
scripted_module = torch.jit.script(fq_module)
X = torch.tensor([-5, -3.5, -2, 0, 3, 5, 7], dtype=torch.float32)
fq_module(X)
scripted_module(X)
self.assertEqual(fq_module.calculate_qparams(), scripted_module.calculate_qparams())
buf = io.BytesIO()
torch.jit.save(scripted_module, buf)
buf.seek(0)
loaded_module = torch.jit.load(buf)
self.assertEqual(fq_module.calculate_qparams(), loaded_module.calculate_qparams())
@given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
X=hu.per_channel_tensor(shapes=hu.array_shapes(1, 5,),
qparams=hu.qparams(dtypes=torch.quint8)))
def test_forward_per_channel(self, device, X):
r"""Tests the forward path of the FakeQuantizePerTensorAffine op.
"""
np.random.seed(NP_RANDOM_SEED)
X, (scale, zero_point, axis, torch_type) = X
quant_min = torch.iinfo(torch_type).min
quant_max = torch.iinfo(torch_type).max
X = to_tensor(X, device)
scale = to_tensor(scale, device)
zero_point = torch.tensor(zero_point).to(dtype=torch.int32, device=device)
Y = _fake_quantize_per_channel_affine_reference(X.cpu(), scale.cpu(), zero_point.cpu(), axis, quant_min, quant_max)
Y_prime = torch.fake_quantize_per_channel_affine(
X, scale, zero_point, axis, quant_min, quant_max)
np.testing.assert_allclose(Y, Y_prime.cpu(), rtol=tolerance, atol=tolerance)
def _test_forward_per_channel_cachemask_impl(self, device):
torch_types = (torch.qint8, torch.quint8)
float_types = (torch.float32, torch.float16, torch.float64, torch.bfloat16)
zero_point_types = (torch.int, torch.float32, torch.float16)
for torch_type, float_type, zero_point_type in itertools.product(torch_types, float_types, zero_point_types):
X = torch.randn(1, 2, 4, 4, dtype=float_type).to(device)
# pick the scale + zp so that some values get clipped
axis = 1
obs = torch.ao.quantization.PerChannelMinMaxObserver(axis, torch_type).to(device)
obs(X * 0.75)
scale, zero_point = obs.calculate_qparams()
# TODO(future PR): fix the wrong dtype in obs.calculate_qparams and remove the cast
zero_point = zero_point.to(zero_point_type)
quant_min, quant_max = obs.quant_min, obs.quant_max
Y = _fake_quantize_per_channel_affine_reference(
X.cpu(), scale.cpu(), zero_point.cpu(), axis, quant_min, quant_max)
Y_prime = torch.fake_quantize_per_channel_affine(
X, scale, zero_point, axis, quant_min, quant_max)
torch.testing.assert_close(Y, Y_prime.cpu(), rtol=tolerance, atol=tolerance)
self.assertTrue(Y.dtype == float_type)
def test_forward_per_channel_cachemask_cpu(self):
self._test_forward_per_channel_cachemask_impl('cpu')
@unittest.skipIf(not TEST_CUDA, "GPU is not available.")
def test_forward_per_channel_cachemask_cuda(self):
self._test_forward_per_channel_cachemask_impl('cuda')
def test_forward_per_channel_half_precision_numerics(self):
scale = torch.randn(5).abs()
zero = torch.randn(5).to(dtype=torch.int)
axis = 1
mini = 0
maxi = 255
for _ in range(20):
X1 = torch.randn(4, 5).to(torch.float16)
Y1 = torch.fake_quantize_per_channel_affine(X1, scale, zero, axis, mini, maxi)
Y1r = _fake_quantize_per_channel_affine_reference(X1, scale, zero, axis, mini, maxi)
self.assertEqual(Y1, Y1r, rtol=tolerance, atol=tolerance)
# to force overflow
X2 = torch.randn(4, 5).to(torch.float16)
X2[0, 0] = 2**15 + .01
Y2 = torch.fake_quantize_per_channel_affine(X2, scale, zero, axis, mini, maxi)
Y2r = _fake_quantize_per_channel_affine_reference(X2, scale, zero, axis, mini, maxi)
self.assertEqual(Y2, Y2r, rtol=tolerance, atol=tolerance)
scale = torch.zeros(5) + 10
# to force underflow
X3 = torch.randn(4, 5).to(torch.float16)
X3[0, 0] = 2**-24
Y3 = torch.fake_quantize_per_channel_affine(X3, scale, zero, axis, mini, maxi)
Y3r = _fake_quantize_per_channel_affine_reference(X3, scale, zero, axis, mini, maxi)
self.assertEqual(Y3, Y3r, rtol=tolerance, atol=tolerance)
@given(X=hu.per_channel_tensor(shapes=hu.array_shapes(1, 5,),
qparams=hu.qparams(dtypes=torch.quint8)))
def test_fake_quant_per_channel_qparam_range(self, X):
X, (scale, zero_point, axis, torch_type) = X
quant_min = torch.iinfo(torch_type).min
quant_max = torch.iinfo(torch_type).max
for device in ['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']:
X = to_tensor(X, device)
scale = to_tensor(scale, device)
# Ensure that zero_point < quant_min.
zero_point = torch.full(zero_point.shape, -1 - quant_min).to(dtype=torch.int32, device=device)
# For non-float zero_point, fakequant requires zero_point between quant_min and quant_max.
with self.assertRaisesRegex(RuntimeError, "`zero_point` must be between `quant_min` and `quant_max`."):
Y = torch.fake_quantize_per_channel_affine(X, scale, zero_point, axis, quant_min, quant_max)
# For float zero_point, fakequant can be outside quant_min and quant_max.
for zero_point_dtype in [torch.float32, torch.float16]:
zero_point = zero_point.to(dtype=zero_point_dtype)
Y = torch.fake_quantize_per_channel_affine(X, scale, zero_point, axis, quant_min, quant_max)
Y_ref = _fake_quantize_per_channel_affine_reference(X.cpu(), scale.cpu(), zero_point.cpu(),
axis, quant_min, quant_max)
np.testing.assert_allclose(Y.cpu().numpy(), Y_ref.cpu().numpy(), rtol=tolerance, atol=tolerance)
def _test_learnable_forward_per_channel(self, X_base, device, scale_base, zero_point_base, axis):
r"""Tests the forward path of the learnable FakeQuantizePerTensorAffine op.
"""
for n_bits in (4, 8):
quant_min, quant_max = 0, 2 ** (n_bits) - 1
scale_base = scale_base.to(device)
zero_point_base = zero_point_base.to(device)
X_curr = X_base.clone()
scale_curr = scale_base.clone()
zero_point_curr = zero_point_base.clone()
Y = _fake_quantize_per_channel_affine_reference(
X_curr, scale_curr, zero_point_curr.round().clamp(quant_min, quant_max), axis, quant_min, quant_max).to(device)
for grad_factor in [0.1, 1.0, 10.0]:
Y_prime = torch._fake_quantize_learnable_per_channel_affine(
X_curr, scale_curr, zero_point_curr, axis, quant_min, quant_max, grad_factor).to(device)
self.assertTrue(
torch.allclose(Y, Y_prime, rtol=tolerance, atol=tolerance),
"Expected kernel forward function to have results match the reference forward function")
@given(X=hu.per_channel_tensor(shapes=hu.array_shapes(1, 5,),
qparams=hu.qparams(dtypes=torch.quint8)))
def test_learnable_forward_per_channel_cpu(self, X):
torch.random.manual_seed(NP_RANDOM_SEED)
X, (_, _, axis, _) = X
X_base = torch.tensor(X).to('cpu')
channel_size = X_base.size(axis)
scale_base = torch.normal(mean=0, std=1, size=(channel_size,)).clamp(1e-4, 100)
zero_point_base = torch.normal(mean=0, std=128, size=(channel_size,))
self._test_learnable_forward_per_channel(
X_base, 'cpu', scale_base, zero_point_base, axis)
@unittest.skipIf(not TEST_CUDA, "GPU is not available.")
def test_learnable_forward_per_channel_cuda(self):
torch.random.manual_seed(NP_RANDOM_SEED)
shape = (2, 1, 2, 10)
axis = 1
for dtype in [torch.float32, torch.bfloat16]:
X_base = torch.randn(shape, device="cuda").to(dtype)
channel_size = X_base.size(axis)
scale_base = torch.normal(mean=0, std=1, size=(channel_size,)).clamp(1e-4, 100).to(dtype)
zero_point_base = torch.normal(mean=0, std=128, size=(channel_size,)).to(dtype)
self._test_learnable_forward_per_channel(
X_base, 'cuda', scale_base, zero_point_base, axis)
@given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
X=hu.per_channel_tensor(shapes=hu.array_shapes(1, 5,),
qparams=hu.qparams(dtypes=torch.quint8)))
@unittest.skip(
"this is broken without changes to any relevant code, "
"we need to remove hypothesis testing in CI")
def test_backward_per_channel(self, device, X):
r"""Tests the backward method.
"""
np.random.seed(NP_RANDOM_SEED)
X, (scale, zero_point, axis, torch_type) = X
quant_min = torch.iinfo(torch_type).min
quant_max = torch.iinfo(torch_type).max
zero_point_types = (torch.int, torch.float, torch.float16)
for zero_point_type in zero_point_types:
X = to_tensor(X, device)
scale = to_tensor(scale, device)
zero_point = to_tensor(zero_point, device).to(dtype=zero_point_type)
X.requires_grad_()
Y_prime = torch.fake_quantize_per_channel_affine(
X, scale, zero_point, axis, quant_min, quant_max)
dout = torch.rand_like(X, dtype=torch.float).to(device)
dX = _fake_quantize_per_channel_affine_grad_reference(
dout, X, scale, zero_point, axis, quant_min, quant_max)
Y_prime.backward(dout)
np.testing.assert_allclose(dX.cpu().detach().numpy(), X.grad.cpu().detach().numpy(), rtol=tolerance, atol=tolerance)
def _test_backward_per_channel_cachemask_impl(self, device):
torch_types = (torch.qint8, torch.quint8)
float_types = (torch.float32, torch.float16, torch.float64)
zero_point_types = (torch.int, torch.float32, torch.float16)
for torch_type, float_type, zero_point_type in itertools.product(torch_types, float_types, zero_point_types):
X = torch.randn(1, 2, 4, 4, dtype=float_type).to(device)
# pick the scale + zp so that some values get clipped
axis = 1
obs = torch.ao.quantization.PerChannelMinMaxObserver(axis, torch_type).to(device)
obs(X * 0.75)
scale, zero_point = obs.calculate_qparams()
# TODO(future PR): fix the wrong dtype in obs.calculate_qparams and remove the cast
zero_point = zero_point.to(zero_point_type)
quant_min, quant_max = obs.quant_min, obs.quant_max
X.requires_grad_()
Y_prime = torch.fake_quantize_per_channel_affine(
X, scale, zero_point, axis, quant_min, quant_max)
dout = torch.rand_like(X, dtype=float_type).to(device)
dX = _fake_quantize_per_channel_affine_grad_reference(
dout, X, scale, zero_point, axis, quant_min, quant_max)
Y_prime.backward(dout)
np.testing.assert_allclose(
dX.cpu().detach().numpy(), X.grad.cpu().detach().numpy(), rtol=tolerance, atol=tolerance)
assert X.grad.dtype == float_type

    def test_backward_per_channel_cachemask_cpu(self):
        self._test_backward_per_channel_cachemask_impl('cpu')

    @unittest.skipIf(not TEST_CUDA, "No GPU available.")
    def test_backward_per_channel_cachemask_cuda(self):
        self._test_backward_per_channel_cachemask_impl('cuda')

def _test_learnable_backward_per_channel(self, X_base, device, scale_base, zero_point_base, axis, dtype=torch.float32):
r"""Tests the backward path of the learnable FakeQuantizePerTensorAffine op.
"""
for n_bits in (4, 8):
quant_min, quant_max = 0, 2 ** n_bits - 1
scale_base = scale_base.to(device)
zero_point_base = zero_point_base.to(device=device)
X_curr = X_base.clone()
X_curr.requires_grad_()
scale_curr = scale_base.clone()
scale_curr.requires_grad_()
zero_point_curr = zero_point_base.clone()
zero_point_curr.requires_grad_()
for grad_factor in [0.1, 1.0, 10.0]:
Y_prime = torch._fake_quantize_learnable_per_channel_affine(
X_curr, scale_curr, zero_point_curr, axis, quant_min, quant_max, grad_factor).to(device)
dout = torch.rand(X_curr.shape, dtype=torch.float).to(device)
dX, dScale, dZeroPoint = _fake_quantize_learnable_per_channel_affine_grad_reference(
dout, X_curr, scale_curr, zero_point_curr, axis, quant_min, quant_max, device, dtype)
Y_prime.backward(dout)
dX_expected = dX.to(device).detach()
dX_actual = X_curr.to(device).grad.detach()
dScale_expected = dScale.to(device).detach()
dScale_actual = scale_curr.to(device).grad.detach()
dZeroPoint_expected = dZeroPoint.to(device).detach()
dZeroPoint_actual = zero_point_curr.to(device).grad.detach()
                # Increase the tolerance for bf16 due to differences between Python's
                # x.to(torch.bfloat16) and C++'s x.to(at::kBFloat16): for example,
                # -0.16749558 is downcast to about -1.68 in Python (after applying
                # the grad_factor), while in C++ -1.6752 is downcast to -1.67.
tolerance = 1e-2 if dtype is torch.bfloat16 else 1e-4
self.assertTrue(
torch.allclose(dX_expected, dX_actual, rtol=tolerance, atol=tolerance),
f"Expected dX={dX_expected} to match X.grad={dX_actual}, X={X_curr}, s={scale_curr}, z={zero_point_curr}, dout={dout}, n_bits={n_bits}") # noqa: B950
self.assertTrue(
torch.allclose(dScale_expected * grad_factor, dScale_actual, rtol=tolerance, atol=tolerance),
f"Expected dScale={dScale_expected * grad_factor} to match scale.grad={dScale_actual}, X={X_curr}, s={scale_curr}, z={zero_point_curr}, dout={dout}, n_bits={n_bits}") # noqa: B950
self.assertTrue(
torch.allclose(dZeroPoint_expected * grad_factor, dZeroPoint_actual, rtol=tolerance, atol=tolerance),
f"Expected dZeroPoint={dZeroPoint_expected * grad_factor} to match zero_point.grad={dZeroPoint_actual}, X={X_curr}, s={scale_curr}, z={zero_point_curr}, dout={dout}, n_bits={n_bits}") # noqa: B950
X_curr.grad.data.zero_()
scale_curr.grad.data.zero_()
zero_point_curr.grad.data.zero_()

    @given(X=hu.per_channel_tensor(shapes=hu.array_shapes(2, 5,),
qparams=hu.qparams(dtypes=torch.quint8)))
@unittest.skip(
"this is broken without changes to any relevant code, "
"we need to remove hypothesis testing in CI")
def test_learnable_backward_per_channel_cpu(self, X):
torch.random.manual_seed(NP_RANDOM_SEED)
X, (_, _, axis, _) = X
X_base = torch.tensor(X).to('cpu')
channel_size = X_base.size(axis)
scale_base = torch.normal(mean=0, std=1, size=(channel_size,)).clamp(1e-4, 100)
zero_point_base = torch.normal(mean=0, std=128, size=(channel_size,))
self._test_learnable_backward_per_channel(
X_base, 'cpu', scale_base, zero_point_base, axis)

    @unittest.skipIf(not TEST_CUDA, "No GPU available.")
def test_learnable_backward_per_channel_cuda(self):
torch.random.manual_seed(NP_RANDOM_SEED)
x_shape = (2, 1)
scale_shape = (2,)
zero_point_shape = (2,)
axis = 0
for dtype in [torch.bfloat16, torch.float32]:
X_base = torch.randn(x_shape, dtype=dtype, device='cuda')
scale_base = torch.randn(scale_shape, dtype=dtype, device='cuda')
zero_point_base = torch.randint(0, 10, zero_point_shape, device='cuda').to(dtype=dtype)
self._test_learnable_backward_per_channel(
X_base, 'cuda', scale_base, zero_point_base, axis, dtype
)

    def test_numerical_consistency_per_tensor(self):
        self._test_numerical_consistency('per_tensor')

    def test_numerical_consistency_per_channel(self):
        self._test_numerical_consistency('per_channel')

    def _test_numerical_consistency(self, test_type):
r"""Comparing numerical consistency between quantize/dequantize op and the fake quantize op across devices and dtypes
"""
torch.random.manual_seed(NP_RANDOM_SEED)
torch_types = [torch.qint8, torch.quint8]
float_types = [torch.float, torch.float16, torch.float64]
if test_type == "per_channel":
zero_types = [torch.int, torch.float, torch.float16]
else:
zero_types = [torch.int]
devices = [torch.device('cpu'), torch.device('cuda')] if torch.cuda.is_available() else [torch.device('cpu')]
axis = 1
for _ in range(20):
for torch_type, float_type, device, zero_type in itertools.product(torch_types, float_types, devices, zero_types):
X = torch.randn(3, 3, device=device).to(float_type)
scales = (10 * torch.randn(3, device=device)).abs()
scale = scales.mean().to(float).item()
zeros = (10 * torch.randn(3, device=device)).abs().to(dtype=zero_type)
zero = zeros.max().view(1).item()
quant_min = torch.iinfo(torch_type).min
quant_max = torch.iinfo(torch_type).max
test_was_run = False
if test_type == "per_tensor":
test_was_run = True
Y = torch.dequantize(torch.quantize_per_tensor(X.to('cpu').to(torch.float),
scale, zero, torch_type)).to(device).to(float_type)
Y_prime = torch.fake_quantize_per_tensor_affine(X, scale, zero, quant_min, quant_max)
self.assertEqual(
Y, Y_prime, "Difference found between dequant+quant_per_tensor and fake_quantize_per_tensor")
if test_type == "per_channel":
test_was_run = True
Y = torch.dequantize(torch.quantize_per_channel(X.to('cpu').to(torch.float), scales.to(
'cpu'), zeros.to('cpu'), axis, torch_type)).to(device).to(float_type)
Y_prime = torch.fake_quantize_per_channel_affine(X, scales, zeros, axis, quant_min, quant_max)
self.assertEqual(
Y, Y_prime, "Difference found between dequant+quant_per_channel and fake_quantize_per_channel")
self.assertTrue(test_was_run)
@skipIfTorchDynamo("Not a suitable test for TorchDynamo")
def test_fake_quantize_per_channel_affine_scale_dtypes(self):
"""
Ensure the error message is more helpful
"""
dtype_list = [torch.float, torch.float64, torch.bfloat16, torch.half]
for scale_dtype in dtype_list:
input = torch.randn(3, 4, 5, 6)
scale = torch.Tensor([0.1, 0.2, 0.3, 0.4]).to(scale_dtype)
zero_point = torch.tensor([1, 2, 3, 4], dtype=torch.int32)
axis = 1
quant_min = 0
quant_max = 255
if scale_dtype != torch.float:
with self.assertRaises(RuntimeError):
torch.fake_quantize_per_channel_affine(
input, scale, zero_point, axis, quant_min, quant_max
)
else:
torch.fake_quantize_per_channel_affine(
input, scale, zero_point, axis, quant_min, quant_max
)

    @skipIfTorchDynamo("Not a suitable test for TorchDynamo")
@unittest.skipIf(TEST_WITH_ROCM, "Not a suitable test for ROCM")
@given(dtype=st.sampled_from([torch.float, torch.float64, torch.half, torch.bfloat16]),
device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']))
def test_fake_quantize_per_tensor_affine_inf(self, dtype, device) -> None:
# https://github.com/pytorch/pytorch/issues/154328
input_tensor = torch.tensor([torch.inf], dtype=dtype).to(device)
scale = 0.01
zero_point = 0
quant_min = 0
quant_max = 255
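        # inf / scale rounds to inf, which the clamp saturates at quant_max, so
        # the expected output is (quant_max - zero_point) * scale
        # (255 * 0.01 = 2.55 with the parameters above).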
result = torch.fake_quantize_per_tensor_affine(input_tensor, scale, zero_point, quant_min, quant_max)
ref_result = (min(quant_max, max(quant_min, torch.round(input_tensor / scale) + zero_point)) - zero_point) * scale
ref_result = torch.Tensor([ref_result]).to(dtype).to(device)
self.assertEqual(result, ref_result)
|
TestFakeQuantizeOps
|
python
|
doocs__leetcode
|
solution/3200-3299/3239.Minimum Number of Flips to Make Binary Grid Palindromic I/Solution.py
|
{
"start": 0,
"end": 446
}
|
class ____:
def minFlips(self, grid: List[List[int]]) -> int:
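        # cnt1 counts the flips needed to make every row a palindrome and cnt2
        # the flips needed to make every column one; each mismatched mirror
        # pair costs exactly one flip, and the two targets are independent,
        # so the answer is the cheaper of the two.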
m, n = len(grid), len(grid[0])
cnt1 = cnt2 = 0
for row in grid:
for j in range(n // 2):
if row[j] != row[n - j - 1]:
cnt1 += 1
for j in range(n):
for i in range(m // 2):
if grid[i][j] != grid[m - i - 1][j]:
cnt2 += 1
return min(cnt1, cnt2)
|
Solution
|