Dataset Viewer
Auto-converted to Parquet Duplicate
repo
string
pull_number
int64
instance_id
string
issue_numbers
list
base_commit
string
patch
string
test_patch
string
problem_statement
string
hints_text
string
created_at
timestamp[s]
language
string
label
string
getlogbook/logbook
183
getlogbook__logbook-183
[ "94" ]
1d999a784d0d8f5f7423f25c684cc1100843ccc5
diff --git a/logbook/handlers.py b/logbook/handlers.py --- a/logbook/handlers.py +++ b/logbook/handlers.py @@ -20,6 +20,7 @@ except ImportError: from sha import new as sha1 import traceback +import collections from datetime import datetime, timedelta from collections import deque from textwrap import dedent @@ -1014,14 +1015,42 @@ class MailHandler(Handler, StringFormatterHandlerMixin, The default timedelta is 60 seconds (one minute). - The mail handler is sending mails in a blocking manner. If you are not + The mail handler sends mails in a blocking manner. If you are not using some centralized system for logging these messages (with the help of ZeroMQ or others) and the logging system slows you down you can wrap the handler in a :class:`logbook.queues.ThreadedWrapperHandler` that will then send the mails in a background thread. + `server_addr` can be a tuple of host and port, or just a string containing + the host to use the default port (25, or 465 if connecting securely.) + + `credentials` can be a tuple or dictionary of arguments that will be passed + to :py:meth:`smtplib.SMTP.login`. + + `secure` can be a tuple, dictionary, or boolean. As a boolean, this will + simply enable or disable a secure connection. The tuple is unpacked as + parameters `keyfile`, `certfile`. As a dictionary, `secure` should contain + those keys. For backwards compatibility, ``secure=()`` will enable a secure + connection. If `starttls` is enabled (default), these parameters will be + passed to :py:meth:`smtplib.SMTP.starttls`, otherwise + :py:class:`smtplib.SMTP_SSL`. + + .. versionchanged:: 0.3 The handler supports the batching system now. + + .. versionadded:: 1.0 + `starttls` parameter added to allow disabling STARTTLS for SSL + connections. + + .. versionchanged:: 1.0 + If `server_addr` is a string, the default port will be used. + + .. versionchanged:: 1.0 + `credentials` parameter can now be a dictionary of keyword arguments. + + .. 
versionchanged:: 1.0 + `secure` can now be a dictionary or boolean in addition to to a tuple. """ default_format_string = MAIL_FORMAT_STRING default_related_format_string = MAIL_RELATED_FORMAT_STRING @@ -1039,7 +1068,7 @@ def __init__(self, from_addr, recipients, subject=None, server_addr=None, credentials=None, secure=None, record_limit=None, record_delta=None, level=NOTSET, format_string=None, related_format_string=None, - filter=None, bubble=False): + filter=None, bubble=False, starttls=True): Handler.__init__(self, level, filter, bubble) StringFormatterHandlerMixin.__init__(self, format_string) LimitingHandlerMixin.__init__(self, record_limit, record_delta) @@ -1054,6 +1083,7 @@ def __init__(self, from_addr, recipients, subject=None, if related_format_string is None: related_format_string = self.default_related_format_string self.related_format_string = related_format_string + self.starttls = starttls def _get_related_format_string(self): if isinstance(self.related_formatter, StringFormatter): @@ -1148,20 +1178,63 @@ def get_connection(self): """Returns an SMTP connection. By default it reconnects for each sent mail. """ - from smtplib import SMTP, SMTP_PORT, SMTP_SSL_PORT + from smtplib import SMTP, SMTP_SSL, SMTP_PORT, SMTP_SSL_PORT if self.server_addr is None: host = '127.0.0.1' port = self.secure and SMTP_SSL_PORT or SMTP_PORT else: - host, port = self.server_addr - con = SMTP() - con.connect(host, port) + try: + host, port = self.server_addr + except ValueError: + # If server_addr is a string, the tuple unpacking will raise + # ValueError, and we can use the default port. + host = self.server_addr + port = self.secure and SMTP_SSL_PORT or SMTP_PORT + + # Previously, self.secure was passed as con.starttls(*self.secure). This + # meant that starttls couldn't be used without a keyfile and certfile + # unless an empty tuple was passed. See issue #94. + # + # The changes below allow passing: + # - secure=True for secure connection without checking identity. 
+ # - dictionary with keys 'keyfile' and 'certfile'. + # - tuple to be unpacked to variables keyfile and certfile. + # - secure=() equivalent to secure=True for backwards compatibility. + # - secure=False equivalent to secure=None to disable. + if isinstance(self.secure, collections.Mapping): + keyfile = self.secure.get('keyfile', None) + certfile = self.secure.get('certfile', None) + elif isinstance(self.secure, collections.Iterable): + # Allow empty tuple for backwards compatibility + if len(self.secure) == 0: + keyfile = certfile = None + else: + keyfile, certfile = self.secure + else: + keyfile = certfile = None + + # Allow starttls to be disabled by passing starttls=False. + if not self.starttls and self.secure: + con = SMTP_SSL(host, port, keyfile=keyfile, certfile=certfile) + else: + con = SMTP(host, port) + if self.credentials is not None: - if self.secure is not None: + secure = self.secure + if self.starttls and secure is not None and secure is not False: con.ehlo() - con.starttls(*self.secure) + con.starttls(keyfile=keyfile, certfile=certfile) con.ehlo() - con.login(*self.credentials) + + # Allow credentials to be a tuple or dict. 
+ if isinstance(self.credentials, collections.Mapping): + credentials_args = () + credentials_kwargs = self.credentials + else: + credentials_args = self.credentials + credentials_kwargs = dict() + + con.login(*credentials_args, **credentials_kwargs) return con def close_connection(self, con): @@ -1175,7 +1248,7 @@ def close_connection(self, con): pass def deliver(self, msg, recipients): - """Delivers the given message to a list of recpients.""" + """Delivers the given message to a list of recipients.""" con = self.get_connection() try: con.sendmail(self.from_addr, recipients, msg.as_string()) @@ -1227,7 +1300,7 @@ class GMailHandler(MailHandler): def __init__(self, account_id, password, recipients, **kw): super(GMailHandler, self).__init__( - account_id, recipients, secure=(), + account_id, recipients, secure=True, server_addr=("smtp.gmail.com", 587), credentials=(account_id, password), **kw) diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -158,6 +158,10 @@ def status_msgs(*msgs): extras_require = dict() extras_require['test'] = set(['pytest', 'pytest-cov']) + +if sys.version_info[:2] < (3, 3): + extras_require['test'] |= set(['mock']) + extras_require['dev'] = set(['cython']) | extras_require['test'] extras_require['execnet'] = set(['execnet>=1.0.9'])
diff --git a/tests/test_mail_handler.py b/tests/test_mail_handler.py --- a/tests/test_mail_handler.py +++ b/tests/test_mail_handler.py @@ -7,6 +7,11 @@ from .utils import capturing_stderr_context, make_fake_mail_handler +try: + from unittest.mock import Mock, call, patch +except ImportError: + from mock import Mock, call, patch + __file_without_pyc__ = __file__ if __file_without_pyc__.endswith('.pyc'): __file_without_pyc__ = __file_without_pyc__[:-1] @@ -104,3 +109,126 @@ def test_group_handler_mail_combo(activation_strategy, logger): assert len(related) == 2 assert re.search('Message type:\s+WARNING', related[0]) assert re.search('Message type:\s+DEBUG', related[1]) + + +def test_mail_handler_arguments(): + with patch('smtplib.SMTP', autospec=True) as mock_smtp: + + # Test the mail handler with supported arguments before changes to + # secure, credentials, and starttls + mail_handler = logbook.MailHandler( + from_addr='from@example.com', + recipients='to@example.com', + server_addr=('server.example.com', 465), + credentials=('username', 'password'), + secure=('keyfile', 'certfile')) + + mail_handler.get_connection() + + assert mock_smtp.call_args == call('server.example.com', 465) + assert mock_smtp.method_calls[1] == call().starttls( + keyfile='keyfile', certfile='certfile') + assert mock_smtp.method_calls[3] == call().login('username', 'password') + + # Test secure=() + mail_handler = logbook.MailHandler( + from_addr='from@example.com', + recipients='to@example.com', + server_addr=('server.example.com', 465), + credentials=('username', 'password'), + secure=()) + + mail_handler.get_connection() + + assert mock_smtp.call_args == call('server.example.com', 465) + assert mock_smtp.method_calls[5] == call().starttls( + certfile=None, keyfile=None) + assert mock_smtp.method_calls[7] == call().login('username', 'password') + + # Test implicit port with string server_addr, dictionary credentials, + # dictionary secure. 
+ mail_handler = logbook.MailHandler( + from_addr='from@example.com', + recipients='to@example.com', + server_addr='server.example.com', + credentials={'user': 'username', 'password': 'password'}, + secure={'certfile': 'certfile2', 'keyfile': 'keyfile2'}) + + mail_handler.get_connection() + + assert mock_smtp.call_args == call('server.example.com', 465) + assert mock_smtp.method_calls[9] == call().starttls( + certfile='certfile2', keyfile='keyfile2') + assert mock_smtp.method_calls[11] == call().login( + user='username', password='password') + + # Test secure=True + mail_handler = logbook.MailHandler( + from_addr='from@example.com', + recipients='to@example.com', + server_addr=('server.example.com', 465), + credentials=('username', 'password'), + secure=True) + + mail_handler.get_connection() + + assert mock_smtp.call_args == call('server.example.com', 465) + assert mock_smtp.method_calls[13] == call().starttls( + certfile=None, keyfile=None) + assert mock_smtp.method_calls[15] == call().login('username', 'password') + assert len(mock_smtp.method_calls) == 16 + + # Test secure=False + mail_handler = logbook.MailHandler( + from_addr='from@example.com', + recipients='to@example.com', + server_addr=('server.example.com', 465), + credentials=('username', 'password'), + secure=False) + + mail_handler.get_connection() + + # starttls not called because we check len of method_calls before and + # after this test. 
+ assert mock_smtp.call_args == call('server.example.com', 465) + assert mock_smtp.method_calls[16] == call().login('username', 'password') + assert len(mock_smtp.method_calls) == 17 + + with patch('smtplib.SMTP_SSL', autospec=True) as mock_smtp_ssl: + # Test starttls=False + mail_handler = logbook.MailHandler( + from_addr='from@example.com', + recipients='to@example.com', + server_addr='server.example.com', + credentials={'user': 'username', 'password': 'password'}, + secure={'certfile': 'certfile', 'keyfile': 'keyfile'}, + starttls=False) + + mail_handler.get_connection() + + assert mock_smtp_ssl.call_args == call( + 'server.example.com', 465, keyfile='keyfile', certfile='certfile') + assert mock_smtp_ssl.method_calls[0] == call().login( + user='username', password='password') + + # Test starttls=False with secure=True + mail_handler = logbook.MailHandler( + from_addr='from@example.com', + recipients='to@example.com', + server_addr='server.example.com', + credentials={'user': 'username', 'password': 'password'}, + secure=True, + starttls=False) + + mail_handler.get_connection() + + assert mock_smtp_ssl.call_args == call( + 'server.example.com', 465, keyfile=None, certfile=None) + assert mock_smtp_ssl.method_calls[1] == call().login( + user='username', password='password') + + + + + +
SMTP Handler STARTTLS Due to the lack of documentation on this handler it took a little digging to work out how to get it to work... One thing that confused me was the "secure" argument. Python SMTPLib starttls() accepts two optional values: a keyfile and certfile - but these are only required for _checking_ the identity. If neither is specified then SMTPLib will still try to establish an encrypted connection but without checking the identity. If you do not specify an argument to Logbook, it will not attempt to establish an encrypted connection at all. So, if you want a tls connection to the SMTP server but don't care about checking the identity you can do `secure = []` which will pass the `if self.secure is not None`, however if you do `secure = True` you will get an error because you cannot unpack a boolean! (as logbook populates the arguments using: `conn.starttls(*self.secure)`). It'd help if the documentation explained the arguments for the mail handlers.
You're right. A simple solution is to use `secure = ()`, but I agree it has to be better documented.
2015-12-03T01:44:29
python
Easy
rigetti/pyquil
399
rigetti__pyquil-399
[ "398", "398" ]
d6a0e29b2b1a506a48977a9d8432e70ec699af34
diff --git a/pyquil/parameters.py b/pyquil/parameters.py --- a/pyquil/parameters.py +++ b/pyquil/parameters.py @@ -31,9 +31,11 @@ def format_parameter(element): out += repr(r) if i == 1: - out += 'i' + assert np.isclose(r, 0, atol=1e-14) + out = 'i' elif i == -1: - out += '-i' + assert np.isclose(r, 0, atol=1e-14) + out = '-i' elif i < 0: out += repr(i) + 'i' else:
diff --git a/pyquil/tests/test_parameters.py b/pyquil/tests/test_parameters.py --- a/pyquil/tests/test_parameters.py +++ b/pyquil/tests/test_parameters.py @@ -14,6 +14,8 @@ def test_format_parameter(): (1j, 'i'), (0 + 1j, 'i'), (-1j, '-i'), + (1e-15 + 1j, 'i'), + (1e-15 - 1j, '-i') ] for test_case in test_cases:
DEFGATEs are not correct There is a problem with DEFGATEs that has manifested itself in the `phase_estimation` module of Grove (brought to our attention here: https://github.com/rigetticomputing/grove/issues/145). I have traced the problem to commit d309ac11dabd9ea9c7ffa57dd26e68b5e7129aa9 Each of the below test cases should deterministically return the input phase, for both `phase_estimation` and `estimate_gradient`. With this commit, result is not correct and nondeterministic for phase=3/4. ``` import numpy as np import scipy.linalg import pyquil.api as api from grove.alpha.phaseestimation.phase_estimation import phase_estimation from grove.alpha.jordan_gradient.gradient_utils import * from grove.alpha.jordan_gradient.jordan_gradient import estimate_gradient qvm = api.QVMConnection() trials = 1 precision = 8 for phase in [1/2, 1/4, 3/4, 1/8, 1/16, 1/32]: Z = np.asarray([[1.0, 0.0], [0.0, -1.0]]) Rz = scipy.linalg.expm(-1j*Z*np.pi*phase) p = phase_estimation(Rz, precision) out = qvm.run(p, list(range(precision)), trials) wf = qvm.wavefunction(p) bf_estimate = measurements_to_bf(out) bf_explicit = '{0:.16f}'.format(bf_estimate) deci_estimate = binary_to_real(bf_explicit) print('phase: ', phase) print('pe', deci_estimate) print('jg', estimate_gradient(phase, precision, n_measurements=trials, cxn=qvm)) print('\n') ``` DEFGATEs are not correct There is a problem with DEFGATEs that has manifested itself in the `phase_estimation` module of Grove (brought to our attention here: https://github.com/rigetticomputing/grove/issues/145). I have traced the problem to commit d309ac11dabd9ea9c7ffa57dd26e68b5e7129aa9 Each of the below test cases should deterministically return the input phase, for both `phase_estimation` and `estimate_gradient`. With this commit, result is not correct and nondeterministic for phase=3/4. 
``` import numpy as np import scipy.linalg import pyquil.api as api from grove.alpha.phaseestimation.phase_estimation import phase_estimation from grove.alpha.jordan_gradient.gradient_utils import * from grove.alpha.jordan_gradient.jordan_gradient import estimate_gradient qvm = api.QVMConnection() trials = 1 precision = 8 for phase in [1/2, 1/4, 3/4, 1/8, 1/16, 1/32]: Z = np.asarray([[1.0, 0.0], [0.0, -1.0]]) Rz = scipy.linalg.expm(-1j*Z*np.pi*phase) p = phase_estimation(Rz, precision) out = qvm.run(p, list(range(precision)), trials) wf = qvm.wavefunction(p) bf_estimate = measurements_to_bf(out) bf_explicit = '{0:.16f}'.format(bf_estimate) deci_estimate = binary_to_real(bf_explicit) print('phase: ', phase) print('pe', deci_estimate) print('jg', estimate_gradient(phase, precision, n_measurements=trials, cxn=qvm)) print('\n') ```
2018-04-20T17:39:41
python
Hard
marcelotduarte/cx_Freeze
2,220
marcelotduarte__cx_Freeze-2220
[ "2210" ]
639141207611f0edca554978f66b1ed7df3d8cdf
diff --git a/cx_Freeze/winversioninfo.py b/cx_Freeze/winversioninfo.py --- a/cx_Freeze/winversioninfo.py +++ b/cx_Freeze/winversioninfo.py @@ -12,16 +12,16 @@ __all__ = ["Version", "VersionInfo"] +# types +CHAR = "c" +WCHAR = "ss" +WORD = "=H" +DWORD = "=L" + # constants RT_VERSION = 16 ID_VERSION = 1 -# types -CHAR = "c" -DWORD = "L" -WCHAR = "H" -WORD = "H" - VS_FFI_SIGNATURE = 0xFEEF04BD VS_FFI_STRUCVERSION = 0x00010000 VS_FFI_FILEFLAGSMASK = 0x0000003F @@ -32,6 +32,8 @@ KEY_STRING_TABLE = "040904E4" KEY_VAR_FILE_INFO = "VarFileInfo" +COMMENTS_MAX_LEN = (64 - 2) * 1024 // calcsize(WCHAR) + # To disable the experimental feature in Windows: # set CX_FREEZE_STAMP=pywin32 # pip install -U pywin32 @@ -82,7 +84,7 @@ def to_buffer(self): data = data.to_buffer() elif isinstance(data, str): data = data.encode("utf-16le") - elif isinstance(fmt, str): + elif isinstance(data, int): data = pack(fmt, data) buffer += data return buffer @@ -142,7 +144,9 @@ def __init__( value_len = value.wLength fields.append(("Value", type(value))) elif isinstance(value, Structure): - value_len = calcsize("".join([f[1] for f in value._fields])) + value_len = 0 + for field in value._fields: + value_len += calcsize(field[1]) value_type = 0 fields.append(("Value", type(value))) @@ -199,7 +203,8 @@ def __init__( self.valid_version: Version = valid_version self.internal_name: str | None = internal_name self.original_filename: str | None = original_filename - self.comments: str | None = comments + # comments length must be limited to 31kb + self.comments: str = comments[:COMMENTS_MAX_LEN] if comments else None self.company: str | None = company self.description: str | None = description self.copyright: str | None = copyright @@ -221,6 +226,8 @@ def stamp(self, path: str | Path) -> None: version_stamp = import_module("win32verstamp").stamp except ImportError as exc: raise RuntimeError("install pywin32 extension first") from exc + # comments length must be limited to 15kb (uses WORD='h') + 
self.comments = (self.comments or "")[: COMMENTS_MAX_LEN // 2] version_stamp(os.fspath(path), self) return @@ -263,17 +270,18 @@ def version_info(self, path: Path) -> String: elif len(self.valid_version.release) >= 4: build = self.valid_version.release[3] + # use the data in the order shown in 'pepper' data = { - "Comments": self.comments or "", - "CompanyName": self.company or "", "FileDescription": self.description or "", "FileVersion": self.version, "InternalName": self.internal_name or path.name, + "CompanyName": self.company or "", "LegalCopyright": self.copyright or "", "LegalTrademarks": self.trademarks or "", "OriginalFilename": self.original_filename or path.name, "ProductName": self.product or "", "ProductVersion": str(self.valid_version), + "Comments": self.comments or "", } is_dll = self.dll if is_dll is None: @@ -311,6 +319,7 @@ def version_info(self, path: Path) -> String: string_version_info = String(KEY_VERSION_INFO, fixed_file_info) string_version_info.children(string_file_info) string_version_info.children(var_file_info) + return string_version_info
diff --git a/tests/test_winversioninfo.py b/tests/test_winversioninfo.py --- a/tests/test_winversioninfo.py +++ b/tests/test_winversioninfo.py @@ -9,7 +9,12 @@ import pytest from generate_samples import create_package, run_command -from cx_Freeze.winversioninfo import Version, VersionInfo, main_test +from cx_Freeze.winversioninfo import ( + COMMENTS_MAX_LEN, + Version, + VersionInfo, + main_test, +) PLATFORM = get_platform() PYTHON_VERSION = get_python_version() @@ -97,6 +102,14 @@ def test___init__with_kwargs(self): assert version_instance.debug is input_debug assert version_instance.verbose is input_verbose + def test_big_comment(self): + """Tests a big comment value for the VersionInfo class.""" + input_version = "9.9.9.9" + input_comments = "TestComment" + "=" * COMMENTS_MAX_LEN + version_instance = VersionInfo(input_version, comments=input_comments) + assert version_instance.version == "9.9.9.9" + assert version_instance.comments == input_comments[:COMMENTS_MAX_LEN] + @pytest.mark.parametrize( ("input_version", "version"), [
Cannot freeze python-3.12 code on Windows 11 **Describe the bug** Cannot freeze python 3.12 code on Windows 11 Pro amd64 using cx_Freeze 6.16.aplha versions, last I tried is 20. This was working fine three weeks ago, but suddenly it started to fail like this: ``` copying C:\Users\jmarcet\scoop\apps\openjdk17\17.0.2-8\bin\api-ms-win-core-console-l1-2-0.dll -> C:\Users\jmarcet\src\movistar-u7d\build\exe.win-amd64-3.12\api-ms-win-core-console-l1-2-0.dll copying C:\Users\jmarcet\scoop\apps\python312\3.12.1\python312.dll -> C:\Users\jmarcet\src\movistar-u7d\build\exe.win-amd64-3.12\python312.dll WARNING: cannot find 'api-ms-win-core-path-l1-1-0.dll' copying C:\Users\jmarcet\scoop\persist\python312\Lib\site-packages\cx_Freeze\bases\console-cpython-312-win_amd64.exe -> C:\Users\jmarcet\src\movistar-u7d\build\exe.win-amd64-3.12\movistar_epg.exe copying C:\Users\jmarcet\scoop\persist\python312\Lib\site-packages\cx_Freeze\initscripts\frozen_application_license.txt -> C:\Users\jmarcet\src\movistar-u7d\build\exe.win-amd64-3.12\frozen_application_license.txt data=72092 Traceback (most recent call last): File "C:\Users\jmarcet\src\movistar-u7d\setup.py", line 25, in <module> setup( File "C:\Users\jmarcet\scoop\apps\python312\current\Lib\site-packages\cx_Freeze\__init__.py", line 68, in setup setuptools.setup(**attrs) File "C:\Users\jmarcet\scoop\apps\python312\current\Lib\site-packages\setuptools\__init__.py", line 103, in setup return distutils.core.setup(**attrs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "C:\Users\jmarcet\scoop\apps\python312\current\Lib\site-packages\setuptools\_distutils\core.py", line 185, in setup return run_commands(dist) ^^^^^^^^^^^^^^^^^^ File "C:\Users\jmarcet\scoop\apps\python312\current\Lib\site-packages\setuptools\_distutils\core.py", line 201, in run_commands dist.run_commands() File "C:\Users\jmarcet\scoop\apps\python312\current\Lib\site-packages\setuptools\_distutils\dist.py", line 969, in run_commands self.run_command(cmd) File 
"C:\Users\jmarcet\scoop\apps\python312\current\Lib\site-packages\setuptools\dist.py", line 963, in run_command super().run_command(command) File "C:\Users\jmarcet\scoop\apps\python312\current\Lib\site-packages\setuptools\_distutils\dist.py", line 988, in run_command cmd_obj.run() File "C:\Users\jmarcet\scoop\apps\python312\current\Lib\site-packages\setuptools\_distutils\command\build.py", line 131, in run self.run_command(cmd_name) File "C:\Users\jmarcet\scoop\apps\python312\current\Lib\site-packages\setuptools\_distutils\cmd.py", line 318, in run_command self.distribution.run_command(command) File "C:\Users\jmarcet\scoop\apps\python312\current\Lib\site-packages\setuptools\dist.py", line 963, in run_command super().run_command(command) File "C:\Users\jmarcet\scoop\apps\python312\current\Lib\site-packages\setuptools\_distutils\dist.py", line 988, in run_command cmd_obj.run() File "C:\Users\jmarcet\scoop\apps\python312\current\Lib\site-packages\cx_Freeze\command\build_exe.py", line 284, in run freezer.freeze() File "C:\Users\jmarcet\scoop\apps\python312\current\Lib\site-packages\cx_Freeze\freezer.py", line 731, in freeze self._freeze_executable(executable) File "C:\Users\jmarcet\scoop\apps\python312\current\Lib\site-packages\cx_Freeze\freezer.py", line 323, in _freeze_executable self._add_resources(exe) File "C:\Users\jmarcet\scoop\apps\python312\current\Lib\site-packages\cx_Freeze\freezer.py", line 794, in _add_resources version.stamp(target_path) File "C:\Users\jmarcet\scoop\apps\python312\current\Lib\site-packages\cx_Freeze\winversioninfo.py", line 240, in stamp handle, RT_VERSION, ID_VERSION, string_version_info.to_buffer() ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "C:\Users\jmarcet\scoop\apps\python312\current\Lib\site-packages\cx_Freeze\winversioninfo.py", line 96, in to_buffer data = pack(fmt, data) ^^^^^^^^^^^^^^^ struct.error: 'H' format requires 0 <= number <= 65535 ``` **To Reproduce** ``` git clone -b next https://github.com/jmarcet/movistar-u7d cd 
movistar-u7d pip install --force --no-cache --pre --upgrade --extra-index-url https://marcelotduarte.github.io/packages/cx_Freeze pip install -r requirements-win.txt python .\setup.py build ``` **Expected behavior** Frozen artifacts saved under `build` dir **Desktop (please complete the following information):** - Platform information: Windows 11 Pro - OS architecture: amd64 - cx_Freeze version: [cx_Freeze-6.16.0.dev20-cp312-cp312-win_amd64.whl](https://marcelotduarte.github.io/packages/cx-freeze/cx_Freeze-6.16.0.dev20-cp312-cp312-win_amd64.whl) - Python version: 3.12.1 **Additional context** I had initially reported it on #2153
Please check the version installed of cx_Freeze and setuptools with `pip list`. Successfully installed aiofiles-23.2.1 aiohttp-3.9.1 aiosignal-1.3.1 asyncio-3.4.3 asyncio_dgram-2.1.2 attrs-23.2.0 cx-Logging-3.1.0 **cx_Freeze-6.16.0.dev9** defusedxml-0.8.0rc2 filelock-3.13.1 frozenlist-1.4.1 httptools-0.6.1 idna-3.6 lief-0.14.0 multidict-6.0.4 prometheus-client-0.7.1 psutil-5.9.8 pywin32-306 sanic-22.6.2 sanic-prometheus-0.2.1 sanic-routing-22.3.0 **setuptools-68.2.2** tomli-2.0.1 ujson-5.9.0 websockets-10.4 wheel-0.41.2 wmi-1.5.1 xmltodict-0.13.0 yarl-1.9.4 You should update your requirements-win.txt, insert the first line: --extra-index-url https://marcelotduarte.github.io/packages/ OR install the new development release after the requirements. Also, update setuptools. @marcelotduarte I still have the same issue ``` > pip list Package Version ------------------ ------------ aiofiles 23.2.1 aiohttp 3.9.1 aiosignal 1.3.1 astroid 3.0.2 asttokens 2.4.1 asyncio 3.4.3 asyncio-dgram 2.1.2 attrs 23.2.0 bandit 1.7.6 certifi 2023.11.17 charset-normalizer 3.3.2 colorama 0.4.6 cx-Freeze 6.16.0.dev23 cx_Logging 3.1.0 decorator 5.1.1 defusedxml 0.7.1 dill 0.3.7 executing 2.0.1 filelock 3.13.1 frozenlist 1.4.1 gitdb 4.0.11 GitPython 3.1.41 httpie 3.2.2 httptools 0.6.1 idna 3.6 ipython 8.20.0 isort 5.13.2 jedi 0.19.1 lief 0.15.0 markdown-it-py 3.0.0 matplotlib-inline 0.1.6 mccabe 0.7.0 mdurl 0.1.2 multidict 6.0.4 parso 0.8.3 pbr 6.0.0 pip 23.2.1 platformdirs 4.1.0 prometheus-client 0.7.1 prompt-toolkit 3.0.43 psutil 5.9.8 pure-eval 0.2.2 Pygments 2.17.2 pylint 3.0.3 pynvim 0.5.0 PySocks 1.7.1 pywin32 306 PyYAML 6.0.1 requests 2.31.0 requests-toolbelt 1.0.0 rich 13.7.0 ruff 0.1.11 sanic 22.6.2 sanic-prometheus 0.2.1 sanic-routing 22.3.0 setuptools 69.0.3 six 1.16.0 smmap 5.0.1 stack-data 0.6.3 stevedore 5.1.0 tomli 2.0.1 tomlkit 0.12.3 traitlets 5.14.1 ujson 5.9.0 urllib3 2.1.0 wcwidth 0.2.13 websockets 10.4 wheel 0.42.0 WMI 1.5.1 xmltodict 0.13.0 yarl 1.9.4 ```
2024-01-25T06:06:14
python
Easy
pytest-dev/pytest-django
1,108
pytest-dev__pytest-django-1108
[ "1106" ]
6cf63b65e86870abf68ae1f376398429e35864e7
diff --git a/pytest_django/plugin.py b/pytest_django/plugin.py --- a/pytest_django/plugin.py +++ b/pytest_django/plugin.py @@ -362,8 +362,15 @@ def _get_option_with_source( @pytest.hookimpl(trylast=True) def pytest_configure(config: pytest.Config) -> None: - # Allow Django settings to be configured in a user pytest_configure call, - # but make sure we call django.setup() + if config.getoption("version", 0) > 0 or config.getoption("help", False): + return + + # Normally Django is set up in `pytest_load_initial_conftests`, but we also + # allow users to not set DJANGO_SETTINGS_MODULE/`--ds` and instead + # configure the Django settings in a `pytest_configure` hookimpl using e.g. + # `settings.configure(...)`. In this case, the `_setup_django` call in + # `pytest_load_initial_conftests` only partially initializes Django, and + # it's fully initialized here. _setup_django(config) @@ -470,8 +477,7 @@ def get_order_number(test: pytest.Item) -> int: @pytest.fixture(autouse=True, scope="session") def django_test_environment(request: pytest.FixtureRequest) -> Generator[None, None, None]: - """ - Ensure that Django is loaded and has its testing environment setup. + """Setup Django's test environment for the testing session. XXX It is a little dodgy that this is an autouse fixture. Perhaps an email fixture should be requested in order to be able to @@ -481,7 +487,6 @@ def django_test_environment(request: pytest.FixtureRequest) -> Generator[None, N we need to follow this model. """ if django_settings_is_configured(): - _setup_django(request.config) from django.test.utils import setup_test_environment, teardown_test_environment debug_ini = request.config.getini("django_debug_mode")
diff --git a/tests/test_manage_py_scan.py b/tests/test_manage_py_scan.py --- a/tests/test_manage_py_scan.py +++ b/tests/test_manage_py_scan.py @@ -144,6 +144,37 @@ def test_django_project_found_invalid_settings_version( result.stdout.fnmatch_lines(["*usage:*"]) +@pytest.mark.django_project(project_root="django_project_root", create_manage_py=True) +def test_django_project_late_settings_version( + django_pytester: DjangoPytester, + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Late configuration should not cause an error with --help or --version.""" + monkeypatch.delenv("DJANGO_SETTINGS_MODULE") + django_pytester.makepyfile( + t="WAT = 1", + ) + django_pytester.makeconftest( + """ + import os + + def pytest_configure(): + os.environ.setdefault('DJANGO_SETTINGS_MODULE', 't') + from django.conf import settings + settings.WAT + """ + ) + + result = django_pytester.runpytest_subprocess("django_project_root", "--version", "--version") + assert result.ret == 0 + + result.stdout.fnmatch_lines(["*This is pytest version*"]) + + result = django_pytester.runpytest_subprocess("django_project_root", "--help") + assert result.ret == 0 + result.stdout.fnmatch_lines(["*usage:*"]) + + @pytest.mark.django_project(project_root="django_project_root", create_manage_py=True) def test_runs_without_error_on_long_args(django_pytester: DjangoPytester) -> None: django_pytester.create_test_module(
`pytest --help` fails in a partially configured app having a difficult time narrowing down a minimal example -- the repo involved is https://github.com/getsentry/sentry I have figured out _why_ it is happening and the stacktrace for it: <summary>full stacktrace with error <details> ```console $ pytest --help /Users/asottile/workspace/sentry/.venv/lib/python3.10/site-packages/trio/_core/_multierror.py:511: RuntimeWarning: You seem to already have a custom sys.excepthook handler installed. I'll skip installing Trio's custom handler, but this means MultiErrors will not show full tracebacks. warnings.warn( Traceback (most recent call last): File "/Users/asottile/workspace/sentry/.venv/bin/pytest", line 8, in <module> sys.exit(console_main()) File "/Users/asottile/workspace/sentry/.venv/lib/python3.10/site-packages/_pytest/config/__init__.py", line 190, in console_main code = main() File "/Users/asottile/workspace/sentry/.venv/lib/python3.10/site-packages/_pytest/config/__init__.py", line 167, in main ret: Union[ExitCode, int] = config.hook.pytest_cmdline_main( File "/Users/asottile/workspace/sentry/.venv/lib/python3.10/site-packages/pluggy/hooks.py", line 286, in __call__ return self._hookexec(self, self.get_hookimpls(), kwargs) File "/Users/asottile/workspace/sentry/.venv/lib/python3.10/site-packages/pluggy/manager.py", line 93, in _hookexec return self._inner_hookexec(hook, methods, kwargs) File "/Users/asottile/workspace/sentry/.venv/lib/python3.10/site-packages/pluggy/manager.py", line 84, in <lambda> self._inner_hookexec = lambda hook, methods, kwargs: hook.multicall( File "/Users/asottile/workspace/sentry/.venv/lib/python3.10/site-packages/pluggy/callers.py", line 208, in _multicall return outcome.get_result() File "/Users/asottile/workspace/sentry/.venv/lib/python3.10/site-packages/pluggy/callers.py", line 80, in get_result raise ex[1].with_traceback(ex[2]) File "/Users/asottile/workspace/sentry/.venv/lib/python3.10/site-packages/pluggy/callers.py", line 187, in 
_multicall res = hook_impl.function(*args) File "/Users/asottile/workspace/sentry/.venv/lib/python3.10/site-packages/_pytest/helpconfig.py", line 152, in pytest_cmdline_main config._do_configure() File "/Users/asottile/workspace/sentry/.venv/lib/python3.10/site-packages/_pytest/config/__init__.py", line 1037, in _do_configure self.hook.pytest_configure.call_historic(kwargs=dict(config=self)) File "/Users/asottile/workspace/sentry/.venv/lib/python3.10/site-packages/pluggy/hooks.py", line 308, in call_historic res = self._hookexec(self, self.get_hookimpls(), kwargs) File "/Users/asottile/workspace/sentry/.venv/lib/python3.10/site-packages/pluggy/manager.py", line 93, in _hookexec return self._inner_hookexec(hook, methods, kwargs) File "/Users/asottile/workspace/sentry/.venv/lib/python3.10/site-packages/pluggy/manager.py", line 84, in <lambda> self._inner_hookexec = lambda hook, methods, kwargs: hook.multicall( File "/Users/asottile/workspace/sentry/.venv/lib/python3.10/site-packages/pluggy/callers.py", line 208, in _multicall return outcome.get_result() File "/Users/asottile/workspace/sentry/.venv/lib/python3.10/site-packages/pluggy/callers.py", line 80, in get_result raise ex[1].with_traceback(ex[2]) File "/Users/asottile/workspace/sentry/.venv/lib/python3.10/site-packages/pluggy/callers.py", line 187, in _multicall res = hook_impl.function(*args) File "/Users/asottile/workspace/sentry/.venv/lib/python3.10/site-packages/pytest_django/plugin.py", line 367, in pytest_configure _setup_django(config) File "/Users/asottile/workspace/sentry/.venv/lib/python3.10/site-packages/pytest_django/plugin.py", line 238, in _setup_django blocking_manager = config.stash[blocking_manager_key] File "/Users/asottile/workspace/sentry/.venv/lib/python3.10/site-packages/_pytest/stash.py", line 80, in __getitem__ return cast(T, self._storage[key]) KeyError: <_pytest.stash.StashKey object at 0x1066ab520> ``` </details> </summary> basically what's happening is the setup is skipped here: 
https://github.com/pytest-dev/pytest-django/blob/6cf63b65e86870abf68ae1f376398429e35864e7/pytest_django/plugin.py#L300-L301 normally it sets the thing that's being looked up here: https://github.com/pytest-dev/pytest-django/blob/6cf63b65e86870abf68ae1f376398429e35864e7/pytest_django/plugin.py#L358 which then fails to lookup here: https://github.com/pytest-dev/pytest-django/blob/6cf63b65e86870abf68ae1f376398429e35864e7/pytest_django/plugin.py#L238 something about sentry's `tests/conftest.py` initializes enough of django that `pytest-django` takes over. but since the setup has been skipped it fails to set up properly. I suspect that #238 is playing poorly with something. of note this worked before I upgraded `pytest-django` (I was previously on 4.4.0 and upgraded to 4.7.0 to get django 4.x support) will try and narrow down a smaller reproduction...
That’s a fun one! Hopefully using `config.stash.get()` calls and acting only on non-`None` values will be enough to fix the issue... here's a minimal case: ```console ==> t.py <== WAT = 1 ==> tests/__init__.py <== ==> tests/conftest.py <== import os def pytest_configure(): os.environ.setdefault('DJANGO_SETTINGS_MODULE', 't') from django.conf import settings settings.WAT ```
2024-01-29T14:22:15
python
Hard
marcelotduarte/cx_Freeze
2,597
marcelotduarte__cx_Freeze-2597
[ "2596" ]
df2c8aef8f92da535a1bb657706ca4496b1c3352
diff --git a/cx_Freeze/finder.py b/cx_Freeze/finder.py --- a/cx_Freeze/finder.py +++ b/cx_Freeze/finder.py @@ -537,7 +537,10 @@ def _replace_package_in_code(module: Module) -> CodeType: # Insert a bytecode to set __package__ as module.parent.name codes = [LOAD_CONST, pkg_const_index, STORE_NAME, pkg_name_index] codestring = bytes(codes) + code.co_code - consts.append(module.parent.name) + if module.file.stem == "__init__": + consts.append(module.name) + else: + consts.append(module.parent.name) code = code_object_replace( code, co_code=codestring, co_consts=consts ) diff --git a/cx_Freeze/hooks/scipy.py b/cx_Freeze/hooks/scipy.py --- a/cx_Freeze/hooks/scipy.py +++ b/cx_Freeze/hooks/scipy.py @@ -18,12 +18,18 @@ def load_scipy(finder: ModuleFinder, module: Module) -> None: """The scipy package. - Supported pypi and conda-forge versions (lasted tested version is 1.11.2). + Supported pypi and conda-forge versions (lasted tested version is 1.14.1). """ source_dir = module.file.parent.parent / f"{module.name}.libs" if source_dir.exists(): # scipy >= 1.9.2 (windows) - finder.include_files(source_dir, f"lib/{source_dir.name}") - replace_delvewheel_patch(module) + if IS_WINDOWS: + finder.include_files(source_dir, f"lib/{source_dir.name}") + replace_delvewheel_patch(module) + else: + target_dir = f"lib/{source_dir.name}" + for source in source_dir.iterdir(): + finder.lib_files[source] = f"{target_dir}/{source.name}" + finder.include_package("scipy.integrate") finder.include_package("scipy._lib") finder.include_package("scipy.misc")
diff --git a/samples/scipy/test_scipy.py b/samples/scipy/test_scipy.py --- a/samples/scipy/test_scipy.py +++ b/samples/scipy/test_scipy.py @@ -1,8 +1,6 @@ """A simple script to demonstrate scipy.""" -from scipy.stats import norm +from scipy.spatial.transform import Rotation if __name__ == "__main__": - print( - "bounds of distribution lower: {}, upper: {}".format(*norm.support()) - ) + print(Rotation.from_euler("XYZ", [10, 10, 10], degrees=True).as_matrix())
cx-Freeze - No module named 'scipy._lib.array_api_compat._aliases' **Prerequisite** This was previously reported in the closed issue #2544, where no action was taken. I include a minimal script that produces the problem for me. **Describe the bug** When running the compiled executable, i get the following error: ``` PS C:\dat\projects\gazeMapper\cxFreeze\build\exe.win-amd64-3.10> .\test.exe Traceback (most recent call last): File "C:\dat\projects\gazeMapper\cxFreeze\.venv\Lib\site-packages\cx_Freeze\initscripts\__startup__.py", line 141, in run module_init.run(f"__main__{name}") File "C:\dat\projects\gazeMapper\cxFreeze\.venv\Lib\site-packages\cx_Freeze\initscripts\console.py", line 25, in run exec(code, main_globals) File "C:\dat\projects\gazeMapper\cxFreeze\test.py", line 1, in <module> from scipy.spatial.transform import Rotation File "C:\dat\projects\gazeMapper\cxFreeze\.venv\lib\site-packages\scipy\spatial\__init__.py", line 110, in <module> from ._kdtree import * File "C:\dat\projects\gazeMapper\cxFreeze\.venv\lib\site-packages\scipy\spatial\_kdtree.py", line 4, in <module> from ._ckdtree import cKDTree, cKDTreeNode File "_ckdtree.pyx", line 11, in init scipy.spatial._ckdtree File "C:\dat\projects\gazeMapper\cxFreeze\.venv\lib\site-packages\scipy\sparse\__init__.py", line 293, in <module> from ._base import * File "C:\dat\projects\gazeMapper\cxFreeze\.venv\lib\site-packages\scipy\sparse\_base.py", line 5, in <module> from ._sputils import (asmatrix, check_reshape_kwargs, check_shape, File "C:\dat\projects\gazeMapper\cxFreeze\.venv\lib\site-packages\scipy\sparse\_sputils.py", line 10, in <module> from scipy._lib._util import np_long, np_ulong File "C:\dat\projects\gazeMapper\cxFreeze\.venv\lib\site-packages\scipy\_lib\_util.py", line 18, in <module> from scipy._lib._array_api import array_namespace, is_numpy, size as xp_size File "C:\dat\projects\gazeMapper\cxFreeze\.venv\lib\site-packages\scipy\_lib\_array_api.py", line 21, in <module> from 
scipy._lib.array_api_compat import ( File "C:\dat\projects\gazeMapper\cxFreeze\.venv\lib\site-packages\scipy\_lib\array_api_compat\numpy\__init__.py", line 16, in <module> __import__(__package__ + '.linalg') ModuleNotFoundError: No module named 'scipy._lib.array_api_compat._aliases' ``` **To Reproduce** Two files: test.py ```python from scipy.spatial.transform import Rotation print(Rotation.from_euler('XYZ', [10, 10, 10], degrees=True).as_matrix()) ``` setup.py: ```python import cx_Freeze import pathlib import sys import site path = pathlib.Path(__file__).absolute().parent def get_include_files(): # don't know if this is a bad idea, it certainly didn't help files = [] # scipy dlls for d in site.getsitepackages(): d=pathlib.Path(d)/'scipy'/'.libs' if d.is_dir(): for f in d.iterdir(): if f.is_file() and f.suffix=='' or f.suffix in ['.dll']: files.append((f,pathlib.Path('lib')/f.name)) return files build_options = { "build_exe": { "optimize": 1, "packages": [ 'numpy','scipy' ], "excludes":["tkinter"], "zip_include_packages": "*", "zip_exclude_packages": [], "silent_level": 1, "include_msvcr": True } } if sys.platform.startswith("win"): build_options["build_exe"]["include_files"] = get_include_files() cx_Freeze.setup( name="test", version="0.0.1", description="test", executables=[ cx_Freeze.Executable( script=path / "test.py", target_name="test" ) ], options=build_options, py_modules=[] ) ``` **Expected behavior** exe runs **Desktop (please complete the following information):** - Windows 11 Enterprise - amd64 - cx_Freeze version 7.2.2 - Python version 3.10 - Numpy 2.1.1 - Scipy 1.14.1 **Additional context** at `\.venv\Lib\site-packages\scipy\_lib\array_api_compat` there is no `_aliases.py`, only `__init__.py` with the following content: ```python __version__ = '1.5.1' from .common import * # noqa: F401, F403 ``` `_aliases.py` does exist at `\.venv\Lib\site-packages\scipy\_lib\array_api_compat\common` Both files are packed into library.zip (whole scipy tree is)
Changing the config to `"zip_exclude_packages": ['scipy']`, things work. I assume it should work just fine/the same from the zip file. This will be my workaround for now
2024-10-02T02:42:26
python
Easy
rigetti/pyquil
1,149
rigetti__pyquil-1149
[ "980" ]
07db509c5293df2b4624ca6ac409e4fce2666ea1
diff --git a/pyquil/device/_isa.py b/pyquil/device/_isa.py --- a/pyquil/device/_isa.py +++ b/pyquil/device/_isa.py @@ -13,8 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. ############################################################################## -from collections import namedtuple -from typing import Union +import sys +from typing import Any, Dict, List, Optional, Sequence, Tuple, Union import networkx as nx import numpy as np @@ -22,35 +22,64 @@ from pyquil.quilatom import Parameter, unpack_qubit from pyquil.quilbase import Gate -THETA = Parameter("theta") -"Used as the symbolic parameter in RZ, CPHASE gates." +if sys.version_info < (3, 7): + from pyquil.external.dataclasses import dataclass +else: + from dataclasses import dataclass DEFAULT_QUBIT_TYPE = "Xhalves" DEFAULT_EDGE_TYPE = "CZ" +THETA = Parameter("theta") +"Used as the symbolic parameter in RZ, CPHASE gates." + -Qubit = namedtuple("Qubit", ["id", "type", "dead", "gates"]) -Edge = namedtuple("Edge", ["targets", "type", "dead", "gates"]) -_ISA = namedtuple("_ISA", ["qubits", "edges"]) +@dataclass +class MeasureInfo: + operator: Optional[str] = None + qubit: Optional[int] = None + target: Optional[Union[int, str]] = None + duration: Optional[float] = None + fidelity: Optional[float] = None -MeasureInfo = namedtuple("MeasureInfo", ["operator", "qubit", "target", "duration", "fidelity"]) -GateInfo = namedtuple("GateInfo", ["operator", "parameters", "arguments", "duration", "fidelity"]) -# make Qubit and Edge arguments optional -Qubit.__new__.__defaults__ = (None,) * len(Qubit._fields) -Edge.__new__.__defaults__ = (None,) * len(Edge._fields) -MeasureInfo.__new__.__defaults__ = (None,) * len(MeasureInfo._fields) -GateInfo.__new__.__defaults__ = (None,) * len(GateInfo._fields) +@dataclass +class GateInfo: + operator: Optional[str] = None + parameters: Optional[Sequence[Union[str, float]]] = None + arguments: Optional[Sequence[Union[str, 
float]]] = None + duration: Optional[float] = None + fidelity: Optional[float] = None -class ISA(_ISA): +@dataclass +class Qubit: + id: int + type: Optional[str] = None + dead: Optional[bool] = None + gates: Optional[Sequence[Union[GateInfo, MeasureInfo]]] = None + + +@dataclass +class Edge: + targets: Tuple[int, ...] + type: Optional[str] = None + dead: Optional[bool] = None + gates: Optional[Sequence[GateInfo]] = None + + +@dataclass +class ISA: """ Basic Instruction Set Architecture specification. - :ivar Sequence[Qubit] qubits: The qubits associated with the ISA. - :ivar Sequence[Edge] edges: The multi-qubit gates. + :ivar qubits: The qubits associated with the ISA. + :ivar edges: The multi-qubit gates. """ - def to_dict(self): + qubits: Sequence[Qubit] + edges: Sequence[Edge] + + def to_dict(self) -> Dict[str, Any]: """ Create a JSON-serializable representation of the ISA. @@ -80,19 +109,17 @@ def to_dict(self): } :return: A dictionary representation of self. - :rtype: Dict[str, Any] """ - def _maybe_configure(o, t): - # type: (Union[Qubit,Edge], str) -> dict + def _maybe_configure(o: Union[Qubit, Edge], t: str) -> Dict[str, Any]: """ Exclude default values from generated dictionary. - :param Union[Qubit,Edge] o: The object to serialize - :param str t: The default value for ``o.type``. + :param o: The object to serialize + :param t: The default value for ``o.type``. :return: d """ - d = {} + d: Dict[str, Any] = {} if o.gates is not None: d["gates"] = [ { @@ -127,13 +154,12 @@ def _maybe_configure(o, t): } @staticmethod - def from_dict(d): + def from_dict(d: Dict[str, Any]) -> "ISA": """ Re-create the ISA from a dictionary representation. - :param Dict[str,Any] d: The dictionary representation. + :param d: The dictionary representation. :return: The restored ISA. 
- :rtype: ISA """ return ISA( qubits=sorted( @@ -150,7 +176,7 @@ def from_dict(d): edges=sorted( [ Edge( - targets=[int(q) for q in eid.split("-")], + targets=tuple(int(q) for q in eid.split("-")), type=e.get("type", DEFAULT_EDGE_TYPE), dead=e.get("dead", False), ) @@ -161,13 +187,12 @@ def from_dict(d): ) -def gates_in_isa(isa): +def gates_in_isa(isa: ISA) -> List[Gate]: """ Generate the full gateset associated with an ISA. - :param ISA isa: The instruction set architecture for a QPU. + :param isa: The instruction set architecture for a QPU. :return: A sequence of Gate objects encapsulating all gates compatible with the ISA. - :rtype: Sequence[Gate] """ gates = [] for q in isa.qubits: @@ -211,6 +236,7 @@ def gates_in_isa(isa): gates.append(Gate("XY", [THETA], targets)) gates.append(Gate("XY", [THETA], targets[::-1])) continue + assert e.type is not None if "WILDCARD" in e.type: gates.append(Gate("_", "_", targets)) gates.append(Gate("_", "_", targets[::-1])) @@ -220,7 +246,7 @@ def gates_in_isa(isa): return gates -def isa_from_graph(graph: nx.Graph, oneq_type="Xhalves", twoq_type="CZ") -> ISA: +def isa_from_graph(graph: nx.Graph, oneq_type: str = "Xhalves", twoq_type: str = "CZ") -> ISA: """ Generate an ISA object from a NetworkX graph. 
@@ -230,7 +256,7 @@ def isa_from_graph(graph: nx.Graph, oneq_type="Xhalves", twoq_type="CZ") -> ISA: """ all_qubits = list(range(max(graph.nodes) + 1)) qubits = [Qubit(i, type=oneq_type, dead=i not in graph.nodes) for i in all_qubits] - edges = [Edge(sorted((a, b)), type=twoq_type, dead=False) for a, b in graph.edges] + edges = [Edge(tuple(sorted((a, b))), type=twoq_type, dead=False) for a, b in graph.edges] return ISA(qubits, edges) diff --git a/pyquil/device/_main.py b/pyquil/device/_main.py --- a/pyquil/device/_main.py +++ b/pyquil/device/_main.py @@ -15,7 +15,7 @@ ############################################################################## import warnings from abc import ABC, abstractmethod -from typing import List, Tuple +from typing import Any, Dict, List, Optional, Tuple, Union import networkx as nx import numpy as np @@ -42,7 +42,7 @@ class AbstractDevice(ABC): @abstractmethod - def qubits(self): + def qubits(self) -> List[int]: """ A sorted list of qubits in the device topology. """ @@ -54,7 +54,7 @@ def qubit_topology(self) -> nx.Graph: """ @abstractmethod - def get_isa(self, oneq_type="Xhalves", twoq_type="CZ") -> ISA: + def get_isa(self, oneq_type: str = "Xhalves", twoq_type: str = "CZ") -> ISA: """ Construct an ISA suitable for targeting by compilation. @@ -65,7 +65,7 @@ def get_isa(self, oneq_type="Xhalves", twoq_type="CZ") -> ISA: """ @abstractmethod - def get_specs(self) -> Specs: + def get_specs(self) -> Optional[Specs]: """ Construct a Specs object required by compilation """ @@ -86,7 +86,7 @@ class Device(AbstractDevice): :ivar NoiseModel noise_model: The noise model for the device. """ - def __init__(self, name, raw): + def __init__(self, name: str, raw: Dict[str, Any]): """ :param name: name of the device :param raw: raw JSON response from the server with additional information about this device. 
@@ -102,23 +102,25 @@ def __init__(self, name, raw): ) @property - def isa(self): + def isa(self) -> Optional[ISA]: warnings.warn("Accessing the static ISA is deprecated. Use `get_isa`", DeprecationWarning) return self._isa - def qubits(self): + def qubits(self) -> List[int]: + assert self._isa is not None return sorted(q.id for q in self._isa.qubits if not q.dead) def qubit_topology(self) -> nx.Graph: """ The connectivity of qubits in this device given as a NetworkX graph. """ + assert self._isa is not None return isa_to_graph(self._isa) - def get_specs(self): + def get_specs(self) -> Optional[Specs]: return self.specs - def get_isa(self, oneq_type=None, twoq_type=None) -> ISA: + def get_isa(self, oneq_type: Optional[str] = None, twoq_type: Optional[str] = None) -> ISA: """ Construct an ISA suitable for targeting by compilation. @@ -130,7 +132,7 @@ def get_isa(self, oneq_type=None, twoq_type=None) -> ISA: "make an ISA with custom gate types, you'll have to do it by hand." ) - def safely_get(attr, index, default): + def safely_get(attr: str, index: Union[int, Tuple[int, ...]], default: Any) -> Any: if self.specs is None: return default @@ -144,8 +146,8 @@ def safely_get(attr, index, default): else: return default - def qubit_type_to_gates(q): - gates = [ + def qubit_type_to_gates(q: Qubit) -> List[Union[GateInfo, MeasureInfo]]: + gates: List[Union[GateInfo, MeasureInfo]] = [ MeasureInfo( operator="MEASURE", qubit=q.id, @@ -200,9 +202,9 @@ def qubit_type_to_gates(q): ] return gates - def edge_type_to_gates(e): - gates = [] - if e is None or "CZ" in e.type: + def edge_type_to_gates(e: Edge) -> List[GateInfo]: + gates: List[GateInfo] = [] + if e is None or isinstance(e.type, str) and "CZ" in e.type: gates += [ GateInfo( operator="CZ", @@ -212,7 +214,7 @@ def edge_type_to_gates(e): fidelity=safely_get("fCZs", tuple(e.targets), DEFAULT_CZ_FIDELITY), ) ] - if e is not None and "ISWAP" in e.type: + if e is None or isinstance(e.type, str) and "ISWAP" in e.type: gates += [ 
GateInfo( operator="ISWAP", @@ -222,7 +224,7 @@ def edge_type_to_gates(e): fidelity=safely_get("fISWAPs", tuple(e.targets), DEFAULT_ISWAP_FIDELITY), ) ] - if e is not None and "CPHASE" in e.type: + if e is None or isinstance(e.type, str) and "CPHASE" in e.type: gates += [ GateInfo( operator="CPHASE", @@ -232,7 +234,7 @@ def edge_type_to_gates(e): fidelity=safely_get("fCPHASEs", tuple(e.targets), DEFAULT_CPHASE_FIDELITY), ) ] - if e is not None and "XY" in e.type: + if e is None or isinstance(e.type, str) and "XY" in e.type: gates += [ GateInfo( operator="XY", @@ -242,7 +244,7 @@ def edge_type_to_gates(e): fidelity=safely_get("fXYs", tuple(e.targets), DEFAULT_XY_FIDELITY), ) ] - if e is not None and "WILDCARD" in e.type: + if e is None or isinstance(e.type, str) and "WILDCARD" in e.type: gates += [ GateInfo( operator="_", @@ -254,6 +256,7 @@ def edge_type_to_gates(e): ] return gates + assert self._isa is not None qubits = [ Qubit(id=q.id, type=None, dead=q.dead, gates=qubit_type_to_gates(q)) for q in self._isa.qubits @@ -264,10 +267,10 @@ def edge_type_to_gates(e): ] return ISA(qubits, edges) - def __str__(self): + def __str__(self) -> str: return "<Device {}>".format(self.name) - def __repr__(self): + def __repr__(self) -> str: return str(self) @@ -284,17 +287,17 @@ class NxDevice(AbstractDevice): def __init__(self, topology: nx.Graph) -> None: self.topology = topology - def qubit_topology(self): + def qubit_topology(self) -> nx.Graph: return self.topology - def get_isa(self, oneq_type="Xhalves", twoq_type="CZ"): + def get_isa(self, oneq_type: str = "Xhalves", twoq_type: str = "CZ") -> ISA: return isa_from_graph(self.topology, oneq_type=oneq_type, twoq_type=twoq_type) - def get_specs(self): + def get_specs(self) -> Specs: return specs_from_graph(self.topology) def qubits(self) -> List[int]: return sorted(self.topology.nodes) - def edges(self) -> List[Tuple[int, int]]: - return sorted(tuple(sorted(pair)) for pair in self.topology.edges) # type: ignore + def 
edges(self) -> List[Tuple[Any, ...]]: + return sorted(tuple(sorted(pair)) for pair in self.topology.edges) diff --git a/pyquil/device/_specs.py b/pyquil/device/_specs.py --- a/pyquil/device/_specs.py +++ b/pyquil/device/_specs.py @@ -13,104 +13,105 @@ # See the License for the specific language governing permissions and # limitations under the License. ############################################################################## +import sys import warnings -from collections import namedtuple +from typing import Any, Dict, Optional, Sequence, Tuple import networkx as nx -QubitSpecs = namedtuple( - "_QubitSpecs", - [ - "id", - "fRO", - "f1QRB", - "f1QRB_std_err", - "f1Q_simultaneous_RB", - "f1Q_simultaneous_RB_std_err", - "T1", - "T2", - "fActiveReset", - ], -) -EdgeSpecs = namedtuple( - "_QubitQubitSpecs", - [ - "targets", - "fBellState", - "fCZ", - "fCZ_std_err", - "fCPHASE", - "fCPHASE_std_err", - "fXY", - "fXY_std_err", - "fISWAP", - "fISWAP_std_err", - ], -) -_Specs = namedtuple("_Specs", ["qubits_specs", "edges_specs"]) - - -class Specs(_Specs): +if sys.version_info < (3, 7): + from pyquil.external.dataclasses import dataclass +else: + from dataclasses import dataclass + + +@dataclass +class QubitSpecs: + id: int + fRO: Optional[float] + f1QRB: Optional[float] + f1QRB_std_err: Optional[float] + f1Q_simultaneous_RB: Optional[float] + f1Q_simultaneous_RB_std_err: Optional[float] + T1: Optional[float] + T2: Optional[float] + fActiveReset: Optional[float] + + +@dataclass +class EdgeSpecs: + targets: Tuple[int, ...] + fBellState: Optional[float] + fCZ: Optional[float] + fCZ_std_err: Optional[float] + fCPHASE: Optional[float] + fCPHASE_std_err: Optional[float] + fXY: Optional[float] + fXY_std_err: Optional[float] + fISWAP: Optional[float] + fISWAP_std_err: Optional[float] + + +@dataclass +class Specs: """ Basic specifications for the device, such as gate fidelities and coherence times. 
- :ivar List[QubitSpecs] qubits_specs: The specs associated with individual qubits. - :ivar List[EdgesSpecs] edges_specs: The specs associated with edges, or qubit-qubit pairs. + :ivar qubits_specs: The specs associated with individual qubits. + :ivar edges_specs: The specs associated with edges, or qubit-qubit pairs. """ - def f1QRBs(self): + qubits_specs: Sequence[QubitSpecs] + edges_specs: Sequence[EdgeSpecs] + + def f1QRBs(self) -> Dict[int, Optional[float]]: """ Get a dictionary of single-qubit randomized benchmarking fidelities (for individual gate operation, normalized to unity) from the specs, keyed by qubit index. :return: A dictionary of 1Q RB fidelities, normalized to unity. - :rtype: Dict[int, float] """ return {qs.id: qs.f1QRB for qs in self.qubits_specs} - def f1QRB_std_errs(self): + def f1QRB_std_errs(self) -> Dict[int, Optional[float]]: """ Get a dictionary of the standard errors of single-qubit randomized benchmarking fidelities (for individual gate operation, normalized to unity) from the specs, keyed by qubit index. :return: A dictionary of 1Q RB fidelity standard errors, normalized to unity. - :rtype: Dict[int, float] """ return {qs.id: qs.f1QRB_std_err for qs in self.qubits_specs} - def f1Q_simultaneous_RBs(self): + def f1Q_simultaneous_RBs(self) -> Dict[int, Optional[float]]: """ Get a dictionary of single-qubit randomized benchmarking fidelities (for simultaneous gate operation across the chip, normalized to unity) from the specs, keyed by qubit index. :return: A dictionary of simultaneous 1Q RB fidelities, normalized to unity. - :rtype: Dict[int, float] """ return {qs.id: qs.f1Q_simultaneous_RB for qs in self.qubits_specs} - def f1Q_simultaneous_RB_std_errs(self): + def f1Q_simultaneous_RB_std_errs(self) -> Dict[int, Optional[float]]: """ Get a dictionary of the standard errors of single-qubit randomized benchmarking fidelities (for simultaneous gate operation across the chip, normalized to unity) from the specs, keyed by qubit index. 
:return: A dictionary of simultaneous 1Q RB fidelity standard errors, normalized to unity. - :rtype: Dict[int, float] """ return {qs.id: qs.f1Q_simultaneous_RB_std_err for qs in self.qubits_specs} - def fROs(self): + def fROs(self) -> Dict[int, Optional[float]]: """ Get a dictionary of single-qubit readout fidelities (normalized to unity) from the specs, keyed by qubit index. :return: A dictionary of RO fidelities, normalized to unity. - :rtype: Dict[int, float] """ return {qs.id: qs.fRO for qs in self.qubits_specs} - def fActiveResets(self): + def fActiveResets(self) -> Dict[int, Optional[float]]: """ Get a dictionary of single-qubit active reset fidelities (normalized to unity) from the specs, keyed by qubit index. @@ -119,31 +120,28 @@ def fActiveResets(self): """ return {qs.id: qs.fActiveReset for qs in self.qubits_specs} - def T1s(self): + def T1s(self) -> Dict[int, Optional[float]]: """ Get a dictionary of T1s (in seconds) from the specs, keyed by qubit index. :return: A dictionary of T1s, in seconds. - :rtype: Dict[int, float] """ return {qs.id: qs.T1 for qs in self.qubits_specs} - def T2s(self): + def T2s(self) -> Dict[int, Optional[float]]: """ Get a dictionary of T2s (in seconds) from the specs, keyed by qubit index. :return: A dictionary of T2s, in seconds. - :rtype: Dict[int, float] """ return {qs.id: qs.T2 for qs in self.qubits_specs} - def fBellStates(self): + def fBellStates(self) -> Dict[Tuple[int, ...], Optional[float]]: """ Get a dictionary of two-qubit Bell state fidelities (normalized to unity) from the specs, keyed by targets (qubit-qubit pairs). :return: A dictionary of Bell state fidelities, normalized to unity. 
- :rtype: Dict[tuple(int, int), float] """ warnings.warn( DeprecationWarning( @@ -153,73 +151,66 @@ def fBellStates(self): ) return {tuple(es.targets): es.fBellState for es in self.edges_specs} - def fCZs(self): + def fCZs(self) -> Dict[Tuple[int, ...], Optional[float]]: """ Get a dictionary of CZ fidelities (normalized to unity) from the specs, keyed by targets (qubit-qubit pairs). :return: A dictionary of CZ fidelities, normalized to unity. - :rtype: Dict[tuple(int, int), float] """ return {tuple(es.targets): es.fCZ for es in self.edges_specs} - def fISWAPs(self): + def fISWAPs(self) -> Dict[Tuple[int, ...], Optional[float]]: """ Get a dictionary of ISWAP fidelities (normalized to unity) from the specs, keyed by targets (qubit-qubit pairs). :return: A dictionary of ISWAP fidelities, normalized to unity. - :rtype: Dict[tuple(int, int), float] """ return {tuple(es.targets): es.fISWAP for es in self.edges_specs} - def fISWAP_std_errs(self): + def fISWAP_std_errs(self) -> Dict[Tuple[int, ...], Optional[float]]: """ Get a dictionary of the standard errors of the ISWAP fidelities from the specs, keyed by targets (qubit-qubit pairs). :return: A dictionary of ISWAP fidelities, normalized to unity. - :rtype: Dict[tuple(int, int), float] """ return {tuple(es.targets): es.fISWAP_std_err for es in self.edges_specs} - def fXYs(self): + def fXYs(self) -> Dict[Tuple[int, ...], Optional[float]]: """ Get a dictionary of XY(pi) fidelities (normalized to unity) from the specs, keyed by targets (qubit-qubit pairs). :return: A dictionary of XY/2 fidelities, normalized to unity. - :rtype: Dict[tuple(int, int), float] """ return {tuple(es.targets): es.fXY for es in self.edges_specs} - def fXY_std_errs(self): + def fXY_std_errs(self) -> Dict[Tuple[int, ...], Optional[float]]: """ Get a dictionary of the standard errors of the XY fidelities from the specs, keyed by targets (qubit-qubit pairs). :return: A dictionary of XY fidelities, normalized to unity. 
- :rtype: Dict[tuple(int, int), float] """ return {tuple(es.targets): es.fXY_std_err for es in self.edges_specs} - def fCZ_std_errs(self): + def fCZ_std_errs(self) -> Dict[Tuple[int, ...], Optional[float]]: """ Get a dictionary of the standard errors of the CZ fidelities from the specs, keyed by targets (qubit-qubit pairs). :return: A dictionary of CZ fidelities, normalized to unity. - :rtype: Dict[tuple(int, int), float] """ return {tuple(es.targets): es.fCZ_std_err for es in self.edges_specs} - def fCPHASEs(self): + def fCPHASEs(self) -> Dict[Tuple[int, ...], Optional[float]]: """ Get a dictionary of CPHASE fidelities (normalized to unity) from the specs, keyed by targets (qubit-qubit pairs). :return: A dictionary of CPHASE fidelities, normalized to unity. - :rtype: Dict[tuple(int, int), float] """ warnings.warn( DeprecationWarning( @@ -229,7 +220,7 @@ def fCPHASEs(self): ) return {tuple(es.targets): es.fCPHASE for es in self.edges_specs} - def to_dict(self): + def to_dict(self) -> Dict[str, Any]: """ Create a JSON-serializable representation of the device Specs. @@ -270,7 +261,6 @@ def to_dict(self): } :return: A dctionary representation of self. - :rtype: Dict[str, Any] """ return { "1Q": { @@ -303,13 +293,12 @@ def to_dict(self): } @staticmethod - def from_dict(d): + def from_dict(d: Dict[str, Any]) -> "Specs": """ Re-create the Specs from a dictionary representation. - :param Dict[str, Any] d: The dictionary representation. + :param d: The dictionary representation. :return: The restored Specs. 
- :rtype: Specs """ return Specs( qubits_specs=sorted( @@ -332,7 +321,7 @@ def from_dict(d): edges_specs=sorted( [ EdgeSpecs( - targets=[int(q) for q in e.split("-")], + targets=tuple(int(q) for q in e.split("-")), fBellState=especs.get("fBellState"), fCZ=especs.get("fCZ"), fCZ_std_err=especs.get("fCZ_std_err"), @@ -350,7 +339,7 @@ def from_dict(d): ) -def specs_from_graph(graph: nx.Graph): +def specs_from_graph(graph: nx.Graph) -> Specs: """ Generate a Specs object from a NetworkX graph with placeholder values for the actual specs.
diff --git a/pyquil/device/tests/test_device.py b/pyquil/device/tests/test_device.py --- a/pyquil/device/tests/test_device.py +++ b/pyquil/device/tests/test_device.py @@ -55,10 +55,10 @@ def test_isa(isa_dict): Qubit(id=3, type="Xhalves", dead=True), ], edges=[ - Edge(targets=[0, 1], type="CZ", dead=False), - Edge(targets=[0, 2], type="CPHASE", dead=False), - Edge(targets=[0, 3], type="CZ", dead=True), - Edge(targets=[1, 2], type="ISWAP", dead=False), + Edge(targets=(0, 1), type="CZ", dead=False), + Edge(targets=(0, 2), type="CPHASE", dead=False), + Edge(targets=(0, 3), type="CZ", dead=True), + Edge(targets=(1, 2), type="ISWAP", dead=False), ], ) assert isa == ISA.from_dict(isa.to_dict()) @@ -115,7 +115,7 @@ def test_specs(specs_dict): ], edges_specs=[ EdgeSpecs( - targets=[0, 1], + targets=(0, 1), fBellState=0.90, fCZ=0.89, fCZ_std_err=0.01, @@ -127,7 +127,7 @@ def test_specs(specs_dict): fCPHASE_std_err=None, ), EdgeSpecs( - targets=[0, 2], + targets=(0, 2), fBellState=0.92, fCZ=0.91, fCZ_std_err=0.20, @@ -139,7 +139,7 @@ def test_specs(specs_dict): fCPHASE_std_err=None, ), EdgeSpecs( - targets=[0, 3], + targets=(0, 3), fBellState=0.89, fCZ=0.88, fCZ_std_err=0.03, @@ -151,7 +151,7 @@ def test_specs(specs_dict): fCPHASE_std_err=None, ), EdgeSpecs( - targets=[1, 2], + targets=(1, 2), fBellState=0.91, fCZ=0.90, fCZ_std_err=0.12, diff --git a/pyquil/tests/test_quantum_computer.py b/pyquil/tests/test_quantum_computer.py --- a/pyquil/tests/test_quantum_computer.py +++ b/pyquil/tests/test_quantum_computer.py @@ -194,8 +194,8 @@ def test_device_stuff(): assert nx.is_isomorphic(qc.qubit_topology(), topo) isa = qc.get_isa(twoq_type="CPHASE") - assert sorted(isa.edges)[0].type == "CPHASE" - assert sorted(isa.edges)[0].targets == [0, 4] + assert isa.edges[0].type == "CPHASE" + assert isa.edges[0].targets == (0, 4) def test_run(forest):
Change the namedtuples in device.py to dataclasses As discussed in #961, using `dataclasses` instead of `namedtuples` would greatly improve readability, understanding, and use of the structures in the `device` module.
2020-01-02T19:58:40
python
Hard
pallets-eco/flask-wtf
512
pallets-eco__flask-wtf-512
[ "511" ]
b86d5c6516344f85f930cdd710b14d54ac88415c
diff --git a/src/flask_wtf/__init__.py b/src/flask_wtf/__init__.py --- a/src/flask_wtf/__init__.py +++ b/src/flask_wtf/__init__.py @@ -5,4 +5,4 @@ from .recaptcha import RecaptchaField from .recaptcha import RecaptchaWidget -__version__ = "1.0.0" +__version__ = "1.0.1.dev0" diff --git a/src/flask_wtf/form.py b/src/flask_wtf/form.py --- a/src/flask_wtf/form.py +++ b/src/flask_wtf/form.py @@ -56,7 +56,7 @@ def wrap_formdata(self, form, formdata): return CombinedMultiDict((request.files, request.form)) elif request.form: return request.form - elif request.get_json(): + elif request.is_json: return ImmutableMultiDict(request.get_json()) return None diff --git a/src/flask_wtf/recaptcha/validators.py b/src/flask_wtf/recaptcha/validators.py --- a/src/flask_wtf/recaptcha/validators.py +++ b/src/flask_wtf/recaptcha/validators.py @@ -30,7 +30,7 @@ def __call__(self, form, field): if current_app.testing: return True - if request.json: + if request.is_json: response = request.json.get("g-recaptcha-response", "") else: response = request.form.get("g-recaptcha-response", "")
diff --git a/tests/test_recaptcha.py b/tests/test_recaptcha.py --- a/tests/test_recaptcha.py +++ b/tests/test_recaptcha.py @@ -80,7 +80,8 @@ def test_render_custom_args(app): app.config["RECAPTCHA_DATA_ATTRS"] = {"red": "blue"} f = RecaptchaForm() render = f.recaptcha() - assert "?key=%28value%29" in render + # new versions of url_encode allow more characters + assert "?key=(value)" in render or "?key=%28value%29" in render assert 'data-red="blue"' in render
Update to Request.get_json() in Werkzeug 2.1.0 breaks empty forms Similar to #510 - the get_json() change in Werkzeug 2.1.0 https://github.com/pallets/werkzeug/issues/2339 breaks any empty submitted form (not json). From form.py: ``` def wrap_formdata(self, form, formdata): if formdata is _Auto: if _is_submitted(): if request.files: return CombinedMultiDict((request.files, request.form)) elif request.form: return request.form elif request.get_json(): return ImmutableMultiDict(request.get_json()) ``` If the form is an empty ImmutableMultiDict - it falls into the get_json() code which is then checking that the content-type header has been set to application/json. Possible solution would be to change elif request.get_json() to elif request.is_json() Expected Behavior: Empty form submits should be allowed as they were. In the case of an empty form - None should be returned from the wrapper. Environment: - Python version: 3.8 - Flask-WTF version: 1.0.0 - Flask version: 2.1
2022-03-31T15:26:26
python
Easy
pytest-dev/pytest-django
979
pytest-dev__pytest-django-979
[ "978" ]
b3b679f2cab9dad70e318f252751ff7659b951d1
diff --git a/pytest_django/fixtures.py b/pytest_django/fixtures.py --- a/pytest_django/fixtures.py +++ b/pytest_django/fixtures.py @@ -167,7 +167,7 @@ def _django_db_helper( serialized_rollback, ) = False, False, None, False - transactional = transactional or ( + transactional = transactional or reset_sequences or ( "transactional_db" in request.fixturenames or "live_server" in request.fixturenames )
diff --git a/tests/test_database.py b/tests/test_database.py --- a/tests/test_database.py +++ b/tests/test_database.py @@ -287,11 +287,16 @@ def test_reset_sequences_disabled(self, request) -> None: marker = request.node.get_closest_marker("django_db") assert not marker.kwargs - @pytest.mark.django_db(transaction=True, reset_sequences=True) + @pytest.mark.django_db(reset_sequences=True) def test_reset_sequences_enabled(self, request) -> None: marker = request.node.get_closest_marker("django_db") assert marker.kwargs["reset_sequences"] + @pytest.mark.django_db(transaction=True, reset_sequences=True) + def test_transaction_reset_sequences_enabled(self, request) -> None: + marker = request.node.get_closest_marker("django_db") + assert marker.kwargs["reset_sequences"] + @pytest.mark.django_db(databases=['default', 'replica', 'second']) def test_databases(self, request) -> None: marker = request.node.get_closest_marker("django_db")
4.5.1: reset_sequences=True fails on MariaDB/MySQL Firstly, thanks for maintaining such a powerful and useful testing library for Django. On to the bug: - OS: Windows 10 - Python: 3.9.1 - pytest-6.2.5 - py-1.11.0 - pluggy-1.0.0 - Django: 3.2.10 Example: @pytest.mark.django_db(reset_sequences=True) def test_reset_sequences(): assert True Output: ERROR my_test.py::test_reset_sequences - AssertionError: reset_sequences cannot be used on TestCase instances
It's missing `transaction=True`. Needs a better error message. Did it work on pytest-django 4.4.0? If yes, then I'll make it work again. Thanks for the fast response! Yes it works on 4.5.0
2021-12-07T14:17:20
python
Easy
rigetti/pyquil
177
rigetti__pyquil-177
[ "176" ]
e10881922b799ab015f750d07156f03b2bca7046
diff --git a/pyquil/kraus.py b/pyquil/kraus.py --- a/pyquil/kraus.py +++ b/pyquil/kraus.py @@ -50,9 +50,8 @@ def _create_kraus_pragmas(name, qubit_indices, kraus_ops): :rtype: str """ - prefix = "PRAGMA ADD-KRAUS {} {}".format(name, " ".join(map(str, qubit_indices))) pragmas = [Pragma("ADD-KRAUS", - qubit_indices, + [name] + list(qubit_indices), "({})".format(" ".join(map(format_parameter, np.ravel(k))))) for k in kraus_ops] return pragmas
diff --git a/pyquil/tests/test_quil.py b/pyquil/tests/test_quil.py --- a/pyquil/tests/test_quil.py +++ b/pyquil/tests/test_quil.py @@ -520,11 +520,11 @@ def test_kraus(): ret = pq.out() assert ret == """X 0 -PRAGMA ADD-KRAUS 0 "(0.0+0.0i 1.0+0.0i 1.0+0.0i 0.0+0.0i)" -PRAGMA ADD-KRAUS 0 "(0.0+0.0i 0.0+0.0i 0.0+0.0i 0.0+0.0i)" +PRAGMA ADD-KRAUS X 0 "(0.0+0.0i 1.0+0.0i 1.0+0.0i 0.0+0.0i)" +PRAGMA ADD-KRAUS X 0 "(0.0+0.0i 0.0+0.0i 0.0+0.0i 0.0+0.0i)" X 1 -PRAGMA ADD-KRAUS 1 "(0.0+0.0i 1.0+0.0i 1.0+0.0i 0.0+0.0i)" -PRAGMA ADD-KRAUS 1 "(0.0+0.0i 0.0+0.0i 0.0+0.0i 0.0+0.0i)" +PRAGMA ADD-KRAUS X 1 "(0.0+0.0i 1.0+0.0i 1.0+0.0i 0.0+0.0i)" +PRAGMA ADD-KRAUS X 1 "(0.0+0.0i 0.0+0.0i 0.0+0.0i 0.0+0.0i)" """ # test error due to bad normalization with pytest.raises(ValueError):
`ADD-KRAUS` does not pass the gate name to `Pragma` constructor As is `ADD-KRAUS` is broken, but the fix is easy.
2017-11-09T01:17:37
python
Hard
pytest-dev/pytest-django
323
pytest-dev__pytest-django-323
[ "322" ]
274efdfd48e806830e08d003d93af1e6070eb2b3
diff --git a/pytest_django/plugin.py b/pytest_django/plugin.py --- a/pytest_django/plugin.py +++ b/pytest_django/plugin.py @@ -539,6 +539,20 @@ def _template_string_if_invalid_marker(request): else: dj_settings.TEMPLATE_STRING_IF_INVALID.fail = False + +@pytest.fixture(autouse=True, scope='function') +def _django_clear_site_cache(): + """Clears ``django.contrib.sites.models.SITE_CACHE`` to avoid + unexpected behavior with cached site objects. + """ + + if django_settings_is_configured(): + from django.conf import settings as dj_settings + + if 'django.contrib.sites' in dj_settings.INSTALLED_APPS: + from django.contrib.sites.models import Site + Site.objects.clear_cache() + # ############### Helper Functions ################
diff --git a/tests/test_environment.py b/tests/test_environment.py --- a/tests/test_environment.py +++ b/tests/test_environment.py @@ -3,9 +3,12 @@ import os import pytest +from django.contrib.sites.models import Site +from django.contrib.sites import models as site_models from django.core import mail from django.db import connection from django.test import TestCase +from pytest_django.lazy_django import get_django_version from pytest_django_test.app.models import Item @@ -215,3 +218,26 @@ def test_more_verbose_with_vv_and_reusedb(self, testdir): "*PASSED*"]) assert ("*Destroying test database for alias 'default' ('*')...*" not in result.stdout.str()) + + +@pytest.mark.skipif( + get_django_version() < (1, 8), + reason='Django 1.7 requires settings.SITE_ID to be set, so this test is invalid' +) +@pytest.mark.django_db +@pytest.mark.parametrize('site_name', ['site1', 'site2']) +def test_clear_site_cache(site_name, rf, monkeypatch): + request = rf.get('/') + monkeypatch.setattr(request, 'get_host', lambda: 'foo.com') + Site.objects.create(domain='foo.com', name=site_name) + assert Site.objects.get_current(request=request).name == site_name + + +@pytest.mark.django_db +@pytest.mark.parametrize('site_name', ['site1', 'site2']) +def test_clear_site_cache_check_site_cache_size(site_name, settings): + assert len(site_models.SITE_CACHE) == 0 + site = Site.objects.create(domain='foo.com', name=site_name) + settings.SITE_ID = site.id + assert Site.objects.get_current() == site + assert len(site_models.SITE_CACHE) == 1
Tests with django sites framework onetoonefield causes unexpected behavior Assume you have a model: ``` class Customer(models.Model): site = models.OneToOneField('sites.Site') ``` And when using the sites middleware, without setting SITE_ID, the site is looked up and cached based on the requests host information: https://github.com/django/django/blob/master/django/contrib/sites/models.py#L12 This causes unexpected behavior if testing a multi tenant site, as the request.site object will be the one from the SITE_CACHE, that might have an already populated _request.site.customer_ from previous execution. I will submit a proposal for fixing this, as it can cause plenty of pain when debugging :)
2016-04-01T13:38:30
python
Easy
pytest-dev/pytest-django
1,189
pytest-dev__pytest-django-1189
[ "1188" ]
6d5c272519037031f0b68d78dca44727b860d65e
diff --git a/pytest_django/fixtures.py b/pytest_django/fixtures.py --- a/pytest_django/fixtures.py +++ b/pytest_django/fixtures.py @@ -129,8 +129,8 @@ def _get_databases_for_test(test: pytest.Item) -> tuple[Iterable[str], bool]: test_cls = getattr(test, "cls", None) if test_cls and issubclass(test_cls, TransactionTestCase): - serialized_rollback = getattr(test, "serialized_rollback", False) - databases = getattr(test, "databases", None) + serialized_rollback = getattr(test_cls, "serialized_rollback", False) + databases = getattr(test_cls, "databases", None) else: fixtures = getattr(test, "fixturenames", ()) marker_db = test.get_closest_marker("django_db")
diff --git a/tests/test_database.py b/tests/test_database.py --- a/tests/test_database.py +++ b/tests/test_database.py @@ -432,6 +432,28 @@ def test_db_access_3(self): ) +def test_django_testcase_multi_db(django_pytester: DjangoPytester) -> None: + """Test that Django TestCase multi-db support works.""" + + django_pytester.create_test_module( + """ + import pytest + from django.test import TestCase + from .app.models import Item, SecondItem + + class TestCase(TestCase): + databases = ["default", "second"] + + def test_db_access(self): + Item.objects.count() == 0 + SecondItem.objects.count() == 0 + """ + ) + + result = django_pytester.runpytest_subprocess("-v", "--reuse-db") + result.assert_outcomes(passed=1) + + class Test_database_blocking: def test_db_access_in_conftest(self, django_pytester: DjangoPytester) -> None: """Make sure database access in conftest module is prohibited."""
django.test.TestCase with multiples database doesn't create secondary db in 4.11.0 With the 4.11.0 version, if a Django TestCase is setup to use multiples database with the `databases` attribute, only the default database is created on runtime. ``` class Test(TestCase): databases = {"default", "db2"} ``` Trying to add the decorator `@pytest.mark.django_db(databases=['default', 'db2'])` on the class doesn't work either. The problem seems to come from the line [133 in pytest_django/fixtures.py](https://github.com/pytest-dev/pytest-django/blob/6d5c272519037031f0b68d78dca44727b860d65e/pytest_django/fixtures.py#L133) The line tries to get an attribute from the test function and not from the test class. ``` databases = getattr(test_cls, "databases", None) ``` This fixes the problem with Django TestCase without altering pytest.mark.django_db PS : the line 132 (serialized_rollback) seems to be broken also. I have not tried using _serialized_rollback_, so I can't confirm.
Ouch, silly mistake! I will fix and do a patch release. Thanks for the report.
2025-04-03T18:40:04
python
Hard
rigetti/pyquil
1,492
rigetti__pyquil-1492
[ "1486" ]
76c95c2b5ccdca93cce6f2b972dafda5a680ee13
diff --git a/pyquil/api/_abstract_compiler.py b/pyquil/api/_abstract_compiler.py --- a/pyquil/api/_abstract_compiler.py +++ b/pyquil/api/_abstract_compiler.py @@ -102,12 +102,15 @@ def __init__( self._timeout = timeout self._client_configuration = client_configuration or QCSClientConfiguration.load() - self._compiler_client = CompilerClient(client_configuration=self._client_configuration, request_timeout=timeout) if event_loop is None: event_loop = asyncio.get_event_loop() self._event_loop = event_loop + self._compiler_client = CompilerClient( + client_configuration=self._client_configuration, request_timeout=timeout, event_loop=self._event_loop + ) + self._connect() def get_version_info(self) -> Dict[str, Any]: diff --git a/pyquil/api/_compiler_client.py b/pyquil/api/_compiler_client.py --- a/pyquil/api/_compiler_client.py +++ b/pyquil/api/_compiler_client.py @@ -13,10 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. ############################################################################## +import asyncio from contextlib import contextmanager from dataclasses import dataclass -from typing import Iterator, Optional, List +from typing import Iterator, List, Optional +import qcs_sdk import rpcq from qcs_api_client.client import QCSClientConfiguration from rpcq.messages import TargetDevice as TargetQuantumProcessor @@ -151,7 +153,13 @@ class CompilerClient: Client for making requests to a Quil compiler. """ - def __init__(self, *, client_configuration: QCSClientConfiguration, request_timeout: float = 10.0) -> None: + def __init__( + self, + *, + client_configuration: QCSClientConfiguration, + request_timeout: float = 10.0, + event_loop: Optional[asyncio.AbstractEventLoop] = None, + ) -> None: """ Instantiate a new compiler client. 
@@ -164,17 +172,19 @@ def __init__(self, *, client_configuration: QCSClientConfiguration, request_time self.base_url = base_url self.timeout = request_timeout + if event_loop is None: + event_loop = asyncio.get_event_loop() + self._event_loop = event_loop def get_version(self) -> str: """ Get version info for compiler server. """ - with self._rpcq_client() as rpcq_client: # type: rpcq.Client - version: Optional[str] = rpcq_client.call("get_version_info").get("quilc") - if version is None: - raise ValueError("Expected compiler version info to contain a 'quilc' field.") - return version + async def _get_quilc_version() -> str: + return await qcs_sdk.get_quilc_version() + + return self._event_loop.run_until_complete(_get_quilc_version()) def compile_to_native_quil(self, request: CompileToNativeQuilRequest) -> CompileToNativeQuilResponse: """
diff --git a/test/unit/conftest.py b/test/unit/conftest.py --- a/test/unit/conftest.py +++ b/test/unit/conftest.py @@ -1,6 +1,5 @@ import json import os -from pathlib import Path from typing import Dict, Any import numpy as np diff --git a/test/unit/test_compiler_client.py b/test/unit/test_compiler_client.py --- a/test/unit/test_compiler_client.py +++ b/test/unit/test_compiler_client.py @@ -14,6 +14,14 @@ # limitations under the License. ############################################################################## +from test.unit.utils import patch_rpcq_client + +try: + from unittest.mock import AsyncMock +except ImportError: # 3.7 requires this backport of AsyncMock + from asyncmock import AsyncMock + +import qcs_sdk import rpcq from _pytest.monkeypatch import MonkeyPatch from pytest import raises @@ -22,16 +30,15 @@ from pyquil.api._compiler_client import ( CompilerClient, - GenerateRandomizedBenchmarkingSequenceResponse, - GenerateRandomizedBenchmarkingSequenceRequest, - ConjugatePauliByCliffordResponse, + CompileToNativeQuilRequest, + CompileToNativeQuilResponse, ConjugatePauliByCliffordRequest, + ConjugatePauliByCliffordResponse, + GenerateRandomizedBenchmarkingSequenceRequest, + GenerateRandomizedBenchmarkingSequenceResponse, NativeQuilMetadataResponse, - CompileToNativeQuilResponse, - CompileToNativeQuilRequest, ) from pyquil.external.rpcq import CompilerISA, compiler_isa_to_target_quantum_processor -from test.unit.utils import patch_rpcq_client def test_init__sets_base_url_and_timeout(monkeypatch: MonkeyPatch): @@ -70,12 +77,11 @@ def test_get_version__returns_version(mocker: MockerFixture): client_configuration = QCSClientConfiguration.load() compiler_client = CompilerClient(client_configuration=client_configuration) - rpcq_client = patch_rpcq_client(mocker=mocker, return_value={"quilc": "1.2.3"}) + version_mock = AsyncMock(return_value="1.2.3") + get_quilc_version_mock = mocker.patch("qcs_sdk.get_quilc_version", version_mock) assert 
compiler_client.get_version() == "1.2.3" - rpcq_client.call.assert_called_once_with( - "get_version_info" - ) + assert get_quilc_version_mock.call_count == 1 def test_compile_to_native_quil__returns_native_quil( @@ -99,7 +105,7 @@ def test_compile_to_native_quil__returns_native_quil( topological_swaps=3, qpu_runtime_estimation=0.1618, ), - ) + ), ) request = CompileToNativeQuilRequest( program="some-program", @@ -130,12 +136,12 @@ def test_compile_to_native_quil__returns_native_quil( ) -def test_conjugate_pauli_by_clifford__returns_conjugation_result( - mocker: MockerFixture -): +def test_conjugate_pauli_by_clifford__returns_conjugation_result(mocker: MockerFixture): client_configuration = QCSClientConfiguration.load() compiler_client = CompilerClient(client_configuration=client_configuration) - rpcq_client = patch_rpcq_client(mocker=mocker, return_value=rpcq.messages.ConjugateByCliffordResponse(phase=42, pauli="pauli")) + rpcq_client = patch_rpcq_client( + mocker=mocker, return_value=rpcq.messages.ConjugateByCliffordResponse(phase=42, pauli="pauli") + ) request = ConjugatePauliByCliffordRequest( pauli_indices=[0, 1, 2], @@ -151,7 +157,7 @@ def test_conjugate_pauli_by_clifford__returns_conjugation_result( rpcq.messages.ConjugateByCliffordRequest( pauli=rpcq.messages.PauliTerm(indices=[0, 1, 2], symbols=["x", "y", "z"]), clifford="cliff", - ) + ), ) @@ -161,7 +167,9 @@ def test_generate_randomized_benchmarking_sequence__returns_benchmarking_sequenc client_configuration = QCSClientConfiguration.load() compiler_client = CompilerClient(client_configuration=client_configuration) - rpcq_client = patch_rpcq_client(mocker=mocker, return_value=rpcq.messages.RandomizedBenchmarkingResponse(sequence=[[3, 1, 4], [1, 6, 1]])) + rpcq_client = patch_rpcq_client( + mocker=mocker, return_value=rpcq.messages.RandomizedBenchmarkingResponse(sequence=[[3, 1, 4], [1, 6, 1]]) + ) request = GenerateRandomizedBenchmarkingSequenceRequest( depth=42, @@ -181,5 +189,5 @@ def 
test_generate_randomized_benchmarking_sequence__returns_benchmarking_sequenc gateset=["some", "gate", "set"], seed=314, interleaver="some-interleaver", - ) + ), )
Get version info requests to quilc should go through the QCS SDK Currently, the qcs-sdk handles all external requests to `quilc` _except_ for getting version info. We need add a method for getting that data to QCS SDK Rust (see [this issue](https://github.com/rigetti/qcs-sdk-rust/issues/205)), then follow-up and use it here. This supports #1485
Good catch!
2022-11-03T16:56:26
python
Hard
pallets-eco/flask-wtf
264
pallets-eco__flask-wtf-264
[ "227" ]
f306c360f74362be3aac89c43cdc7c37008764fb
diff --git a/flask_wtf/_compat.py b/flask_wtf/_compat.py --- a/flask_wtf/_compat.py +++ b/flask_wtf/_compat.py @@ -6,9 +6,11 @@ if not PY2: text_type = str string_types = (str,) + from urllib.parse import urlparse else: text_type = unicode string_types = (str, unicode) + from urlparse import urlparse def to_bytes(text): diff --git a/flask_wtf/csrf.py b/flask_wtf/csrf.py --- a/flask_wtf/csrf.py +++ b/flask_wtf/csrf.py @@ -8,128 +8,94 @@ :copyright: (c) 2013 by Hsiaoming Yang. """ -import os -import hmac import hashlib -import time -from flask import Blueprint -from flask import current_app, session, request, abort +import os +import warnings +from functools import wraps + +from flask import Blueprint, current_app, request, session +from itsdangerous import BadData, URLSafeTimedSerializer +from werkzeug.exceptions import BadRequest from werkzeug.security import safe_str_cmp -from ._compat import to_bytes, string_types -try: - from urlparse import urlparse -except ImportError: - # python 3 - from urllib.parse import urlparse +from ._compat import FlaskWTFDeprecationWarning, string_types, urlparse __all__ = ('generate_csrf', 'validate_csrf', 'CsrfProtect') -def generate_csrf(secret_key=None, time_limit=None, token_key='csrf_token', url_safe=False): - """Generate csrf token code. - - :param secret_key: A secret key for mixing in the token, - default is Flask.secret_key. - :param time_limit: Token valid in the time limit, - default is 3600s. 
- """ +def _get_secret_key(secret_key=None): if not secret_key: - secret_key = current_app.config.get( - 'WTF_CSRF_SECRET_KEY', current_app.secret_key - ) + secret_key = current_app.config.get('WTF_CSRF_SECRET_KEY', current_app.secret_key) if not secret_key: - raise Exception('Must provide secret_key to use csrf.') + raise Exception('Must provide secret_key to use CSRF.') + + return secret_key - if time_limit is None: - time_limit = current_app.config.get('WTF_CSRF_TIME_LIMIT', 3600) - if token_key not in session: - session[token_key] = hashlib.sha1(os.urandom(64)).hexdigest() - - if time_limit: - expires = int(time.time() + time_limit) - csrf_build = '%s%s' % (session[token_key], expires) - else: - expires = '' - csrf_build = session[token_key] - - hmac_csrf = hmac.new( - to_bytes(secret_key), - to_bytes(csrf_build), - digestmod=hashlib.sha1 - ).hexdigest() - delimiter = '--' if url_safe else '##' - return '%s%s%s' % (expires, delimiter, hmac_csrf) - - -def validate_csrf(data, secret_key=None, time_limit=None, token_key='csrf_token', url_safe=False): - """Check if the given data is a valid csrf token. - - :param data: The csrf token value to be checked. - :param secret_key: A secret key for mixing in the token, - default is Flask.secret_key. - :param time_limit: Check if the csrf token is expired. - default is True. +def generate_csrf(secret_key=None, token_key='csrf_token'): + """Generate a CSRF token. The token is cached for a request, so multiple + calls to this function will generate the same token. + + During testing, it might be useful to access the signed token in + ``request.csrf_token`` and the raw token in ``session['csrf_token']``. + + :param secret_key: Used to securely sign the token. Default is + ``WTF_CSRF_SECRET_KEY`` or ``SECRET_KEY``. + :param token_key: key where token is stored in session for comparision. 
""" - delimiter = '--' if url_safe else '##' - if not data or delimiter not in data: - return False - try: - expires, hmac_csrf = data.split(delimiter, 1) - except ValueError: - return False # unpack error + if not getattr(request, token_key, None): + if token_key not in session: + session[token_key] = hashlib.sha1(os.urandom(64)).hexdigest() - if time_limit is None: - time_limit = current_app.config.get('WTF_CSRF_TIME_LIMIT', 3600) + s = URLSafeTimedSerializer(_get_secret_key(secret_key), salt='wtf-csrf-token') + setattr(request, token_key, s.dumps(session[token_key])) - if time_limit: - try: - expires = int(expires) - except ValueError: - return False + return getattr(request, token_key) - now = int(time.time()) - if now > expires: - return False - if not secret_key: - secret_key = current_app.config.get( - 'WTF_CSRF_SECRET_KEY', current_app.secret_key - ) +def validate_csrf(data, secret_key=None, time_limit=None, token_key='csrf_token'): + """Check if the given data is a valid CSRF token. This compares the given + signed token to the one stored in the session. - if token_key not in session: + :param data: The signed CSRF token to be checked. + :param secret_key: Used to securely sign the token. Default is + ``WTF_CSRF_SECRET_KEY`` or ``SECRET_KEY``. + :param time_limit: Number of seconds that the token is valid. Default is + ``WTF_CSRF_TIME_LIMIT`` or 3600 seconds (60 minutes). + :param token_key: key where token is stored in session for comparision. 
+ """ + + if not data or token_key not in session: return False - csrf_build = '%s%s' % (session[token_key], expires) - hmac_compare = hmac.new( - to_bytes(secret_key), - to_bytes(csrf_build), - digestmod=hashlib.sha1 - ).hexdigest() + s = URLSafeTimedSerializer(_get_secret_key(secret_key), salt='wtf-csrf-token') - return safe_str_cmp(hmac_compare, hmac_csrf) + if time_limit is None: + time_limit = current_app.config.get('WTF_CSRF_TIME_LIMIT', 3600) + try: + token = s.loads(data, max_age=time_limit) + except BadData: + return False -class CsrfProtect(object): - """Enable csrf protect for Flask. + return safe_str_cmp(session[token_key], token) - Register it with:: - app = Flask(__name__) - CsrfProtect(app) - - And in the templates, add the token input:: +class CsrfProtect(object): + """Enable CSRF protection globally for a Flask app. - <input type="hidden" name="csrf_token" value="{{ csrf_token() }}"/> + :: - If you need to send the token via AJAX, and there is no form:: + app = Flask(__name__) + csrf = CsrfProtect(app) - <meta name="csrf_token" content="{{ csrf_token() }}" /> + Checks the ``csrf_token`` field sent with forms, or the ``X-CSRFToken`` + header sent with JavaScript requests. Render the token in templates using + ``{{ csrf_token() }}``. - You can grab the csrf token with JavaScript, and send the token together. + See the :ref:`csrf` documentation. 
""" def __init__(self, app=None): @@ -140,24 +106,19 @@ def __init__(self, app=None): self.init_app(app) def init_app(self, app): - self._app = app - app.jinja_env.globals['csrf_token'] = generate_csrf - app.config.setdefault( - 'WTF_CSRF_HEADERS', ['X-CSRFToken', 'X-CSRF-Token'] - ) - app.config.setdefault('WTF_CSRF_SSL_STRICT', True) app.config.setdefault('WTF_CSRF_ENABLED', True) app.config.setdefault('WTF_CSRF_CHECK_DEFAULT', True) - app.config.setdefault('WTF_CSRF_METHODS', ['POST', 'PUT', 'PATCH']) + app.config['WTF_CSRF_METHODS'] = set(app.config.get( + 'WTF_CSRF_METHODS', ['POST', 'PUT', 'PATCH', 'DELETE'] + )) + app.config.setdefault('WTF_CSRF_HEADERS', ['X-CSRFToken', 'X-CSRF-Token']) + app.config.setdefault('WTF_CSRF_SSL_STRICT', True) - # expose csrf_token as a helper in all templates - @app.context_processor - def csrf_token(): - return dict(csrf_token=generate_csrf) + app.jinja_env.globals['csrf_token'] = generate_csrf + app.context_processor(lambda: {'csrf_token': generate_csrf}) @app.before_request def _csrf_protect(): - # many things come from django.middleware.csrf if not app.config['WTF_CSRF_ENABLED']: return @@ -171,15 +132,17 @@ def _csrf_protect(): return view = app.view_functions.get(request.endpoint) + if not view: return - if self._exempt_views or self._exempt_blueprints: - dest = '%s.%s' % (view.__module__, view.__name__) - if dest in self._exempt_views: - return - if request.blueprint in self._exempt_blueprints: - return + if request.blueprint in self._exempt_blueprints: + return + + dest = '%s.%s' % (view.__module__, view.__name__) + + if dest in self._exempt_views: + return self.protect() @@ -190,85 +153,119 @@ def _get_csrf_token(self): for key in request.form: if key.endswith('csrf_token'): csrf_token = request.form[key] + if csrf_token: return csrf_token - for header_name in self._app.config['WTF_CSRF_HEADERS']: + for header_name in current_app.config['WTF_CSRF_HEADERS']: csrf_token = request.headers.get(header_name) + if csrf_token: 
return csrf_token + return None def protect(self): - if request.method not in self._app.config['WTF_CSRF_METHODS']: + if request.method not in current_app.config['WTF_CSRF_METHODS']: return if not validate_csrf(self._get_csrf_token()): - reason = 'CSRF token missing or incorrect.' - return self._error_response(reason) + self._error_response('CSRF token missing or incorrect.') - if request.is_secure and self._app.config['WTF_CSRF_SSL_STRICT']: + if request.is_secure and current_app.config['WTF_CSRF_SSL_STRICT']: if not request.referrer: - reason = 'Referrer checking failed - no Referrer.' - return self._error_response(reason) + self._error_response('Referrer checking failed - no Referrer.') good_referrer = 'https://%s/' % request.host + if not same_origin(request.referrer, good_referrer): - reason = 'Referrer checking failed - origin does not match.' - return self._error_response(reason) + self._error_response('Referrer checking failed - origin does not match.') request.csrf_valid = True # mark this request is csrf valid def exempt(self, view): - """A decorator that can exclude a view from csrf protection. - - Remember to put the decorator above the `route`:: + """Mark a view or blueprint to be excluded from CSRF protection. - csrf = CsrfProtect(app) + :: - @csrf.exempt @app.route('/some-view', methods=['POST']) + @csrf.exempt def some_view(): - return + ... + + :: + + bp = Blueprint(...) + csrf.exempt(bp) + """ + if isinstance(view, Blueprint): self._exempt_blueprints.add(view.name) return view + if isinstance(view, string_types): view_location = view else: view_location = '%s.%s' % (view.__module__, view.__name__) + self._exempt_views.add(view_location) return view def _error_response(self, reason): - return abort(400, reason) + raise CsrfError(reason) def error_handler(self, view): - """A decorator that set the error response handler. + """Register a function that will generate the response for CSRF errors. - It accepts one parameter `reason`:: + .. 
deprecated:: 0.14 + Use the standard Flask error system with + ``@app.errorhandler(CsrfError)`` instead. This will be removed in + version 1.0. + + The function will be passed one argument, ``reason``. By default it will + raise a :class:`~flask_wtf.csrf.CsrfError`. :: @csrf.error_handler def csrf_error(reason): return render_template('error.html', reason=reason) - By default, it will return a 400 response. + Due to historical reasons, the function may either return a response + or raise an exception with :func:`flask.abort`. """ - self._error_response = view + + warnings.warn(FlaskWTFDeprecationWarning( + '"@csrf.error_handler" is deprecated. Use the standard Flask error ' + 'system with "@app.errorhandler(CsrfError)" instead. This will be' + 'removed in 1.0.' + ), stacklevel=2) + + @wraps(view) + def handler(reason): + response = current_app.make_response(view(reason)) + raise CsrfError(response.get_data(as_text=True), response=response) + + self._error_response = handler return view -def same_origin(current_uri, compare_uri): - parsed_uri = urlparse(current_uri) - parsed_compare = urlparse(compare_uri) +class CsrfError(BadRequest): + """Raise if the client sends invalid CSRF data with the request. - if parsed_uri.scheme != parsed_compare.scheme: - return False + Generates a 400 Bad Request response with the failure reason by default. + Customize the response by registering a handler with + :meth:`flask.Flask.errorhandler`. + """ - if parsed_uri.hostname != parsed_compare.hostname: - return False + description = 'CSRF token missing or incorrect.' 
- if parsed_uri.port != parsed_compare.port: - return False - return True + +def same_origin(current_uri, compare_uri): + current = urlparse(current_uri) + compare = urlparse(compare_uri) + + return ( + current.scheme == compare.scheme + and current.hostname == compare.hostname + and current.port == compare.port + ) diff --git a/flask_wtf/form.py b/flask_wtf/form.py --- a/flask_wtf/form.py +++ b/flask_wtf/form.py @@ -1,16 +1,14 @@ # coding: utf-8 import warnings -import werkzeug.datastructures -from flask import request, session, current_app +from flask import current_app, request, session from jinja2 import Markup -from wtforms.compat import with_metaclass +from werkzeug.datastructures import MultiDict from wtforms.ext.csrf.form import SecureForm -from wtforms.form import FormMeta from wtforms.validators import ValidationError -from wtforms.widgets import HiddenInput, SubmitInput +from wtforms.widgets import HiddenInput -from ._compat import text_type, string_types, FlaskWTFDeprecationWarning +from ._compat import FlaskWTFDeprecationWarning, string_types, text_type from .csrf import generate_csrf, validate_csrf try: @@ -70,7 +68,7 @@ def __init__(self, formdata=_Auto, obj=None, prefix='', csrf_context=None, formdata = formdata.copy() formdata.update(request.files) elif request.get_json(): - formdata = werkzeug.datastructures.MultiDict(request.get_json()) + formdata = MultiDict(request.get_json()) else: formdata = None @@ -94,25 +92,24 @@ def __init__(self, formdata=_Auto, obj=None, prefix='', csrf_context=None, def generate_csrf_token(self, csrf_context=None): if not self.csrf_enabled: return None - return generate_csrf(self.SECRET_KEY, self.TIME_LIMIT) + + return generate_csrf(secret_key=self.SECRET_KEY) def validate_csrf_token(self, field): if not self.csrf_enabled: return True - if hasattr(request, 'csrf_valid') and request.csrf_valid: + + if getattr(request, 'csrf_valid', False): # this is validated by CsrfProtect return True - if not validate_csrf(field.data, 
self.SECRET_KEY, self.TIME_LIMIT): + + if not self.validate_csrf_data(field.data): raise ValidationError(field.gettext('CSRF token missing')) def validate_csrf_data(self, data): - """Check if the csrf data is valid. + """Check if the given data is a valid CSRF token.""" - .. versionadded: 0.9.0 - - :param data: the csrf string to be validated. - """ - return validate_csrf(data, self.SECRET_KEY, self.TIME_LIMIT) + return validate_csrf(data, secret_key=self.SECRET_KEY, time_limit=self.TIME_LIMIT) def is_submitted(self): """Consider the form submitted if there is an active request and
diff --git a/tests/base.py b/tests/base.py --- a/tests/base.py +++ b/tests/base.py @@ -1,10 +1,12 @@ from __future__ import with_statement -from flask import Flask, render_template, jsonify -from wtforms import StringField, HiddenField, SubmitField -from wtforms.validators import DataRequired +from unittest import TestCase as _TestCase + +from flask import Flask, jsonify, render_template from flask_wtf import FlaskForm from flask_wtf._compat import text_type +from wtforms import HiddenField, StringField, SubmitField +from wtforms.validators import DataRequired def to_unicode(text): @@ -37,7 +39,7 @@ class SimpleForm(FlaskForm): pass -class TestCase(object): +class TestCase(_TestCase): def setUp(self): self.app = self.create_app() self.client = self.app.test_client() diff --git a/tests/templates/csrf_macro.html b/tests/templates/csrf_macro.html --- a/tests/templates/csrf_macro.html +++ b/tests/templates/csrf_macro.html @@ -1,3 +1,3 @@ {% macro render_csrf_token() %} - <input type="hidden" name="csrf_token" value="{{ csrf_token() }}"> + <input name="csrf_token" type="hidden" value="{{ csrf_token() }}"> {% endmacro %} diff --git a/tests/test_csrf.py b/tests/test_csrf.py --- a/tests/test_csrf.py +++ b/tests/test_csrf.py @@ -1,21 +1,13 @@ from __future__ import with_statement import re -from flask import Blueprint -from flask import render_template -from flask_wtf.csrf import CsrfProtect -from flask_wtf.csrf import validate_csrf, generate_csrf -from .base import TestCase, MyForm, to_unicode +import warnings -csrf_token_input = re.compile( - r'name="csrf_token" type="hidden" value="([0-9a-z#A-Z-\.]*)"' -) +from flask import Blueprint, abort, render_template, request +from flask_wtf._compat import FlaskWTFDeprecationWarning +from flask_wtf.csrf import CsrfError, CsrfProtect, generate_csrf, validate_csrf - -def get_csrf_token(data): - match = csrf_token_input.search(to_unicode(data)) - assert match - return match.groups()[0] +from .base import MyForm, TestCase class 
TestCSRF(TestCase): @@ -59,9 +51,9 @@ def test_invalid_csrf(self): response = self.client.post("/", data={"name": "danny"}) assert response.status_code == 400 - @self.csrf.error_handler - def invalid(reason): - return reason + @self.app.errorhandler(CsrfError) + def handle_csrf_error(e): + return e, 200 response = self.client.post("/", data={"name": "danny"}) assert response.status_code == 200 @@ -86,8 +78,9 @@ def test_invalid_secure_csrf3(self): assert response.status_code == 400 def test_valid_csrf(self): - response = self.client.get("/") - csrf_token = get_csrf_token(response.data) + with self.client: + self.client.get('/') + csrf_token = request.csrf_token response = self.client.post("/", data={ "name": "danny", @@ -96,8 +89,9 @@ def test_valid_csrf(self): assert b'DANNY' in response.data def test_prefixed_csrf(self): - response = self.client.get('/') - csrf_token = get_csrf_token(response.data) + with self.client: + self.client.get('/') + csrf_token = request.csrf_token response = self.client.post('/', data={ 'prefix-name': 'David', @@ -106,8 +100,9 @@ def test_prefixed_csrf(self): assert response.status_code == 200 def test_invalid_secure_csrf(self): - response = self.client.get("/", base_url='https://localhost/') - csrf_token = get_csrf_token(response.data) + with self.client: + self.client.get('/', base_url='https://localhost/') + csrf_token = request.csrf_token response = self.client.post( "/", @@ -161,8 +156,10 @@ def test_invalid_secure_csrf(self): assert b'not match' in response.data def test_valid_secure_csrf(self): - response = self.client.get("/", base_url='https://localhost/') - csrf_token = get_csrf_token(response.data) + with self.client: + self.client.get('/', base_url='https://localhost/') + csrf_token = request.csrf_token + response = self.client.post( "/", data={"name": "danny"}, @@ -177,8 +174,9 @@ def test_valid_secure_csrf(self): assert response.status_code == 200 def test_valid_csrf_method(self): - response = self.client.get("/") - 
csrf_token = get_csrf_token(response.data) + with self.client: + self.client.get('/') + csrf_token = request.csrf_token response = self.client.post("/csrf-protect-method", data={ "csrf_token": csrf_token @@ -189,17 +187,19 @@ def test_invalid_csrf_method(self): response = self.client.post("/csrf-protect-method", data={"name": "danny"}) assert response.status_code == 400 - @self.csrf.error_handler - def invalid(reason): - return reason + @self.app.errorhandler(CsrfError) + def handle_csrf_error(e): + return e, 200 response = self.client.post("/", data={"name": "danny"}) assert response.status_code == 200 assert b'token missing' in response.data def test_empty_csrf_headers(self): - response = self.client.get("/", base_url='https://localhost/') - csrf_token = get_csrf_token(response.data) + with self.client: + self.client.get('/', base_url='https://localhost/') + csrf_token = request.csrf_token + self.app.config['WTF_CSRF_HEADERS'] = list() response = self.client.post( "/", @@ -215,8 +215,10 @@ def test_empty_csrf_headers(self): assert response.status_code == 400 def test_custom_csrf_headers(self): - response = self.client.get("/", base_url='https://localhost/') - csrf_token = get_csrf_token(response.data) + with self.client: + self.client.get('/', base_url='https://localhost/') + csrf_token = request.csrf_token + self.app.config['WTF_CSRF_HEADERS'] = ['X-XSRF-TOKEN'] response = self.client.post( "/", @@ -239,9 +241,10 @@ def test_testing(self): self.app.testing = True self.client.post("/", data={"name": "danny"}) - def test_csrf_exempt(self): - response = self.client.get("/csrf-exempt") - csrf_token = get_csrf_token(response.data) + def test_csrf_exempt_view_with_form(self): + with self.client: + self.client.get('/', base_url='https://localhost/') + csrf_token = request.csrf_token response = self.client.post("/csrf-exempt", data={ "name": "danny", @@ -257,7 +260,7 @@ def test_validate_csrf(self): def test_validate_not_expiring_csrf(self): with 
self.app.test_request_context(): - csrf_token = generate_csrf(time_limit=False) + csrf_token = generate_csrf() assert validate_csrf(csrf_token, time_limit=False) def test_csrf_token_helper(self): @@ -265,8 +268,9 @@ def test_csrf_token_helper(self): def withtoken(): return render_template("csrf.html") - response = self.client.get('/token') - assert b'#' in response.data + with self.client: + response = self.client.get('/token') + assert re.search(br'token: ([0-9a-zA-Z\-._]+)', response.data) def test_csrf_blueprint(self): response = self.client.post('/bar/foo') @@ -281,8 +285,9 @@ def test_csrf_token_macro(self): def withtoken(): return render_template("import_csrf.html") - response = self.client.get('/token') - assert b'#' in response.data + with self.client: + response = self.client.get('/token') + assert request.csrf_token in response.data.decode('utf8') def test_csrf_custom_token_key(self): with self.app.test_request_context(): @@ -296,16 +301,31 @@ def test_csrf_custom_token_key(self): # However, the custom key can validate as well assert validate_csrf(custom_csrf_token, token_key='oauth_state') - def test_csrf_url_safe(self): - with self.app.test_request_context(): - # Generate a normal and URL safe CSRF token - default_csrf_token = generate_csrf() - url_safe_csrf_token = generate_csrf(url_safe=True) + def test_old_error_handler(self): + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always', FlaskWTFDeprecationWarning) + + @self.csrf.error_handler + def handle_csrf_error(reason): + return 'caught csrf return' + + self.assertEqual(len(w), 1) + assert issubclass(w[0].category, FlaskWTFDeprecationWarning) + assert 'app.errorhandler(CsrfError)' in str(w[0].message) + + rv = self.client.post('/', data={'name': 'david'}) + assert b'caught csrf return' in rv.data + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always', FlaskWTFDeprecationWarning) + + @self.csrf.error_handler + def handle_csrf_error(reason): + 
abort(401, 'caught csrf abort') - # Verify they are not the same and the URL one is truly URL safe - assert default_csrf_token != url_safe_csrf_token - assert '#' not in url_safe_csrf_token - assert re.match(r'^[a-f0-9]+--[a-f0-9]+$', url_safe_csrf_token) + self.assertEqual(len(w), 1) + assert issubclass(w[0].category, FlaskWTFDeprecationWarning) + assert 'app.errorhandler(CsrfError)' in str(w[0].message) - # Verify we can validate our URL safe key - assert validate_csrf(url_safe_csrf_token, url_safe=True) + rv = self.client.post('/', data={'name': 'david'}) + assert b'caught csrf abort' in rv.data diff --git a/tests/test_validation.py b/tests/test_validation.py --- a/tests/test_validation.py +++ b/tests/test_validation.py @@ -1,18 +1,8 @@ from __future__ import with_statement -import re +from flask import request -from .base import TestCase, MyForm, to_unicode - -csrf_token_input = re.compile( - r'name="csrf_token" type="hidden" value="([0-9a-z#A-Z-\.]*)"' -) - - -def get_csrf_token(data): - match = csrf_token_input.search(to_unicode(data)) - assert match - return match.groups()[0] +from .base import MyForm, TestCase, to_unicode class TestValidateOnSubmit(TestCase): @@ -93,18 +83,20 @@ def test_ajax(self): assert response.status_code == 200 def test_valid_csrf(self): + with self.client: + self.client.get('/') + csrf_token = request.csrf_token - response = self.client.get("/") - csrf_token = get_csrf_token(response.data) - - response = self.client.post("/", data={"name": "danny", - "csrf_token": csrf_token}) + response = self.client.post('/', data={ + 'name': 'danny', + 'csrf_token': csrf_token + }) assert b'DANNY' in response.data def test_double_csrf(self): - - response = self.client.get("/") - csrf_token = get_csrf_token(response.data) + with self.client: + self.client.get('/') + csrf_token = request.csrf_token response = self.client.post("/two_forms/", data={ "name": "danny", @@ -114,6 +106,4 @@ def test_double_csrf(self): def test_valid_csrf_data(self): with 
self.app.test_request_context(): - form = MyForm() - csrf_token = get_csrf_token(form.csrf_token()) - assert form.validate_csrf_data(csrf_token) + assert MyForm().validate_csrf_data(request.csrf_token)
Make it easier to access a CSRF token in automated tests If you want to run your automated tests with CSRF enabled (which is a good idea if it's enabled in production), there's no good built-in way to do so. Even the tests for this project [use regular expressions to parse the CSRF token out of the page](https://github.com/lepture/flask-wtf/blob/3c9dcf5cc/tests/test_csrf.py#L10-L18), which is brittle and confusing. It would be better to provide some way to access the CSRF token in the Flask test client itself. I've written a GitHub Gist that walks though how I implemented this myself, but maybe Flask-WTF could change some internals to make it cleaner and easier? https://gist.github.com/singingwolfboy/2fca1de64950d5dfed72
2016-10-13T04:41:57
python
Hard
rigetti/pyquil
421
rigetti__pyquil-421
[ "384" ]
9612be90f91405ecbc089b3496f1c85d9c177cc8
diff --git a/pyquil/noise.py b/pyquil/noise.py --- a/pyquil/noise.py +++ b/pyquil/noise.py @@ -296,22 +296,51 @@ def damping_after_dephasing(T1, T2, gate_time): # You can only apply gate-noise to non-parametrized gates or parametrized gates at fixed parameters. NO_NOISE = ["RZ"] -NOISY_GATES = { - ("I", ()): (np.eye(2), "NOISY-I"), - ("RX", (np.pi / 2,)): (np.array([[1, -1j], - [-1j, 1]]) / np.sqrt(2), - "NOISY-RX-PLUS-90"), - ("RX", (-np.pi / 2,)): (np.array([[1, 1j], - [1j, 1]]) / np.sqrt(2), - "NOISY-RX-MINUS-90"), - ("RX", (np.pi,)): (np.array([[0, -1j], - [-1j, 0]]), - "NOISY-RX-PLUS-180"), - ("RX", (-np.pi,)): (np.array([[0, 1j], - [1j, 0]]), - "NOISY-RX-MINUS-180"), - ("CZ", ()): (np.diag([1, 1, 1, -1]), "NOISY-CZ"), -} +ANGLE_TOLERANCE = 1e-10 + + +class NoisyGateUndefined(Exception): + """Raise when user attempts to use noisy gate outside of currently supported set.""" + pass + + +def get_noisy_gate(gate_name, params): + """ + Look up the numerical gate representation and a proposed 'noisy' name. + + :param str gate_name: The Quil gate name + :param Tuple[float] params: The gate parameters. + :return: A tuple (matrix, noisy_name) with the representation of the ideal gate matrix + and a proposed name for the noisy version. 
+ :rtype: Tuple[np.array, str] + """ + params = tuple(params) + if gate_name == "I": + assert params == () + return np.eye(2), "NOISY-I" + if gate_name == "RX": + angle, = params + if np.isclose(angle, np.pi / 2, atol=ANGLE_TOLERANCE): + return (np.array([[1, -1j], + [-1j, 1]]) / np.sqrt(2), + "NOISY-RX-PLUS-90") + elif np.isclose(angle, -np.pi / 2, atol=ANGLE_TOLERANCE): + return (np.array([[1, 1j], + [1j, 1]]) / np.sqrt(2), + "NOISY-RX-MINUS-90") + elif np.isclose(angle, np.pi, atol=ANGLE_TOLERANCE): + return (np.array([[0, -1j], + [-1j, 0]]), + "NOISY-RX-PLUS-180") + elif np.isclose(angle, -np.pi, atol=ANGLE_TOLERANCE): + return (np.array([[0, 1j], + [1j, 0]]), + "NOISY-RX-MINUS-180") + elif gate_name == "CZ": + assert params == () + return np.diag([1, 1, 1, -1]), "NOISY-CZ" + raise NoisyGateUndefined("Undefined gate and params: {}{}\n" + "Please restrict yourself to I, RX(+/-pi), RX(+/-pi/2), CZ") def _get_program_gates(prog): @@ -384,21 +413,18 @@ def _decoherence_noise_model(gates, T1=30e-6, T2=30e-6, gate_time_1q=50e-9, key = (g.name, tuple(g.params)) if g.name in NO_NOISE: continue - if key in NOISY_GATES: - matrix, _ = NOISY_GATES[key] - if len(targets) == 1: - noisy_I = noisy_identities_1q[targets[0]] - else: - if len(targets) != 2: - raise ValueError("Noisy gates on more than 2Q not currently supported") - - # note this ordering of the tensor factors is necessary due to how the QVM orders - # the wavefunction basis - noisy_I = tensor_kraus_maps(noisy_identities_2q[targets[1]], - noisy_identities_2q[targets[0]]) + matrix, _ = get_noisy_gate(g.name, g.params) + + if len(targets) == 1: + noisy_I = noisy_identities_1q[targets[0]] else: - raise ValueError("Cannot create noisy version of {}. 
".format(g) + - "Please restrict yourself to CZ, RX(+/-pi/2), I, RZ(theta)") + if len(targets) != 2: + raise ValueError("Noisy gates on more than 2Q not currently supported") + + # note this ordering of the tensor factors is necessary due to how the QVM orders + # the wavefunction basis + noisy_I = tensor_kraus_maps(noisy_identities_2q[targets[1]], + noisy_identities_2q[targets[0]]) kraus_maps.append(KrausModel(g.name, tuple(g.params), targets, combine_kraus_maps(noisy_I, [matrix]), # FIXME (Nik): compute actual avg gate fidelity for this simple @@ -434,13 +460,13 @@ def _noise_model_program_header(noise_model): # obtain ideal gate matrix and new, noisy name by looking it up in the NOISY_GATES dict try: - ideal_gate, new_name = NOISY_GATES[k.gate, tuple(k.params)] + ideal_gate, new_name = get_noisy_gate(k.gate, tuple(k.params)) # if ideal version of gate has not yet been DEFGATE'd, do this if new_name not in defgates: p.defgate(new_name, ideal_gate) defgates.add(new_name) - except KeyError: + except NoisyGateUndefined: print("WARNING: Could not find ideal gate definition for gate {}".format(k.gate), file=sys.stderr) new_name = k.gate @@ -468,11 +494,10 @@ def apply_noise_model(prog, noise_model): new_prog = _noise_model_program_header(noise_model) for i in prog: if isinstance(i, Gate): - key = (i.name, tuple(i.params)) - if key in NOISY_GATES: - _, new_name = NOISY_GATES[key] + try: + _, new_name = get_noisy_gate(i.name, tuple(i.params)) new_prog += Gate(new_name, [], i.qubits) - else: + except NoisyGateUndefined: new_prog += i else: new_prog += i
diff --git a/pyquil/tests/test_noise.py b/pyquil/tests/test_noise.py --- a/pyquil/tests/test_noise.py +++ b/pyquil/tests/test_noise.py @@ -208,3 +208,17 @@ def test_apply_noise_model(): assert i.command in ['ADD-KRAUS', 'READOUT-POVM'] elif isinstance(i, Gate): assert i.name in NO_NOISE or not i.params + + +def test_apply_noise_model_perturbed_angles(): + eps = 1e-15 + p = Program(RX(np.pi / 2 + eps)(0), RX(np.pi / 2 - eps)(1), CZ(0, 1), RX(np.pi / 2 + eps)(1)) + noise_model = _decoherence_noise_model(_get_program_gates(p)) + pnoisy = apply_noise_model(p, noise_model) + for i in pnoisy: + if isinstance(i, DefGate): + pass + elif isinstance(i, Pragma): + assert i.command in ['ADD-KRAUS', 'READOUT-POVM'] + elif isinstance(i, Gate): + assert i.name in NO_NOISE or not i.params
Adding decoherence noise models fails when `RX` angles are perturbed from +/-pi or +/-pi/2 Two ways to fix this: 1. Quick: allow angles to deviate from pi within some tolerance (e.g., 10^{-10}) that is much stricter than any anticipated gate error. 2. Slow: actually implement a mechanism to translate arbitrary pyquil gates (including parameters) to symbolic or numeric matrices. This would have to be able to resolve the default gateset AND check the program for `defgates` and extract those when applicable. As a benefit, we could support helpers for noise models for arbitrary gates.
@mpharrigan what are your thoughts? Is this an issue in practice? Can we do quick in the near term and slow eventually in the context of noise models for arbitrary gates Yeah, it was an issue for me today when I tried to add noise after compiling the program an external user wants to simulate with noise. I am happy to do the quick fix first
2018-05-03T19:00:14
python
Hard
marcelotduarte/cx_Freeze
2,759
marcelotduarte__cx_Freeze-2759
[ "2738" ]
aee3a1a3195a358e814c4fcbdc116e192132bbf5
diff --git a/cx_Freeze/_compat.py b/cx_Freeze/_compat.py --- a/cx_Freeze/_compat.py +++ b/cx_Freeze/_compat.py @@ -7,6 +7,7 @@ from pathlib import Path __all__ = [ + "ABI_THREAD", "BUILD_EXE_DIR", "EXE_SUFFIX", "EXT_SUFFIX", @@ -21,8 +22,9 @@ PLATFORM = sysconfig.get_platform() PYTHON_VERSION = sysconfig.get_python_version() +ABI_THREAD = sysconfig.get_config_var("abi_thread") or "" -BUILD_EXE_DIR = Path(f"build/exe.{PLATFORM}-{PYTHON_VERSION}") +BUILD_EXE_DIR = Path(f"build/exe.{PLATFORM}-{PYTHON_VERSION}{ABI_THREAD}") EXE_SUFFIX = sysconfig.get_config_var("EXE") EXT_SUFFIX = sysconfig.get_config_var("EXT_SUFFIX") diff --git a/cx_Freeze/executable.py b/cx_Freeze/executable.py --- a/cx_Freeze/executable.py +++ b/cx_Freeze/executable.py @@ -11,6 +11,7 @@ from typing import TYPE_CHECKING from cx_Freeze._compat import ( + ABI_THREAD, EXE_SUFFIX, IS_MACOS, IS_MINGW, @@ -74,12 +75,17 @@ def base(self) -> Path: @base.setter def base(self, name: str | Path | None) -> None: - # The default base is the legacy console, except for + # The default base is the legacy console, except for Python 3.13t and # Python 3.13 on macOS, that supports only the new console - if IS_MACOS and sys.version_info[:2] >= (3, 13): - name = name or "console" - else: + version = sys.version_info[:2] + if ( + version <= (3, 13) + and ABI_THREAD == "" + and not (IS_MACOS and version == (3, 13)) + ): name = name or "console_legacy" + else: + name = name or "console" # silently ignore gui and service on non-windows systems if not (IS_WINDOWS or IS_MINGW) and name in ("gui", "service"): name = "console" diff --git a/cx_Freeze/freezer.py b/cx_Freeze/freezer.py --- a/cx_Freeze/freezer.py +++ b/cx_Freeze/freezer.py @@ -22,11 +22,13 @@ from setuptools import Distribution from cx_Freeze._compat import ( + ABI_THREAD, BUILD_EXE_DIR, IS_CONDA, IS_MACOS, IS_MINGW, IS_WINDOWS, + PYTHON_VERSION, ) from cx_Freeze.common import get_resource_file_path, process_path_specs from cx_Freeze.exception import FileError, 
OptionError @@ -1035,9 +1037,10 @@ def _default_bin_includes(self) -> list[str]: # MSYS2 python returns a static library. names = [name.replace(".dll.a", ".dll")] else: + py_version = f"{PYTHON_VERSION}{ABI_THREAD}" names = [ f"python{sys.version_info[0]}.dll", - f"python{sys.version_info[0]}{sys.version_info[1]}.dll", + f"python{py_version.replace('.','')}.dll", ] python_shared_libs: list[Path] = [] for name in names: @@ -1113,7 +1116,7 @@ def _default_bin_excludes(self) -> list[str]: def _default_bin_includes(self) -> list[str]: python_shared_libs: list[Path] = [] # Check for distributed "cx_Freeze/bases/lib/Python" - name = "Python" + name = f"Python{ABI_THREAD.upper()}" for bin_path in self._default_bin_path_includes(): fullname = Path(bin_path, name).resolve() if fullname.is_file(): diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -97,7 +97,8 @@ def build_extension(self, ext) -> None: library_dirs.append(get_config_var("LIBPL")) if not ENABLE_SHARED or IS_CONDA: library_dirs.append(get_config_var("LIBDIR")) - libraries.append(f"python{get_python_version()}") + abi_thread = get_config_var("abi_thread") or "" + libraries.append(f"python{get_python_version()}{abi_thread}") if get_config_var("LIBS"): extra_args.extend(get_config_var("LIBS").split()) if get_config_var("LIBM"): @@ -275,38 +276,48 @@ def get_extensions() -> list[Extension]: os.environ.get("CI", "") != "true" or os.environ.get("CIBUILDWHEEL", "0") != "1" ) + abi_thread = get_config_var("abi_thread") or "" + version = sys.version_info[:2] extensions = [ Extension( "cx_Freeze.bases.console", ["source/bases/console.c", "source/bases/_common.c"], optional=optional, - ), - Extension( - "cx_Freeze.bases.console_legacy", - ["source/legacy/console.c"], - depends=["source/legacy/common.c"], - optional=optional - or (sys.version_info[:2] >= (3, 13) and IS_MACOS), - ), + ) ] - - if IS_MINGW or IS_WINDOWS: + if ( + version <= (3, 13) + and abi_thread == "" + and not (IS_MACOS and version == (3, 
13)) + ): extensions += [ Extension( - "cx_Freeze.bases.Win32GUI", - ["source/legacy/Win32GUI.c"], + "cx_Freeze.bases.console_legacy", + ["source/legacy/console.c"], depends=["source/legacy/common.c"], - libraries=["user32"], optional=optional, - ), - Extension( - "cx_Freeze.bases.Win32Service", - ["source/legacy/Win32Service.c"], - depends=["source/legacy/common.c"], - extra_link_args=["/DELAYLOAD:cx_Logging"], - libraries=["advapi32"], - optional=optional, - ), + ) + ] + if IS_MINGW or IS_WINDOWS: + if version <= (3, 13) and abi_thread == "": + extensions += [ + Extension( + "cx_Freeze.bases.Win32GUI", + ["source/legacy/Win32GUI.c"], + depends=["source/legacy/common.c"], + libraries=["user32"], + optional=optional, + ), + Extension( + "cx_Freeze.bases.Win32Service", + ["source/legacy/Win32Service.c"], + depends=["source/legacy/common.c"], + extra_link_args=["/DELAYLOAD:cx_Logging"], + libraries=["advapi32"], + optional=optional, + ), + ] + extensions += [ Extension( "cx_Freeze.bases.gui", ["source/bases/Win32GUI.c", "source/bases/_common.c"],
diff --git a/tests/test_executables.py b/tests/test_executables.py --- a/tests/test_executables.py +++ b/tests/test_executables.py @@ -12,6 +12,7 @@ from cx_Freeze import Executable from cx_Freeze._compat import ( + ABI_THREAD, BUILD_EXE_DIR, EXE_SUFFIX, IS_MACOS, @@ -241,14 +242,18 @@ def test_executables( ("icon.ico", "icon.icns", "icon.png", "icon.svg"), ), ] -if IS_MACOS and sys.version_info[:2] >= (3, 13): +if ( + sys.version_info[:2] <= (3, 13) + and ABI_THREAD == "" + and not (IS_MACOS and sys.version_info[:2] == (3, 13)) +): TEST_VALID_PARAMETERS += [ - ("base", None, "console-"), + ("base", None, "console_legacy-"), + ("base", "console_legacy", "console_legacy-"), ] else: TEST_VALID_PARAMETERS += [ - ("base", None, "console_legacy-"), - ("base", "console_legacy", "console_legacy-"), + ("base", None, "console-"), ] if IS_WINDOWS or IS_MINGW: TEST_VALID_PARAMETERS += [ diff --git a/tests/test_freezer.py b/tests/test_freezer.py --- a/tests/test_freezer.py +++ b/tests/test_freezer.py @@ -12,6 +12,7 @@ from cx_Freeze import Freezer from cx_Freeze._compat import ( + ABI_THREAD, BUILD_EXE_DIR, EXE_SUFFIX, IS_CONDA, @@ -99,19 +100,20 @@ def test_freezer_default_bin_includes(tmp_path: Path, monkeypatch) -> None: monkeypatch.chdir(tmp_path) freezer = Freezer(executables=["hello.py"]) + py_version = f"{PYTHON_VERSION}{ABI_THREAD}" if IS_MINGW: - expected = f"libpython{PYTHON_VERSION}.dll" + expected = f"libpython{py_version}.dll" elif IS_WINDOWS: - expected = f"python{PYTHON_VERSION.replace('.','')}.dll" + expected = f"python{py_version.replace('.','')}.dll" elif IS_CONDA: # macOS or Linux if IS_MACOS: - expected = f"libpython{PYTHON_VERSION}.dylib" + expected = f"libpython{py_version}.dylib" else: - expected = f"libpython{PYTHON_VERSION}.so*" + expected = f"libpython{py_version}.so*" elif IS_MACOS: - expected = "Python" + expected = f"Python{ABI_THREAD.upper()}" elif ENABLE_SHARED: # Linux - expected = f"libpython{PYTHON_VERSION}.so*" + expected = 
f"libpython{py_version}.so*" else: assert freezer.default_bin_includes == [] return
Replace _PyMem_RawStrdup with strdup Per https://github.com/python/cpython/issues/127991#issuecomment-2547810583 Fixes #2568
2024-12-30T01:37:43
python
Easy
marcelotduarte/cx_Freeze
2,583
marcelotduarte__cx_Freeze-2583
[ "2572" ]
cf4dc4997e54208d90d4bdc419276da6af39dbc4
diff --git a/cx_Freeze/executable.py b/cx_Freeze/executable.py --- a/cx_Freeze/executable.py +++ b/cx_Freeze/executable.py @@ -116,7 +116,7 @@ def init_module_name(self) -> str: :rtype: str """ - return f"{self._internal_name}__init__" + return f"__init__{self._internal_name}" @property def init_script(self) -> Path: @@ -143,7 +143,7 @@ def main_module_name(self) -> str: :rtype: str """ - return f"{self._internal_name}__main__" + return f"__main__{self._internal_name}" @property def main_script(self) -> Path: @@ -231,10 +231,10 @@ def target_name(self, name: str | None) -> None: for invalid in STRINGREPLACE: name = name.replace(invalid, "_") name = os.path.normcase(name) - if not name.isidentifier(): + self._internal_name: str = name + if not self.init_module_name.isidentifier(): msg = f"target_name is invalid: {self._name!r}" raise OptionError(msg) - self._internal_name: str = name def validate_executables(dist: Distribution, attr: str, value) -> None: diff --git a/cx_Freeze/initscripts/__startup__.py b/cx_Freeze/initscripts/__startup__.py --- a/cx_Freeze/initscripts/__startup__.py +++ b/cx_Freeze/initscripts/__startup__.py @@ -124,8 +124,8 @@ def run() -> None: """Determines the name of the initscript and execute it.""" name = get_name(sys.executable) try: - # basically, the basename of the executable plus __init__ - module_init = __import__(name + "__init__") + # basically is __init__ plus the basename of the executable + module_init = __import__(f"__init__{name}") except ModuleNotFoundError: # but can be renamed when only one executable exists num = BUILD_CONSTANTS._EXECUTABLES_NUMBER # noqa: SLF001 @@ -137,5 +137,5 @@ def run() -> None: ) raise RuntimeError(msg) from None name = get_name(BUILD_CONSTANTS._EXECUTABLE_NAME_0) # noqa: SLF001 - module_init = __import__(name + "__init__") - module_init.run(name + "__main__") + module_init = __import__(f"__init__{name}") + module_init.run(f"__main__{name}")
diff --git a/tests/test_cli.py b/tests/test_cli.py --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -10,16 +10,16 @@ import pytest from generate_samples import create_package, run_command +from cx_Freeze._compat import BUILD_EXE_DIR, EXE_SUFFIX + if TYPE_CHECKING: from pathlib import Path -SUFFIX = ".exe" if sys.platform == "win32" else "" - SOURCE = """ test.py print("Hello from cx_Freeze") command - cxfreeze test.py --target-dir=dist --excludes=tkinter + cxfreeze --script test.py --target-dir=dist --excludes=tkinter """ @@ -28,7 +28,7 @@ def test_cxfreeze(tmp_path: Path) -> None: create_package(tmp_path, SOURCE) output = run_command(tmp_path) - file_created = tmp_path / "dist" / f"test{SUFFIX}" + file_created = tmp_path / "dist" / f"test{EXE_SUFFIX}" assert file_created.is_file(), f"file not found: {file_created}" output = run_command(tmp_path, file_created, timeout=10) @@ -49,15 +49,30 @@ def test_cxfreeze_additional_help(tmp_path: Path) -> None: assert "usage: " in output +def test_cxfreeze_target_name_not_isidentifier(tmp_path: Path) -> None: + """Test cxfreeze --target-name not isidentifier, but valid filename.""" + create_package(tmp_path, SOURCE) + output = run_command( + tmp_path, + "cxfreeze --script test.py --target-name=12345 --excludes=tkinter", + ) + + file_created = tmp_path / BUILD_EXE_DIR / f"12345{EXE_SUFFIX}" + assert file_created.is_file(), f"file not found: {file_created}" + + output = run_command(tmp_path, file_created, timeout=10) + assert output.startswith("Hello from cx_Freeze") + + def test_cxfreeze_deprecated_behavior(tmp_path: Path) -> None: """Test cxfreeze deprecated behavior.""" create_package(tmp_path, SOURCE) tmp_path.joinpath("test.py").rename(tmp_path / "test2") output = run_command( - tmp_path, "cxfreeze --target-dir=dist --excludes=tkinter test2" + tmp_path, "cxfreeze --install-dir=dist --excludes=tkinter test2" ) - file_created = tmp_path / "dist" / f"test2{SUFFIX}" + file_created = tmp_path / "dist" / f"test2{EXE_SUFFIX}" 
assert file_created.is_file(), f"file not found: {file_created}" output = run_command(tmp_path, file_created, timeout=10) @@ -73,7 +88,7 @@ def test_cxfreeze_deprecated_option(tmp_path: Path) -> None: ) assert "WARNING: deprecated" in output - file_created = tmp_path / "dist" / f"test{SUFFIX}" + file_created = tmp_path / "dist" / f"test{EXE_SUFFIX}" assert file_created.is_file(), f"file not found: {file_created}" output = run_command(tmp_path, file_created, timeout=10) @@ -127,12 +142,12 @@ def test_cxfreeze_include_path(tmp_path: Path) -> None: create_package(tmp_path, SOURCE_TEST_PATH) output = run_command(tmp_path) - executable = tmp_path / "dist" / f"advanced_1{SUFFIX}" + executable = tmp_path / "dist" / f"advanced_1{EXE_SUFFIX}" assert executable.is_file() output = run_command(tmp_path, executable, timeout=10) assert output == OUTPUT1 - executable = tmp_path / "dist" / f"advanced_2{SUFFIX}" + executable = tmp_path / "dist" / f"advanced_2{EXE_SUFFIX}" assert executable.is_file() output = run_command(tmp_path, executable, timeout=10) assert output == OUTPUT2 diff --git a/tests/test_executables.py b/tests/test_executables.py --- a/tests/test_executables.py +++ b/tests/test_executables.py @@ -232,6 +232,7 @@ def test_executables( ("init_script", "console", "console.py"), ("target_name", None, f"test{EXE_SUFFIX}"), ("target_name", "test1", f"test1{EXE_SUFFIX}"), + ("target_name", "12345", f"12345{EXE_SUFFIX}"), ("target_name", "test-0.1", f"test-0.1{EXE_SUFFIX}"), ("target_name", "test.exe", "test.exe"), ("icon", "icon", ("icon.ico", "icon.icns", "icon.png", "icon.svg")), @@ -279,12 +280,6 @@ def test_valid(option, value, result) -> None: OptionError, "target_name cannot contain the path, only the filename: ", ), - ( - Executable, - {"script": "test.py", "target_name": "0test"}, - OptionError, - "target_name is invalid: ", - ), ], ids=[ "executables-invalid-empty", @@ -292,7 +287,6 @@ def test_valid(option, value, result) -> None: "executable-invalid-base", 
"executable-invalid-init_script", "executable-invalid-target_name", - "executable-invalid-target_name-isidentifier", ], ) def test_invalid(
Why is Executable target_name has to be A valid identifier? In https://github.com/marcelotduarte/cx_Freeze/blob/7.2.1/cx_Freeze/executable.py#L234 target_name is required to be a valid identifier. Is there any reason for that? I removed that condition and it seems to work fine. my target_name="6578e4ecf0464d7fb253de58"
Maybe a regression - issue #884 fixed by #889
2024-09-23T03:32:58
python
Easy
rigetti/pyquil
745
rigetti__pyquil-745
[ "744" ]
98dec8330958af4723b7befb51345cea182a886c
diff --git a/pyquil/noise.py b/pyquil/noise.py --- a/pyquil/noise.py +++ b/pyquil/noise.py @@ -324,10 +324,24 @@ def damping_after_dephasing(T1, T2, gate_time): :param float gate_time: The gate duration. :return: A list of Kraus operators. """ - damping = damping_kraus_map(p=1 - np.exp(-float(gate_time) / float(T1))) \ - if T1 != INFINITY else [np.eye(2)] - dephasing = dephasing_kraus_map(p=.5 * (1 - np.exp(-2 * gate_time / float(T2)))) \ - if T2 != INFINITY else [np.eye(2)] + assert T1 >= 0 + assert T2 >= 0 + + if T1 != INFINITY: + damping = damping_kraus_map(p=1 - np.exp(-float(gate_time) / float(T1))) + else: + damping = [np.eye(2)] + + if T2 != INFINITY: + gamma_phi = float(gate_time) / float(T2) + if T1 != INFINITY: + if T2 > 2 * T1: + raise ValueError("T2 is upper bounded by 2 * T1") + gamma_phi -= float(gate_time) / float(2 * T1) + + dephasing = dephasing_kraus_map(p=.5 * (1 - np.exp(-2 * gamma_phi))) + else: + dephasing = [np.eye(2)] return combine_kraus_maps(damping, dephasing)
diff --git a/pyquil/tests/test_noise.py b/pyquil/tests/test_noise.py --- a/pyquil/tests/test_noise.py +++ b/pyquil/tests/test_noise.py @@ -70,7 +70,7 @@ def test_damping_after_dephasing(): dephasing = dephasing_kraus_map(p=.5 * (1 - np.exp(-.2))) ks_ref = combine_kraus_maps(damping, dephasing) - ks_actual = damping_after_dephasing(10, 10, 1) + ks_actual = damping_after_dephasing(20, 40 / 3., 2.) np.testing.assert_allclose(ks_actual, ks_ref)
T2 noise model is wrong when T1 is finite In particular, a damping noise model with T1 will lead to a contribution to the dephasing rate 1/T2 that equals 1/(2*T1).
2018-12-21T19:49:19
python
Hard
pytest-dev/pytest-django
231
pytest-dev__pytest-django-231
[ "228" ]
1f279deb0d46c4f7dd161945b50f6e2add85793a
diff --git a/pytest_django/plugin.py b/pytest_django/plugin.py --- a/pytest_django/plugin.py +++ b/pytest_django/plugin.py @@ -51,6 +51,9 @@ def pytest_addoption(parser): group._addoption('--nomigrations', action='store_true', dest='nomigrations', default=False, help='Disable Django 1.7 migrations on test setup') + group._addoption('--no-force-no-debug', + action='store_true', dest='noforcenodebug', default=False, + help='Disable forcing DEBUG setting to False on test setup') parser.addini(CONFIGURATION_ENV, 'django-configurations class to use by pytest-django.') group._addoption('--liveserver', default=None, @@ -236,7 +239,8 @@ def _django_test_environment(request): if django_settings_is_configured(): from django.conf import settings from .compat import setup_test_environment, teardown_test_environment - settings.DEBUG = False + if not request.config.getvalue('noforcenodebug'): + settings.DEBUG = False setup_test_environment() request.addfinalizer(teardown_test_environment)
diff --git a/tests/test_django_settings_module.py b/tests/test_django_settings_module.py --- a/tests/test_django_settings_module.py +++ b/tests/test_django_settings_module.py @@ -244,6 +244,31 @@ def test_debug_is_false(): assert r.ret == 0 +def test_debug_no_force(testdir, monkeypatch): + monkeypatch.delenv('DJANGO_SETTINGS_MODULE') + testdir.makeconftest(""" + from django.conf import settings + + def pytest_configure(): + settings.configure(SECRET_KEY='set from pytest_configure', + DEBUG=True, + DATABASES={'default': { + 'ENGINE': 'django.db.backends.sqlite3', + 'NAME': ':memory:'}}, + INSTALLED_APPS=['django.contrib.auth', + 'django.contrib.contenttypes',]) + """) + + testdir.makepyfile(""" + from django.conf import settings + def test_debug_is_true(): + assert settings.DEBUG is True + """) + + r = testdir.runpytest('--no-force-no-debug') + assert r.ret == 0 + + @pytest.mark.skipif(not hasattr(django, 'setup'), reason="This Django version does not support app loading") @pytest.mark.django_project(extra_settings="""
why DEBUG is hardcoded to False? Hi https://github.com/pytest-dev/pytest-django/blob/master/pytest_django/plugin.py#L239 this looks not too flexible I tried a lot of things before i found this hardcode - i needed to understand why my liveserver fails, and it returned just standard 500 instead of debug page, and debug is set to True in my test settings So i think this hardcode should be removed to respect test settings
IIRC the setup with Django's testrunner is also False, to reflect what would be used in production, but I am not certain. :+1: for a way to configure/override this. ok i'll prepare PR
2015-04-10T13:35:34
python
Easy
rigetti/pyquil
1,477
rigetti__pyquil-1477
[ "1476" ]
57f0501c2d2bc438f983f81fd5793dc969a04ed3
diff --git a/pyquil/quil.py b/pyquil/quil.py --- a/pyquil/quil.py +++ b/pyquil/quil.py @@ -874,9 +874,9 @@ def __add__(self, other: InstructionDesignator) -> "Program": p = Program() p.inst(self) p.inst(other) - p._calibrations = self.calibrations - p._waveforms = self.waveforms - p._frames = self.frames + p._calibrations = self.calibrations.copy() + p._waveforms = self.waveforms.copy() + p._frames = self.frames.copy() p._memory = self._memory.copy() if isinstance(other, Program): p.calibrations.extend(other.calibrations)
diff --git a/test/unit/test_program.py b/test/unit/test_program.py --- a/test/unit/test_program.py +++ b/test/unit/test_program.py @@ -56,3 +56,31 @@ def test_parameterized_readout_symmetrization(): p += RX(symmetrization[0], 0) p += RX(symmetrization[1], 1) assert parameterized_readout_symmetrization([0, 1]).out() == p.out() + + +def test_adding_does_not_mutate(): + # https://github.com/rigetti/pyquil/issues/1476 + p1 = Program( + """ +DEFCAL RX(pi/2) 32: + FENCE 32 + NONBLOCKING PULSE 32 "rf" drag_gaussian(duration: 3.2e-08, fwhm: 8e-09, t0: 1.6e-08, anh: -190000000.0, alpha: -1.8848698349348032, scale: 0.30631340170943533, phase: 0.0, detuning: 1622438.2425563578) + FENCE 32 + +RX(pi/2) 32 +""" + ) + original_p1 = p1.copy() + p2 = Program( + """ +DEFCAL RX(pi/2) 33: + FENCE 33 + NONBLOCKING PULSE 33 "rf" drag_gaussian(duration: 2e-08, fwhm: 5e-09, t0: 1e-08, anh: -190000000.0, alpha: -0.9473497322033984, scale: 0.25680107985232403, phase: 0.0, detuning: 1322130.5458282642) + FENCE 33 + +RX(pi/2) 33 +""" + ) + p_all = p1 + p2 + assert p1 == original_p1 + assert p1.calibrations != p_all.calibrations diff --git a/test/unit/test_quil.py b/test/unit/test_quil.py old mode 100755 new mode 100644
Adding two `Program`s together unexpectedly mutates first `Program` Pre-Report Checklist -------------------- - [x] I am running the latest versions of pyQuil and the Forest SDK - [x] I checked to make sure that this bug has not already been reported Issue Description ----------------- Summary: when adding two Programs together, like p1 + p2, the first program p1 gets mutated — p1.calibrations will as a result contain the combined calibrations. But I would have expected both p1 and p2 to remain unchanged. I believe the reason for issue is that in the source code, it uses `p.calibrations.extend(other.calibrations)` which mutates the original’s list . https://github.com/rigetti/pyquil/blob/master/pyquil/quil.py#L882 How to Reproduce ---------------- ### Code Snippet ```python print("@@@ p32 before adding") print(p32) print("@@@ p33 before adding") print(p33) p_all = p32 + p33 print("@@@ p32 after adding <-- here is the unexpected behavior") print(p32) print("@@@ p33 after adding") print(p33) print("@@@ p_all after adding") print(p_all) ``` ### Error Output ``` @@@ p32 before adding DEFCAL RX(pi/2) 32: FENCE 32 NONBLOCKING PULSE 32 "rf" drag_gaussian(duration: 3.2e-08, fwhm: 8e-09, t0: 1.6e-08, anh: -190000000.0, alpha: -1.8848698349348032, scale: 0.30631340170943533, phase: 0.0, detuning: 1622438.2425563578) FENCE 32 RX(pi/2) 32 @@@ p33 before adding DEFCAL RX(pi/2) 33: FENCE 33 NONBLOCKING PULSE 33 "rf" drag_gaussian(duration: 2e-08, fwhm: 5e-09, t0: 1e-08, anh: -190000000.0, alpha: -0.9473497322033984, scale: 0.25680107985232403, phase: 0.0, detuning: 1322130.5458282642) FENCE 33 RX(pi/2) 33 @@@ p32 after adding <-- here is the unexpected behavior DEFCAL RX(pi/2) 32: FENCE 32 NONBLOCKING PULSE 32 "rf" drag_gaussian(duration: 3.2e-08, fwhm: 8e-09, t0: 1.6e-08, anh: -190000000.0, alpha: -1.8848698349348032, scale: 0.30631340170943533, phase: 0.0, detuning: 1622438.2425563578) FENCE 32 DEFCAL RX(pi/2) 33: FENCE 33 NONBLOCKING PULSE 33 "rf" drag_gaussian(duration: 
2e-08, fwhm: 5e-09, t0: 1e-08, anh: -190000000.0, alpha: -0.9473497322033984, scale: 0.25680107985232403, phase: 0.0, detuning: 1322130.5458282642) FENCE 33 RX(pi/2) 32 @@@ p33 after adding DEFCAL RX(pi/2) 33: FENCE 33 NONBLOCKING PULSE 33 "rf" drag_gaussian(duration: 2e-08, fwhm: 5e-09, t0: 1e-08, anh: -190000000.0, alpha: -0.9473497322033984, scale: 0.25680107985232403, phase: 0.0, detuning: 1322130.5458282642) FENCE 33 RX(pi/2) 33 @@@ p_all after adding DEFCAL RX(pi/2) 32: FENCE 32 NONBLOCKING PULSE 32 "rf" drag_gaussian(duration: 3.2e-08, fwhm: 8e-09, t0: 1.6e-08, anh: -190000000.0, alpha: -1.8848698349348032, scale: 0.30631340170943533, phase: 0.0, detuning: 1622438.2425563578) FENCE 32 DEFCAL RX(pi/2) 33: FENCE 33 NONBLOCKING PULSE 33 "rf" drag_gaussian(duration: 2e-08, fwhm: 5e-09, t0: 1e-08, anh: -190000000.0, alpha: -0.9473497322033984, scale: 0.25680107985232403, phase: 0.0, detuning: 1322130.5458282642) FENCE 33 RX(pi/2) 32 RX(pi/2) 33 ``` Environment Context ------------------- Operating System: macOS Monterey Python Version (`python -V`): 3.9.13 Quilc Version (`quilc --version`): N/A QVM Version (`qvm --version`): N/A Python Environment Details (`pip freeze` or `conda list`): ``` Package Version ----------------------------- ----------- aiohttp 3.8.1 aiohttp-retry 2.4.6 aiosignal 1.2.0 alabaster 0.7.12 ansiwrap 0.8.4 anyio 3.6.1 appdirs 1.4.4 argon2-cffi 21.3.0 argon2-cffi-bindings 21.2.0 asteval 0.9.26 asttokens 2.0.5 async-timeout 4.0.2 asyncssh 2.10.1 atpublic 3.0.1 attrs 20.3.0 Babel 2.10.1 backcall 0.2.0 beautifulsoup4 4.11.1 benchmark-quantum-gates 0.5.0 bitarray 2.5.1 black 22.3.0 bleach 5.0.0 cachetools 5.0.0 certifi 2021.10.8 cffi 1.15.0 charset-normalizer 2.0.12 click 8.1.3 colorama 0.4.4 commonmark 0.9.1 configobj 5.0.6 coverage 6.3.3 cryptography 37.0.2 cvxopt 1.3.0 cvxpy 1.2.0 cycler 0.11.0 debugpy 1.6.3 decorator 5.1.1 defusedxml 0.7.1 dictdiffer 0.9.0 diskcache 5.4.0 distro 1.7.0 docutils 0.17.1 dpath 2.0.6 dulwich 0.20.35 dvc 2.10.2 
dvc-render 0.0.5 dvclive 0.8.0 ecos 2.0.10 entrypoints 0.4 executing 0.8.3 fastjsonschema 2.15.3 flake8 4.0.1 flake8-black 0.3.2 flake8-docstrings 1.6.0 flatten-dict 0.4.2 flufl.lock 7.0 fonttools 4.33.3 forest-benchmarking 0.8.0 frozenlist 1.3.0 fsspec 2022.3.0 ftfy 6.1.1 funcy 1.17 future 0.18.2 future-fstrings 1.2.0 gitdb 4.0.9 GitPython 3.1.27 google-api-core 2.7.3 google-api-python-client 2.47.0 google-auth 2.6.6 google-auth-httplib2 0.1.0 googleapis-common-protos 1.56.1 gprof2dot 2021.2.21 grandalf 0.6 h11 0.9.0 httpcore 0.11.1 httplib2 0.20.4 httpx 0.15.5 idna 3.3 imagesize 1.3.0 importlib-metadata 4.11.3 iniconfig 1.1.1 ipykernel 6.13.0 ipympl 0.9.2 ipython 8.3.0 ipython-genutils 0.2.0 ipywidgets 7.7.0 iso8601 0.1.16 isort 5.10.1 jedi 0.18.1 Jinja2 3.1.2 joblib 1.1.0 json5 0.9.8 jsonschema 4.5.1 jupyter-client 7.3.1 jupyter-core 4.10.0 jupyter-lsp 1.5.1 jupyter-server 1.17.0 jupyter-server-mathjax 0.2.5 jupyterlab 3.4.2 jupyterlab-git 0.34.2 jupyterlab-lsp 3.10.1 jupyterlab-pygments 0.2.2 jupyterlab-server 2.13.0 jupyterlab-widgets 1.1.0 kaleido 0.2.1 kiwisolver 1.4.2 lark 0.11.3 lmfit 1.0.3 mailchecker 4.1.16 Mako 1.2.1 MarkupSafe 2.1.1 matplotlib 3.5.2 matplotlib-inline 0.1.3 mccabe 0.6.1 mistune 0.8.4 mpmath 1.2.1 msgpack 0.6.2 multidict 6.0.2 mypy-extensions 0.4.3 nanotime 0.5.2 nbclassic 0.3.7 nbclient 0.6.3 nbconvert 6.5.0 nbdime 3.1.1 nbformat 5.4.0 nest-asyncio 1.5.5 networkx 2.8 notebook 6.4.11 notebook-shim 0.1.0 numexpr 2.8.1 numpy 1.21.0 oauth2client 4.1.3 osqp 0.6.2.post5 packaging 21.3 pandas 1.4.2 pandocfilters 1.5.0 papermill 2.3.4 parso 0.8.3 pathspec 0.9.0 patsy 0.5.2 pexpect 4.8.0 phonenumbers 8.12.48 pickleshare 0.7.5 Pillow 9.1.0 pip 22.2.2 platformdirs 2.5.2 plotly 5.8.0 pluggy 1.0.0 prometheus-client 0.14.1 prompt-toolkit 3.0.29 protobuf 3.20.1 psutil 5.9.0 ptyprocess 0.7.0 pure-eval 0.2.2 py 1.11.0 pyaml 21.10.1 pyarrow 5.0.0 pyasn1 0.4.8 pyasn1-modules 0.2.8 pycodestyle 2.8.0 pycparser 2.21 pydantic 1.9.0 pydocstyle 6.1.1 pydot 
1.4.2 PyDrive2 1.10.1 pyflakes 2.4.0 pygit2 1.9.1 Pygments 2.12.0 pygtrie 2.4.2 PyJWT 1.7.1 pyOpenSSL 22.0.0 pyparsing 3.0.9 pyquil 3.1.0 pyrsistent 0.18.1 pytest 7.1.2 pytest-cov 3.0.0 pytest-depends 1.0.1 pytest-profiling 1.7.0 python-benedict 0.25.1 python-dateutil 2.8.2 python-fsutil 0.6.0 python-rapidjson 1.6 python-slugify 6.1.2 pytz 2022.1 PyYAML 6.0 pyzmq 22.3.0 qcs-api-client 0.20.13 qdldl 0.1.5.post2 qpu-hybrid-benchmark-trueq 0.5.8 qutip 4.7.0 qutip-qip 0.2.1 requests 2.27.1 retry 0.9.2 retrying 1.3.3 rfc3339 6.2 rfc3986 1.5.0 rich 12.4.1 rigetti-qpu-hybrid-benchmark 0.5.24 rpcq 3.10.0 rsa 4.8 ruamel.yaml 0.17.21 ruamel.yaml.clib 0.2.6 scikit-learn 1.1.0 scikit-optimize 0.9.0 scipy 1.8.0 scmrepo 0.0.19 scs 3.2.0 seaborn 0.11.2 Send2Trash 1.8.0 setuptools 63.4.1 setuptools-scm 6.4.2 shortuuid 1.0.9 shtab 1.5.4 six 1.16.0 smmap 5.0.0 sniffio 1.2.0 snowballstemmer 2.2.0 soupsieve 2.3.2.post1 Sphinx 4.5.0 sphinx-autodoc-typehints 1.18.1 sphinx-rtd-theme 0.4.3 sphinxcontrib-applehelp 1.0.2 sphinxcontrib-devhelp 1.0.2 sphinxcontrib-htmlhelp 2.0.0 sphinxcontrib-jsmath 1.0.1 sphinxcontrib-qthelp 1.0.3 sphinxcontrib-serializinghtml 1.1.5 stack-data 0.2.0 statsmodels 0.13.2 sympy 1.10.1 tables 3.7.0 tabulate 0.8.9 tenacity 8.0.1 terminado 0.13.3 text-unidecode 1.3 textwrap3 0.9.2 threadpoolctl 3.1.0 tinycss2 1.1.1 tokenize-rt 4.2.1 toml 0.10.2 tomli 2.0.1 tornado 6.1 tqdm 4.64.0 traitlets 5.2.0 trueq 2.13.1 typing_extensions 4.2.0 uncertainties 3.1.6 uritemplate 4.1.1 urllib3 1.26.9 voluptuous 0.13.1 wcwidth 0.2.5 webencodings 0.5.1 websocket-client 1.3.2 wheel 0.37.1 widgetsnbextension 3.6.0 xmltodict 0.13.0 yarl 1.7.2 zc.lockfile 2.0 zipp 3.8.0 ```
2022-09-20T22:23:51
python
Hard
pytest-dev/pytest-django
881
pytest-dev__pytest-django-881
[ "513" ]
bb9e86e0c0141a30d07078f71b288026b6e583d2
diff --git a/pytest_django/fixtures.py b/pytest_django/fixtures.py --- a/pytest_django/fixtures.py +++ b/pytest_django/fixtures.py @@ -304,7 +304,7 @@ def admin_client(db, admin_user): from django.test.client import Client client = Client() - client.login(username=admin_user.username, password="password") + client.force_login(admin_user) return client
diff --git a/tests/test_fixtures.py b/tests/test_fixtures.py --- a/tests/test_fixtures.py +++ b/tests/test_fixtures.py @@ -49,6 +49,17 @@ def test_admin_client_no_db_marker(admin_client): assert force_str(resp.content) == "You are an admin" +# For test below. +@pytest.fixture +def existing_admin_user(django_user_model): + return django_user_model._default_manager.create_superuser('admin', None, None) + + +def test_admin_client_existing_user(db, existing_admin_user, admin_user, admin_client): + resp = admin_client.get("/admin-required/") + assert force_str(resp.content) == "You are an admin" + + @pytest.mark.django_db def test_admin_user(admin_user, django_user_model): assert isinstance(admin_user, django_user_model)
admin_client is not checking for login success `client.login` inside `admin_client` can return `False` in the case when there's an existing admin user with a password set to something other than `'password'`. Perhaps, `admin_client` should use `force_login` instead?
Seems sensible Can you provide a failing test and fix in a PR? Sure, I'll do that in the next couple of days.
2020-10-09T12:42:31
python
Easy
End of preview. Expand in Data Studio

Dataset Summary

Multi-Docker-Eval is a multi-language, multi-dimensional benchmark designed to rigorously evaluate the capability of Large Language Model (LLM)-based agents in automating a critical yet underexplored task: constructing executable Docker environments for real-world software repositories.

How to Use

from datasets import load_dataset
ds = load_dataset('litble/Multi-Docker-Eval')

Dataset Structure

The data format of Multi-Docker-Eval is directly compatible with the swe-factory and RepoLaunch frameworks.

Field name Type Description
instance_id str A formatted instance identifier, typically of the form repo_owner__repo_name-PR-number.
repo str The repository owner/name identifier from GitHub.
pull_number int64 The GitHub pull request number that contains the solution (patch and test_patch).
issue_numbers list(str) A list of the GitHub issue numbers that are addressed and resolved by this pull request.
base_commit str The commit hash representing the HEAD of the repository before the solution PR is applied.
patch str The gold patch: the patch generated by the PR (minus test-related code) that resolved the issue.
test_patch str A test-file patch that was contributed by the solution PR.
problem_statement str The issue title and body.
hints_text str Comments made on the issue prior to the creation date of the solution PR's first commit.
created_at timestamp[s] The creation date of the pull request.
language str The programming language of the repository.
label str Classification of environmental configuration difficulty (Easy/Hard).
Downloads last month
16