func_before stringlengths 12 22.8k | func_after stringlengths 11 24.7k | commit_msg stringlengths 1 32.6k ⌀ | commit_url stringlengths 48 124 | cve_id stringclasses 530 values | cwe_id stringclasses 134 values | file_name stringlengths 4 244 | vulnerability_score int64 0 4 | extension stringclasses 6 values | is_test bool 1 class | date stringdate 1999-11-10 02:42:49 2024-01-29 16:00:57 ⌀ |
|---|---|---|---|---|---|---|---|---|---|---|
private void saveServerPreference(String server, String user, String password, String token, String salt) {
if (user != null) PreferenceUtil.getInstance(context).setUser(user);
if (server != null) PreferenceUtil.getInstance(context).setServer(server);
if (password != null) PreferenceUtil.getInstance(context).setPassword(password);
if (token != null && salt != null) {
String serverID = UUID.randomUUID().toString();
PreferenceUtil.getInstance(context).setPassword(null);
PreferenceUtil.getInstance(context).setToken(token);
PreferenceUtil.getInstance(context).setSalt(salt);
PreferenceUtil.getInstance(context).setServerId(serverID);
loginViewModel.addServer(new Server(serverID, this.serverName, this.username, this.server, token, salt, System.currentTimeMillis()));
return;
}
App.getSubsonicClientInstance(requireContext(), true);
} | private void saveServerPreference(String server, String user, String password, String token, String salt) {
if (user != null) PreferenceUtil.getInstance(context).setUser(user);
if (server != null) PreferenceUtil.getInstance(context).setServer(server);
if (password != null) PreferenceUtil.getInstance(context).setPassword(password);
if (token != null && salt != null) {
String serverID = UUID.randomUUID().toString();
PreferenceUtil.getInstance(context).setPassword(null);
PreferenceUtil.getInstance(context).setToken(token);
PreferenceUtil.getInstance(context).setSalt(salt);
PreferenceUtil.getInstance(context).setServerId(serverID);
loginViewModel.addServer(new Server(serverID, this.serverName, this.username, this.server, token, salt, System.currentTimeMillis()));
return;
}
App.getSubsonicClientInstance(context, true);
} | Reverted corner rounding in playlist adapters | https://github.com/CappielloAntonio/tempo/commit/f109f779dcbb7db419f2ec63c1616372d061b946 | null | null | app/src/main/java/com/cappielloantonio/play/ui/fragment/dialog/ServerSignupDialog.java | 0 | java | false | 2021-08-09T10:28:34Z |
def _call(
self,
prompt: str,
stop: Optional[Sequence[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
"""Generate text from a prompt.
Args:
prompt: The prompt to generate text from.
stop: A list of sequences to stop generation when encountered.
Returns:
The generated text.
Example:
.. code-block:: python
response = llm("Tell me a joke.")
"""
text = []
_run_manager = run_manager or CallbackManagerForLLMRun.get_noop_manager()
for chunk in self.client(prompt, stop=stop, stream=True):
text.append(chunk)
_run_manager.on_llm_new_token(chunk, verbose=self.verbose)
return "".join(text) | def _call(
self,
prompt: str,
stop: Optional[Sequence[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Generate text from a prompt.
Args:
prompt: The prompt to generate text from.
stop: A list of sequences to stop generation when encountered.
Returns:
The generated text.
Example:
.. code-block:: python
response = llm("Tell me a joke.")
"""
text = []
_run_manager = run_manager or CallbackManagerForLLMRun.get_noop_manager()
for chunk in self.client(prompt, stop=stop, stream=True):
text.append(chunk)
_run_manager.on_llm_new_token(chunk, verbose=self.verbose)
return "".join(text) | Merge remote-tracking branch 'upstream/master' | https://github.com/hwchase17/langchain/commit/e12294f00cb3c6d3afd6eaf0541dc3056029fc10 | null | null | langchain/llms/ctransformers.py | 0 | py | false | 2023-06-21T06:45:39Z |
@Override
public FileDiffOutput getModifiedFileAgainstParent(
Project.NameKey project,
ObjectId newCommit,
@Nullable Integer parent,
String fileName,
@Nullable DiffPreferencesInfo.Whitespace whitespace)
throws DiffNotAvailableException {
try {
DiffParameters diffParams = computeDiffParameters(project, newCommit, parent);
FileDiffCacheKey key =
createFileDiffCacheKey(
project,
diffParams.baseCommit(),
newCommit,
fileName,
diffParams.diffAlgorithm(),
whitespace);
return getModifiedFileWithTimeout(key, diffParams);
} catch (IOException e) {
throw new DiffNotAvailableException(
"Failed to evaluate the parent/base commit for commit " + newCommit, e);
}
} | @Override
public FileDiffOutput getModifiedFileAgainstParent(
Project.NameKey project,
ObjectId newCommit,
@Nullable Integer parent,
String fileName,
@Nullable DiffPreferencesInfo.Whitespace whitespace)
throws DiffNotAvailableException {
try {
DiffParameters diffParams = computeDiffParameters(project, newCommit, parent);
FileDiffCacheKey key =
createFileDiffCacheKey(
project,
diffParams.baseCommit(),
newCommit,
fileName,
DEFAULT_DIFF_ALGORITHM,
/* useTimeout= */ true,
whitespace);
return getModifiedFileForKey(key);
} catch (IOException e) {
throw new DiffNotAvailableException(
"Failed to evaluate the parent/base commit for commit " + newCommit, e);
}
} | Use faster fallback diff algorithm in case of timeouts
Current logic uses the default diff algorithm "Histogram Diff". In case
of timeouts, it throws an exception that is propagated to the caller.
In this change, we adjust the logic to follow what was implemented in
PatchListLoader (the old diff cache): If the diff execution times out,
we fallback to the faster histogram algorithm without myers diff and log
a warning message. Notice that in DiffOperations, the fallback is done
outside the cache, i.e. we are requesting the diff from the cache with
new keys that specify which algorithm should be used. In old diff cache,
this fallback was done inside the cache loader. This is one of the
advantages of the new diff cache that we can explicitly specify which
algorithm to use as part of the cache key.
This change implies that slow requests will always have to go through
the timeout before requesting the diffs using the faster algorithm. In a
follow up change, we can enhance the logic by caching negative results
for the histogram diff keys to quickly fallback without having to
hit the timeout.
Change-Id: I34fe29dc166534d835c97beab85661facafac31f | https://github.com/GerritCodeReview/gerrit/commit/541ac10c62fbfa3ad2204e9171c99c499ede46e7 | null | null | java/com/google/gerrit/server/patch/DiffOperationsImpl.java | 0 | java | false | null |
static void lookup_fsmonitor_settings(struct repository *r)
{
const char *const_str;
int bool_value;
if (r->settings.fsmonitor)
return;
/*
* Overload the existing "core.fsmonitor" config setting (which
* has historically been either unset or a hook pathname) to
* now allow a boolean value to enable the builtin FSMonitor
* or to turn everything off. (This does imply that you can't
* use a hook script named "true" or "false", but that's OK.)
*/
switch (repo_config_get_maybe_bool(r, "core.fsmonitor", &bool_value)) {
case 0: /* config value was set to <bool> */
if (bool_value)
fsm_settings__set_ipc(r);
else
fsm_settings__set_disabled(r);
return;
case 1: /* config value was unset */
const_str = getenv("GIT_TEST_FSMONITOR");
break;
case -1: /* config value set to an arbitrary string */
if (repo_config_get_pathname(r, "core.fsmonitor", &const_str))
return; /* should not happen */
break;
default: /* should not happen */
return;
}
if (const_str && *const_str)
fsm_settings__set_hook(r, const_str);
else
fsm_settings__set_disabled(r);
} | static void lookup_fsmonitor_settings(struct repository *r)
{
const char *const_str;
int bool_value;
if (r->settings.fsmonitor)
return;
/*
* Overload the existing "core.fsmonitor" config setting (which
* has historically been either unset or a hook pathname) to
* now allow a boolean value to enable the builtin FSMonitor
* or to turn everything off. (This does imply that you can't
* use a hook script named "true" or "false", but that's OK.)
*/
switch (repo_config_get_maybe_bool(r, "core.fsmonitor", &bool_value)) {
case 0: /* config value was set to <bool> */
if (bool_value)
fsm_settings__set_ipc(r);
else
fsm_settings__set_disabled(r);
return;
case 1: /* config value was unset */
if (check_deprecated_builtin_config(r))
return;
const_str = getenv("GIT_TEST_FSMONITOR");
break;
case -1: /* config value set to an arbitrary string */
if (check_deprecated_builtin_config(r) ||
repo_config_get_pathname(r, "core.fsmonitor", &const_str))
return;
break;
default: /* should not happen */
return;
}
if (const_str && *const_str)
fsm_settings__set_hook(r, const_str);
else
fsm_settings__set_disabled(r);
} | fsmonitor: reintroduce core.useBuiltinFSMonitor
Reintroduce the 'core.useBuiltinFSMonitor' config setting (originally added
in 0a756b2a25 (fsmonitor: config settings are repository-specific,
2021-03-05)) after its removal from the upstream version of FSMonitor.
Upstream, the 'core.useBuiltinFSMonitor' setting was rendered obsolete by
"overloading" the 'core.fsmonitor' setting to take a boolean value. However,
several applications (e.g., 'scalar') utilize the original config setting,
so it should be preserved for a deprecation period before complete removal:
* if 'core.fsmonitor' is a boolean, the user is correctly using the new
config syntax; do not use 'core.useBuiltinFSMonitor'.
* if 'core.fsmonitor' is unspecified, use 'core.useBuiltinFSMonitor'.
* if 'core.fsmonitor' is a path, override and use the builtin FSMonitor if
'core.useBuiltinFSMonitor' is 'true'; otherwise, use the FSMonitor hook
indicated by the path.
Additionally, for this deprecation period, advise users to switch to using
'core.fsmonitor' to specify their use of the builtin FSMonitor.
Signed-off-by: Victoria Dye <vdye@github.com> | https://github.com/git-for-windows/git/commit/31f219515ae9ef5e3f3e32cd7c6a054de67bb58b | null | null | fsmonitor-settings.c | 0 | c | false | null |
function m(){var b,c,d,e,f,h,i;if(b=vc,c=N(),c!==X)if(ia.test(a.charAt(vc))?(d=a.charAt(vc),vc++):(d=X,0===Bc&&g(ja)),d!==X){for(e=[],f=S();f!==X;)e.push(f),f=S();e!==X?(f=v(),f!==X?(h=o(),h!==X?(i=p(),i!==X?(wc=b,c=ka(d,f,h,i),b=c):(vc=b,b=aa)):(vc=b,b=aa)):(vc=b,b=aa)):(vc=b,b=aa)}else vc=b,b=aa;else vc=b,b=aa;return b} | function m(){var b,c,d,e,f,h,i;if(b=sc,c=N(),c!==X)if(fa.test(a.charAt(sc))?(d=a.charAt(sc),sc++):(d=X,0===xc&&g(ga)),d!==X){for(e=[],f=S();f!==X;)e.push(f),f=S();e!==X?(f=v(),f!==X?(h=o(),h!==X?(i=p(),i!==X?(tc=b,c=ha(d,f,h,i),b=c):(sc=b,b=X)):(sc=b,b=X)):(sc=b,b=X)):(sc=b,b=X)}else sc=b,b=X;else sc=b,b=X;return b} | fix for prototype pollution vulnerability | https://github.com/linkedin/dustjs/commit/ddb6523832465d38c9d80189e9de60519ac307c3 | CVE-2021-4264 | ['CWE-1321'] | dist/dust-full.min.js | 0 | js | false | 2021-09-08T00:12:45Z |
def get_available_packages(
include_non_provider_doc_packages: bool = False,
include_all_providers: bool = False,
include_suspended: bool = False,
include_removed: bool = False,
include_not_ready: bool = False,
) -> list[str]:
"""
Return provider ids for all packages that are available currently (not suspended).
:rtype: object
:param include_suspended: whether the suspended packages should be included
:param include_removed: whether the removed packages should be included
:param include_not_ready: whether the not-ready ppackages should be included
:param include_non_provider_doc_packages: whether the non-provider doc packages should be included
(packages like apache-airflow, helm-chart, docker-stack)
:param include_all_providers: whether "all-providers" should be included ni the list.
"""
provider_ids: list[str] = list(json.loads(PROVIDER_DEPENDENCIES_JSON_FILE_PATH.read_text()).keys())
available_packages = []
not_ready_provider_ids = get_not_ready_provider_ids()
if not include_not_ready:
provider_ids = [
provider_id for provider_id in provider_ids if provider_id not in not_ready_provider_ids
]
if include_non_provider_doc_packages:
available_packages.extend(REGULAR_DOC_PACKAGES)
if include_all_providers:
available_packages.append("all-providers")
available_packages.extend(provider_ids)
if include_suspended:
available_packages.extend(get_suspended_provider_ids())
if include_removed:
available_packages.extend(get_removed_provider_ids())
return sorted(set(available_packages)) | def get_available_packages(
include_non_provider_doc_packages: bool = False,
include_all_providers: bool = False,
include_suspended: bool = False,
include_removed: bool = False,
include_not_ready: bool = False,
include_regular: bool = True,
) -> list[str]:
"""
Return provider ids for all packages that are available currently (not suspended).
:rtype: object
:param include_suspended: whether the suspended packages should be included
:param include_removed: whether the removed packages should be included
:param include_not_ready: whether the not-ready packages should be included
:param include_regular: whether the regular packages should be included
:param include_non_provider_doc_packages: whether the non-provider doc packages should be included
(packages like apache-airflow, helm-chart, docker-stack)
:param include_all_providers: whether "all-providers" should be included ni the list.
"""
provider_dependencies = json.loads(PROVIDER_DEPENDENCIES_JSON_FILE_PATH.read_text())
valid_states = set()
if include_not_ready:
valid_states.add("not-ready")
if include_regular:
valid_states.add("ready")
if include_suspended:
valid_states.add("suspended")
if include_removed:
valid_states.add("removed")
available_packages: list[str] = [
provider_id
for provider_id, provider_dependencies in provider_dependencies.items()
if provider_dependencies["state"] in valid_states
]
if include_non_provider_doc_packages:
available_packages.extend(REGULAR_DOC_PACKAGES)
if include_all_providers:
available_packages.append("all-providers")
return sorted(set(available_packages)) | Merge branch 'main' into trigger_encryption | https://github.com/apache/airflow/commit/bbc7408e175819eb8a2666acddd5bc48f403baf7 | null | null | dev/breeze/src/airflow_breeze/utils/packages.py | 0 | py | false | 2024-01-08T23:50:49Z |
public static String activeMap() {
// if (true) return "sscai/(2)Destination.scx";
// if (true) return "sscai/(2)Heartbreak Ridge.scx";
// if (true) return "sscai/(4)Roadrunner.scx";
// if (true) return "sscai/(?)*.sc?"; // Default map-pack for SSCAIT
// === UMS maps - great for bot development ===============
// === Protoss ============================================
// if (true) return "ums/rav/Dragoons_v_Zerglings.scm";
// if (true) return "ums/rav/ZealDrag_v_LingsHydra.scm";
// if (true) return "ums/rav/ZealDrag_v_ZealDrag.scm";
// if (true) return "ums/rav/Zeal_v_Zeal.scm";
// if (true) return "ums/rav/3Zeal_v_1Zeal.scm";
// if (true) return "ums/rav/4Drag_v_Zeal.scm";
// if (true) return "ums/rav/3Drag_v_1Drag.scm";
// if (true) return "ums/rav/3Drag_v_4Drag.scm";
// if (true) return "ums/rav/4Drag_v_4Drag.scm";
// if (true) return "ums/rav/4Drag_v_5Drag.scm";
// === Terran ==============================================
// if (true) return "ums/rav/T_v_Sunkens.scx";
// if (true) return "ums/rav/T_v_Sunkens2.scx";
// if (true) return "ums/rav/T_v_Sunkens3.scx";
// if (true) return "ums/rav/Wraiths_v_Probes.scm";
// if (true) return "ums/rav/Wraiths_v_Cannons.scm";
// if (true) return "ums/rav/M&M_v_Dragoons_A.scx";
// if (true) return "ums/rav/M&M_v_Dragoons_B.scx";
// if (true) return "ums/rav/M_v_Zealots.scx";
// if (true) return "ums/rav/Vultures_v_Dragoons.scm";
// if (true) return "ums/rav/Vultures_v_Marines.scm";
// if (true) return "ums/rav/Vultures_v_Zealots.scm";
// if (true) return "ums/rav/TanksM&M_v_ZealDrag.scx";
// if (true) return "ums/rav/minimaps/M_v_Zealots.scx";
// if (true) return "ums/rav/minimaps/M&M_v_Zealots.scx";
// if (true) return "ums/rav/minimap2s/3M_v_2Zealots.scx";
// if (true) return "ums/rav/minimaps/4M_v_2Zealots.scx";
// if (true) return "ums/rav/M&M_v_M&M.scx"; // Yours in bad line formation, ~10 away from enemies
// if (true) return "ums/rav/M&M_v_M&M_2.scx"; // Standing in lines, shooting at another
if (true) return "ums/rav/M&M_v_M&M_3.scx"; // You attacking behind the corner
// === Gosu bots - advanced single player cheating bots ====
// if (true) return "ums/7th.scx"; // v. AI Protoss player, that can kill CSv constructing
// if (true) return "ums/exp_skilltest.scx"; // v. AI Protoss player
// if (true) return "ums/vsGosuComputer.scx"; // v. AI Zerg Player - cheating as fuck
// if (true) return "ums/lt-terran1j.scm"; // Zerg v. Terran
// if (true) return "ums/member_test.scx"; // v. AI 2x Protoss players, massive Zealot rush
// if (true) return "ums/LostTemple.scm"; // v. 2x Protoss players, again Zealot rush
// =========================================================
// if (true) return "ums/gol_v_zeals.scx";
// if (true) return "ums/aaa (1).scx"; // Nice mini maps for terran
// if (true) return "ums/aaa (2).scx";
// if (true) return "ums/aaa (3).scx";
// if (true) return "ums/aaa (4).scx";
// if (true) return "ums/aaa (5).scx";
// if (true) return "ums/aaa (6).scx";
// if (true) return "ums/marines/m (5).scx"; // Nice map to test different terran infantry in rounds
// if (true) return "ums/marines/m (8).scx"; // Hmm
// === Generic ======================================================
// if (true) return "ums/dragoons_v_map.scx"; // 4 Dragoons attacking Zealots
// if (true) return "ums/mar_v_zea.scx"; // Marines & Medics v. Zealots on quite small map
// if (true) return "ums/NeWconTrol.scx"; // Cool minigames, starting with 2 drones v. 2 drones, lings v. goons etc
// if (true) return "ums/training-PvT.scx"; // Dragoons & Zealots v. Vultures & Tanks + slowly Hi-Templars & Archons
// if (true) return "ums/marines_v_zerglings.scm"; // 12 marines v. 24 zerglings
// if (true) return "ums/ConTrol2.scx"; // More minigames
// if (true) return "ums/micro challenge.scx"; // Even more minigames
// if (true) return "ums/tank-dropship.scm"; // 2 Tanks & 2 Dropships v. Dragoons
// if (true) return "ums/trainzvreaver.scm"; // Zerglings & Hydras v. 2 Reavers & Shuttle
// if (true) return "ums/trening nr 2.scx";
// if (true) return "ums/micro tvp 1.00.scx"; // Huge Terran army (tanks & vultures) v. Zealots & Hi-Templars & Dragoons
// if (true) return "ums/micro3.scx";
// if (true) return "ums/wraiths_v_carriers_obs.scx"; // Wraiths & Valkyries v. Carriers & Observers
// if (true) return "ums/(1)micro3_007.scx";
// if (true) return "ums/dragoon_sweeping_mines.scm"; // 5 dragoons v. mines
// if (true) return "ums/vulture_control.scx"; // Vulture v. Broodlings
// if (true) return "ums/MultiTask PvT.scx"; // Weird - ums but starts with bases
// if (true) return "ums/ControlFighterTZ-Easy.scx"; // Tanks & Marines v. Zerg
// if (true) return "ums/protoss_micro.scx"; // Huge parallel map, bad performance wise
return null;
} | public static String activeMap() {
// if (true) return "sscai/(2)Destination.scx";
if (true) return "sscai/(2)Heartbreak Ridge.scx";
// if (true) return "sscai/(4)Roadrunner.scx";
// if (true) return "sscai/(?)*.sc?"; // Default map-pack for SSCAIT
// === UMS maps - great for bot development ===============
// === Protoss ============================================
// if (true) return "ums/rav/Dragoons_v_Zerglings.scm";
// if (true) return "ums/rav/ZealDrag_v_LingsHydra.scm";
// if (true) return "ums/rav/ZealDrag_v_ZealDrag.scm";
// if (true) return "ums/rav/Zeal_v_Zeal.scm";
// if (true) return "ums/rav/3Zeal_v_1Zeal.scm";
// if (true) return "ums/rav/4Drag_v_Zeal.scm";
// if (true) return "ums/rav/3Drag_v_1Drag.scm";
// if (true) return "ums/rav/3Drag_v_4Drag.scm";
// if (true) return "ums/rav/4Drag_v_4Drag.scm";
// if (true) return "ums/rav/4Drag_v_5Drag.scm";
// === Terran ==============================================
// if (true) return "ums/rav/T_v_Sunkens.scx";
// if (true) return "ums/rav/T_v_Sunkens2.scx";
// if (true) return "ums/rav/T_v_Sunkens3.scx";
// if (true) return "ums/rav/Wraiths_v_Probes.scm";
// if (true) return "ums/rav/Wraiths_v_Cannons.scm";
// if (true) return "ums/rav/M&M_v_Dragoons_A.scx";
// if (true) return "ums/rav/M&M_v_Dragoons_B.scx";
// if (true) return "ums/rav/M_v_Zealots.scx";
// if (true) return "ums/rav/Vultures_v_Dragoons.scm";
// if (true) return "ums/rav/Vultures_v_Marines.scm";
// if (true) return "ums/rav/Vultures_v_Zealots.scm";
// if (true) return "ums/rav/TanksM&M_v_ZealDrag.scx";
// if (true) return "ums/rav/minimaps/M_v_Zealots.scx";
// if (true) return "ums/rav/minimaps/M&M_v_Zealots.scx";
// if (true) return "ums/rav/minimap2s/3M_v_2Zealots.scx";
// if (true) return "ums/rav/minimaps/4M_v_2Zealots.scx";
// if (true) return "ums/rav/M&M_v_M&M.scx"; // Yours in bad line formation, ~10 away from enemies
// if (true) return "ums/rav/M&M_v_M&M_2.scx"; // Standing in lines, shooting at another
if (true) return "ums/rav/M&M_v_M&M_3.scx"; // You attacking behind the corner
// === Gosu bots - advanced single player cheating bots ====
// if (true) return "ums/7th.scx"; // v. AI Protoss player, that can kill CSv constructing
// if (true) return "ums/exp_skilltest.scx"; // v. AI Protoss player
// if (true) return "ums/vsGosuComputer.scx"; // v. AI Zerg Player - cheating as fuck
// if (true) return "ums/lt-terran1j.scm"; // Zerg v. Terran
// if (true) return "ums/member_test.scx"; // v. AI 2x Protoss players, massive Zealot rush
// if (true) return "ums/LostTemple.scm"; // v. 2x Protoss players, again Zealot rush
// =========================================================
// if (true) return "ums/gol_v_zeals.scx";
// if (true) return "ums/aaa (1).scx"; // Nice mini maps for terran
// if (true) return "ums/aaa (2).scx";
// if (true) return "ums/aaa (3).scx";
// if (true) return "ums/aaa (4).scx";
// if (true) return "ums/aaa (5).scx";
// if (true) return "ums/aaa (6).scx";
// if (true) return "ums/marines/m (5).scx"; // Nice map to test different terran infantry in rounds
// if (true) return "ums/marines/m (8).scx"; // Hmm
// === Generic ======================================================
// if (true) return "ums/dragoons_v_map.scx"; // 4 Dragoons attacking Zealots
// if (true) return "ums/mar_v_zea.scx"; // Marines & Medics v. Zealots on quite small map
// if (true) return "ums/NeWconTrol.scx"; // Cool minigames, starting with 2 drones v. 2 drones, lings v. goons etc
// if (true) return "ums/training-PvT.scx"; // Dragoons & Zealots v. Vultures & Tanks + slowly Hi-Templars & Archons
// if (true) return "ums/marines_v_zerglings.scm"; // 12 marines v. 24 zerglings
// if (true) return "ums/ConTrol2.scx"; // More minigames
// if (true) return "ums/micro challenge.scx"; // Even more minigames
// if (true) return "ums/tank-dropship.scm"; // 2 Tanks & 2 Dropships v. Dragoons
// if (true) return "ums/trainzvreaver.scm"; // Zerglings & Hydras v. 2 Reavers & Shuttle
// if (true) return "ums/trening nr 2.scx";
// if (true) return "ums/micro tvp 1.00.scx"; // Huge Terran army (tanks & vultures) v. Zealots & Hi-Templars & Dragoons
// if (true) return "ums/micro3.scx";
// if (true) return "ums/wraiths_v_carriers_obs.scx"; // Wraiths & Valkyries v. Carriers & Observers
// if (true) return "ums/(1)micro3_007.scx";
// if (true) return "ums/dragoon_sweeping_mines.scm"; // 5 dragoons v. mines
// if (true) return "ums/vulture_control.scx"; // Vulture v. Broodlings
// if (true) return "ums/MultiTask PvT.scx"; // Weird - ums but starts with bases
// if (true) return "ums/ControlFighterTZ-Easy.scx"; // Tanks & Marines v. Zerg
// if (true) return "ums/protoss_micro.scx"; // Huge parallel map, bad performance wise
return null;
} | Major improvement to marines & medic coordination, marines waiting for medics in range to attack | https://github.com/Ravaelles/Atlantis/commit/81eb30b4368b10865c1d7d216b9dabea0acbe8e6 | null | null | src/atlantis/config/UseMap.java | 0 | java | false | null |
protected void moveAndRotateMatrixToMatchBone(PoseStack stack, GeoBone bone) {
// First, let's move our render position to the pivot point...
stack.translate(bone.getPivotX() / 16, bone.getPivotY() / 16, bone.getPivotZ() / 16);
stack.mulPose(Vector3f.XP.rotationDegrees(bone.getRotationX()));
stack.mulPose(Vector3f.YP.rotationDegrees(bone.getRotationY()));
stack.mulPose(Vector3f.ZP.rotationDegrees(bone.getRotationZ()));
} | @Deprecated(forRemoval = true)
protected void moveAndRotateMatrixToMatchBone(PoseStack stack, GeoBone bone) {
RenderUtils.translateAndRotateMatrixForBone(stack, bone);
} | Fixes vanilla armor crash | https://github.com/bernie-g/geckolib/commit/a899bbfadaa431774743d376b24fef4c5376fb95 | null | null | Forge/src/main/java/software/bernie/geckolib3/renderers/geo/ExtendedGeoEntityRenderer.java | 0 | java | false | 2022-10-31T20:10:55Z |
function _u(n){return!!du(n)&&(n=Ot(n),"[object Function]"==n||"[object GeneratorFunction]"==n||"[object AsyncFunction]"==n||"[object Proxy]"==n)} | function _u(n){return"number"==typeof n?n:bc(n)?Cn:+n} | Update vendor libs | https://github.com/cockpit-hq/cockpit/commit/690016208850f2d788ebc3c67884d4c692587eb8 | CVE-2023-1160 | ['CWE-1103'] | modules/App/assets/vendor/lodash.js | 0 | js | false | 2023-02-22T01:10:02Z |
@ApiStatus.Internal
public static void init()
{
var overlays = new HashMap<ResourceLocation, IGuiOverlay>();
var orderedOverlays = new ArrayList<ResourceLocation>();
preRegisterVanillaOverlays(overlays, orderedOverlays);
var event = new RegisterGuiOverlaysEvent(overlays, orderedOverlays);
ModLoader.get().postEventWithWrapInModOrder(event, (mc, e) -> ModLoadingContext.get().setActiveContainer(mc), (mc, e) -> ModLoadingContext.get().setActiveContainer(null));
OVERLAYS = orderedOverlays.stream()
.map(id -> new NamedGuiOverlay(id, overlays.get(id)))
.collect(ImmutableList.toImmutableList());
OVERLAYS_BY_NAME = OVERLAYS.stream()
.collect(ImmutableMap.toImmutableMap(NamedGuiOverlay::id, Function.identity()));
} | @ApiStatus.Internal
public static void init()
{
var overlays = new HashMap<ResourceLocation, IGuiOverlay>();
var orderedOverlays = new ArrayList<ResourceLocation>();
preRegisterVanillaOverlays(overlays, orderedOverlays);
var event = new RegisterGuiOverlaysEvent(overlays, orderedOverlays);
ModLoader.get().postEventWithWrapInModOrder(event, (mc, e) -> ModLoadingContext.get().setActiveContainer(mc), (mc, e) -> ModLoadingContext.get().setActiveContainer(null));
OVERLAYS = orderedOverlays.stream()
.map(id -> new NamedGuiOverlay(id, overlays.get(id)))
.collect(ImmutableList.toImmutableList());
OVERLAYS_BY_NAME = OVERLAYS.stream()
.collect(ImmutableMap.toImmutableMap(NamedGuiOverlay::id, Function.identity()));
assignVanillaOverlayTypes();
} | Multiple tweaks and fixes to the recent changes in the client refactor PR (#8836)
* Add an easy way to get the NamedGuiOverlay from a vanilla overlay
* Fix static member ordering crash in UnitTextureAtlasSprite
* Allow boss bar rendering to be cancelled
* Make fluid container datagen use the new name | https://github.com/MinecraftForge/MinecraftForge/commit/a98eeb9d089e667dee55cee65e2f9d790bc9bcdc | null | null | src/main/java/net/minecraftforge/client/gui/overlay/GuiOverlayManager.java | 0 | java | false | null |
function remove_cluster(ids) {
var data = {};
$.each(ids, function(_, cluster) {
data[ "clusterid-" + cluster] = true;
});
$.ajax({
type: 'POST',
url: '/manage/removecluster',
data: data,
timeout: pcs_timeout,
success: function () {
$("#dialog_verify_remove_clusters.ui-dialog-content").each(function(key, item) {$(item).dialog("destroy")});
Pcs.update();
},
error: function (xhr, status, error) {
alert("Unable to remove cluster: " + res + " ("+error+")");
$("#dialog_verify_remove_clusters.ui-dialog-content").each(function(key, item) {$(item).dialog("destroy")});
}
});
} | function remove_cluster(ids) {
var data = {};
$.each(ids, function(_, cluster) {
data[ "clusterid-" + cluster] = true;
});
ajax_wrapper({
type: 'POST',
url: '/manage/removecluster',
data: data,
timeout: pcs_timeout,
success: function () {
$("#dialog_verify_remove_clusters.ui-dialog-content").each(function(key, item) {$(item).dialog("destroy")});
Pcs.update();
},
error: function (xhr, status, error) {
alert("Unable to remove cluster: " + res + " ("+error+")");
$("#dialog_verify_remove_clusters.ui-dialog-content").each(function(key, item) {$(item).dialog("destroy")});
}
});
} | web UI auth: add js/ajax login dialog | https://github.com/ClusterLabs/pcs/commit/acdbbe8307e6f4a36b2c7754765e732e43fe8d17 | CVE-2016-0721 | ['CWE-384'] | pcsd/public/js/pcsd.js | 0 | js | false | 2016-02-08T16:13:19Z |
def _generate_message(self) -> str:
msg = _(
"Failed to execute %(query)s",
query=self.sql_json_execution_context.get_query_details(),
)
if self.failed_reason_msg:
msg = msg + self.failed_reason_msg
if self.suggestion_help_msg is not None:
msg = "{} {} {}".format(msg, os.linesep, self.suggestion_help_msg)
return msg | def _generate_message(self) -> str:
msg = _(
"Failed to execute %(query)s",
query=self.sql_json_execution_context.get_query_details(),
)
if self.failed_reason_msg:
msg = msg + self.failed_reason_msg
if self.suggestion_help_msg is not None:
msg = f"{msg} {os.linesep} {self.suggestion_help_msg}"
return msg | Merge branch 'master' into fix/db-val-param-perms | https://github.com/apache/superset/commit/4e2fd6f4f04c61e8c1d3ec3f233581a05f8b6213 | null | null | superset/sqllab/exceptions.py | 0 | py | false | 2023-06-05T08:42:54Z |
async prepareResult() {
const { surveyId } = this.context
this.setResult({
surveyId
})
} | async beforeSuccess() {
const { surveyId } = this.context
this.setResult({
surveyId
})
} | Merge branch 'master' of https://github.com/openforis/arena into sort-sql-injection-fix | https://github.com/openforis/arena/commit/b61e40964529677193491ff1145efd06b4ed38c7 | CVE-2019-15567 | ['CWE-89'] | server/modules/collectImport/service/collectImport/collectImportJob.js | 0 | js | false | null |
@SuppressWarnings({ "unchecked", "rawtypes" })
@SubscribeEvent
public static void registerModels(ModelRegistryEvent event){
//Register the global entity rendering class.
RenderingRegistry.registerEntityRenderingHandler(BuilderEntity.class, new IRenderFactory<BuilderEntity>(){
@Override
public Render<? super BuilderEntity> createRenderFor(RenderManager manager){
return new Render<BuilderEntity>(manager){
@Override
protected ResourceLocation getEntityTexture(BuilderEntity builder){
return null;
}
@Override
public void doRender(BuilderEntity builder, double x, double y, double z, float entityYaw, float partialTicks){
if(builder.entity != null && builder.entity instanceof AEntityC_Definable){
if(builder.renderData.shouldRender()){
AEntityC_Definable<?> internalEntity = ((AEntityC_Definable<?>) builder.entity);
internalEntity.getRenderer().render(internalEntity, MinecraftForgeClient.getRenderPass() == 1, partialTicks);
}
}
}
};
}});
//Register the TESR wrapper.
ClientRegistry.bindTileEntitySpecialRenderer(BuilderTileEntity.class, new TileEntitySpecialRenderer<BuilderTileEntity>(){
@Override
public void render(BuilderTileEntity builder, double x, double y, double z, float partialTicks, int destroyStage, float alpha){
if(builder.tileEntity != null){
if(builder.renderData.shouldRender()){
builder.tileEntity.getRenderer().render(builder.tileEntity, MinecraftForgeClient.getRenderPass() == 1, partialTicks);
}
}
}
});
//Get the list of default resource packs here to inject a custom parser for auto-generating JSONS.
//FAR easier than trying to use the bloody bakery system.
//Normally we'd add our pack to the current loader, but this gets wiped out during reloads and unless we add our pack to the main list, it won't stick.
//To do this, we use reflection to get the field from the main MC class that holds the master list to add our custom ones.
//((SimpleReloadableResourceManager) Minecraft.getMinecraft().getResourceManager()).reloadResourcePack(new PackResourcePack(MasterLoader.MODID + "_packs"));
List<IResourcePack> defaultPacks = null;
for(Field field : Minecraft.class.getDeclaredFields()){
if(field.getName().equals("defaultResourcePacks") || field.getName().equals("field_110449_ao")){
try{
if(!field.isAccessible()){
field.setAccessible(true);
}
defaultPacks = (List<IResourcePack>) field.get(Minecraft.getMinecraft());
}catch(Exception e){
e.printStackTrace();
}
}
}
//Check to make sure we have the pack list before continuing.
if(defaultPacks == null){
InterfaceCore.logError("Could not get default pack list. Item icons will be disabled.");
return;
}
//Now that we have the custom resource pack location, add our built-in loader.
//This one auto-generates item JSONs.
defaultPacks.add(new PackResourcePack(MasterLoader.MODID + "_packs"));
//Register the core item models. Some of these are pack-based.
//Don't add those as they get added during the pack registration processing.
for(Entry<AItemBase, BuilderItem> entry : BuilderItem.itemMap.entrySet()){
try{
//TODO remove this when we don't have non-pack items.
if(!(entry.getValue().item instanceof AItemPack)){
registerCoreItemRender(entry.getValue());
}
}catch(Exception e){
e.printStackTrace();
}
}
//Now register items for the packs.
//If we ever register a pack item from a non-external pack, we'll need to make a resource loader for it.
//This is done to allow MC/Forge to play nice with item textures.
for(AItemPack<?> packItem : PackParserSystem.getAllPackItems()){
//TODO remove this when the internal system actually works.
if(PackParserSystem.getPackConfiguration(packItem.definition.packID) == null || PackParserSystem.getPackConfiguration(packItem.definition.packID).internallyGenerated){
ModelLoader.setCustomModelResourceLocation(packItem.getBuilder(), 0, new ModelResourceLocation(MasterLoader.MODID + "_packs:" + packItem.definition.packID + AItemPack.PACKID_SEPARATOR + packItem.getRegistrationName(), "inventory"));
}else{
if(!PackResourcePack.createdLoaders.containsKey(packItem.definition.packID)){
defaultPacks.add(new PackResourcePack(packItem.definition.packID));
}
ModelLoader.setCustomModelResourceLocation(packItem.getBuilder(), 0, new ModelResourceLocation(MasterLoader.MODID + "_packs:" + packItem.getRegistrationName(), "inventory"));
}
}
//Now that we've created all the pack loaders, reload the resource manager to add them to the systems.
FMLClientHandler.instance().refreshResources(VanillaResourceType.MODELS);
} | @SuppressWarnings({ "unchecked", "rawtypes" })
@SubscribeEvent
public static void registerModels(ModelRegistryEvent event){
//Register the global entity rendering class.
RenderingRegistry.registerEntityRenderingHandler(BuilderEntity.class, new IRenderFactory<BuilderEntity>(){
@Override
public Render<? super BuilderEntity> createRenderFor(RenderManager manager){
return new Render<BuilderEntity>(manager){
@Override
protected ResourceLocation getEntityTexture(BuilderEntity builder){
return null;
}
@Override
public void doRender(BuilderEntity builder, double x, double y, double z, float entityYaw, float partialTicks){
if(builder.entity != null && builder.entity instanceof AEntityC_Definable){
if(builder.renderData.shouldRender()){
AEntityC_Definable<?> internalEntity = ((AEntityC_Definable<?>) builder.entity);
internalEntity.getRenderer().render(internalEntity, MinecraftForgeClient.getRenderPass() == 1, partialTicks);
}
}
}
};
}});
//Register the TESR wrapper.
ClientRegistry.bindTileEntitySpecialRenderer(BuilderTileEntity.class, new TileEntitySpecialRenderer<BuilderTileEntity>(){
@Override
public void render(BuilderTileEntity builder, double x, double y, double z, float partialTicks, int destroyStage, float alpha){
if(builder.tileEntity != null){
if(builder.renderData.shouldRender()){
if(!builder.getWorld().isAirBlock(builder.getPos())){
builder.tileEntity.getRenderer().render(builder.tileEntity, MinecraftForgeClient.getRenderPass() == 1, partialTicks);
}
}
}
}
});
//Get the list of default resource packs here to inject a custom parser for auto-generating JSONS.
//FAR easier than trying to use the bloody bakery system.
//Normally we'd add our pack to the current loader, but this gets wiped out during reloads and unless we add our pack to the main list, it won't stick.
//To do this, we use reflection to get the field from the main MC class that holds the master list to add our custom ones.
//((SimpleReloadableResourceManager) Minecraft.getMinecraft().getResourceManager()).reloadResourcePack(new PackResourcePack(MasterLoader.MODID + "_packs"));
List<IResourcePack> defaultPacks = null;
for(Field field : Minecraft.class.getDeclaredFields()){
if(field.getName().equals("defaultResourcePacks") || field.getName().equals("field_110449_ao")){
try{
if(!field.isAccessible()){
field.setAccessible(true);
}
defaultPacks = (List<IResourcePack>) field.get(Minecraft.getMinecraft());
}catch(Exception e){
e.printStackTrace();
}
}
}
//Check to make sure we have the pack list before continuing.
if(defaultPacks == null){
InterfaceCore.logError("Could not get default pack list. Item icons will be disabled.");
return;
}
//Now that we have the custom resource pack location, add our built-in loader.
//This one auto-generates item JSONs.
defaultPacks.add(new PackResourcePack(MasterLoader.MODID + "_packs"));
//Register the core item models. Some of these are pack-based.
//Don't add those as they get added during the pack registration processing.
for(Entry<AItemBase, BuilderItem> entry : BuilderItem.itemMap.entrySet()){
try{
//TODO remove this when we don't have non-pack items.
if(!(entry.getValue().item instanceof AItemPack)){
registerCoreItemRender(entry.getValue());
}
}catch(Exception e){
e.printStackTrace();
}
}
//Now register items for the packs.
//If we ever register a pack item from a non-external pack, we'll need to make a resource loader for it.
//This is done to allow MC/Forge to play nice with item textures.
for(AItemPack<?> packItem : PackParserSystem.getAllPackItems()){
//TODO remove this when the internal system actually works.
if(PackParserSystem.getPackConfiguration(packItem.definition.packID) == null || PackParserSystem.getPackConfiguration(packItem.definition.packID).internallyGenerated){
ModelLoader.setCustomModelResourceLocation(packItem.getBuilder(), 0, new ModelResourceLocation(MasterLoader.MODID + "_packs:" + packItem.definition.packID + AItemPack.PACKID_SEPARATOR + packItem.getRegistrationName(), "inventory"));
}else{
if(!PackResourcePack.createdLoaders.containsKey(packItem.definition.packID)){
defaultPacks.add(new PackResourcePack(packItem.definition.packID));
}
ModelLoader.setCustomModelResourceLocation(packItem.getBuilder(), 0, new ModelResourceLocation(MasterLoader.MODID + "_packs:" + packItem.getRegistrationName(), "inventory"));
}
}
//Now that we've created all the pack loaders, reload the resource manager to add them to the systems.
FMLClientHandler.instance().refreshResources(VanillaResourceType.MODELS);
} | Made permanent parts not block part removal, and always spawn when the part is added. Addresses #747. | https://github.com/DonBruce64/MinecraftTransportSimulator/commit/5deec3ce60a938570fd7d7948597ba58432c1dc5 | null | null | src/main/java/minecrafttransportsimulator/rendering/components/InterfaceEventsModelLoader.java | 0 | java | false | 2021-03-14T15:51:02Z |
@SuppressWarnings({"unchecked", "rawtypes"})
public static <C> C wrap(final EventType<C> eventType, final C callback) {
switch (eventType.getId()) {
case REQUEST_STARTED_ID:
return (C)
new Supplier<Flow<Object>>() {
@Override
public Flow<Object> get() {
try {
return ((Supplier<Flow<Object>>) callback).get();
} catch (Throwable t) {
log.warn("Callback for {} threw.", eventType, t);
return Flow.ResultFlow.empty();
}
}
// Make testing easier by delegating equals
@Override
public boolean equals(Object obj) {
return callback.equals(obj);
}
};
case REQUEST_ENDED_ID:
return (C)
new BiFunction<RequestContext, IGSpanInfo, Flow<Void>>() {
@Override
public Flow<Void> apply(RequestContext ctx, IGSpanInfo agentSpan) {
try {
return ((BiFunction<RequestContext, IGSpanInfo, Flow<Void>>) callback)
.apply(ctx, agentSpan);
} catch (Throwable t) {
log.warn("Callback for {} threw.", eventType, t);
return Flow.ResultFlow.empty();
}
}
// Make testing easier by delegating equals
@Override
public boolean equals(Object obj) {
return callback.equals(obj);
}
};
case REQUEST_HEADER_DONE_ID:
return (C)
new Function<RequestContext, Flow<Void>>() {
@Override
public Flow<Void> apply(RequestContext ctx) {
try {
return ((Function<RequestContext, Flow<Void>>) callback).apply(ctx);
} catch (Throwable t) {
log.warn("Callback for {} threw.", eventType, t);
return Flow.ResultFlow.empty();
}
}
// Make testing easier by delegating equals
@Override
public boolean equals(Object obj) {
return callback.equals(obj);
}
};
case REQUEST_HEADER_ID:
return (C)
new TriConsumer<RequestContext, String, String>() {
@Override
public void accept(RequestContext ctx, String key, String value) {
try {
((TriConsumer<RequestContext, String, String>) callback).accept(ctx, key, value);
} catch (Throwable t) {
log.warn("Callback for {} threw.", eventType, t);
}
}
// Make testing easier by delegating equals
@Override
public boolean equals(Object obj) {
return callback.equals(obj);
}
};
case REQUEST_METHOD_URI_RAW_ID:
return (C)
new TriFunction<RequestContext, String, URIDataAdapter, Flow<Void>>() {
@Override
public Flow<Void> apply(RequestContext ctx, String method, URIDataAdapter adapter) {
try {
return ((TriFunction<RequestContext, String, URIDataAdapter, Flow<Void>>)
callback)
.apply(ctx, method, adapter);
} catch (Throwable t) {
log.warn("Callback for {} threw.", eventType, t);
return Flow.ResultFlow.empty();
}
}
// Make testing easier by delegating equals
@Override
public boolean equals(Object obj) {
return callback.equals(obj);
}
};
case REQUEST_CLIENT_SOCKET_ADDRESS_ID:
return (C)
new TriFunction<RequestContext, String, Integer, Flow<Void>>() {
@Override
public Flow<Void> apply(RequestContext ctx, String ip, Integer port) {
try {
return ((TriFunction<RequestContext, String, Integer, Flow<Void>>) callback)
.apply(ctx, ip, port);
} catch (Throwable t) {
log.warn("Callback for {} threw.", eventType, t);
return Flow.ResultFlow.empty();
}
}
// Make testing easier by delegating equals
@Override
public boolean equals(Object obj) {
return callback.equals(obj);
}
};
case REQUEST_BODY_START_ID:
return (C)
new BiFunction<RequestContext, StoredBodySupplier, Void>() {
@Override
public Void apply(RequestContext ctx, StoredBodySupplier storedBodySupplier) {
try {
return ((BiFunction<RequestContext, StoredBodySupplier, Void>) callback)
.apply(ctx, storedBodySupplier);
} catch (Throwable t) {
log.warn("Callback for {} threw.", eventType, t);
return null;
}
}
};
case REQUEST_BODY_DONE_ID:
return (C)
new BiFunction<RequestContext, StoredBodySupplier, Flow<Void>>() {
@Override
public Flow<Void> apply(RequestContext ctx, StoredBodySupplier storedBodySupplier) {
try {
return ((BiFunction<RequestContext, StoredBodySupplier, Flow<Void>>) callback)
.apply(ctx, storedBodySupplier);
} catch (Throwable t) {
log.warn("Callback for {} threw.", eventType, t);
return Flow.ResultFlow.empty();
}
}
};
default:
log.warn("Unwrapped callback for {}", eventType);
return callback;
}
} | @SuppressWarnings({"unchecked", "rawtypes"})
public static <C> C wrap(final EventType<C> eventType, final C callback) {
switch (eventType.getId()) {
case REQUEST_STARTED_ID:
return (C)
new Supplier<Flow<Object>>() {
@Override
public Flow<Object> get() {
try {
return ((Supplier<Flow<Object>>) callback).get();
} catch (Throwable t) {
log.warn("Callback for {} threw.", eventType, t);
return Flow.ResultFlow.empty();
}
}
// Make testing easier by delegating equals
@Override
public boolean equals(Object obj) {
return callback.equals(obj);
}
};
case REQUEST_ENDED_ID:
return (C)
new BiFunction<RequestContext, IGSpanInfo, Flow<Void>>() {
@Override
public Flow<Void> apply(RequestContext ctx, IGSpanInfo agentSpan) {
try {
return ((BiFunction<RequestContext, IGSpanInfo, Flow<Void>>) callback)
.apply(ctx, agentSpan);
} catch (Throwable t) {
log.warn("Callback for {} threw.", eventType, t);
return Flow.ResultFlow.empty();
}
}
// Make testing easier by delegating equals
@Override
public boolean equals(Object obj) {
return callback.equals(obj);
}
};
case REQUEST_HEADER_DONE_ID:
return (C)
new Function<RequestContext, Flow<Void>>() {
@Override
public Flow<Void> apply(RequestContext ctx) {
try {
return ((Function<RequestContext, Flow<Void>>) callback).apply(ctx);
} catch (Throwable t) {
log.warn("Callback for {} threw.", eventType, t);
return Flow.ResultFlow.empty();
}
}
// Make testing easier by delegating equals
@Override
public boolean equals(Object obj) {
return callback.equals(obj);
}
};
case REQUEST_HEADER_ID:
return (C)
new TriConsumer<RequestContext, String, String>() {
@Override
public void accept(RequestContext ctx, String key, String value) {
try {
((TriConsumer<RequestContext, String, String>) callback).accept(ctx, key, value);
} catch (Throwable t) {
log.warn("Callback for {} threw.", eventType, t);
}
}
// Make testing easier by delegating equals
@Override
public boolean equals(Object obj) {
return callback.equals(obj);
}
};
case REQUEST_METHOD_URI_RAW_ID:
return (C)
new TriFunction<RequestContext, String, URIDataAdapter, Flow<Void>>() {
@Override
public Flow<Void> apply(RequestContext ctx, String method, URIDataAdapter adapter) {
try {
return ((TriFunction<RequestContext, String, URIDataAdapter, Flow<Void>>)
callback)
.apply(ctx, method, adapter);
} catch (Throwable t) {
log.warn("Callback for {} threw.", eventType, t);
return Flow.ResultFlow.empty();
}
}
// Make testing easier by delegating equals
@Override
public boolean equals(Object obj) {
return callback.equals(obj);
}
};
case REQUEST_CLIENT_SOCKET_ADDRESS_ID:
return (C)
new TriFunction<RequestContext, String, Integer, Flow<Void>>() {
@Override
public Flow<Void> apply(RequestContext ctx, String ip, Integer port) {
try {
return ((TriFunction<RequestContext, String, Integer, Flow<Void>>) callback)
.apply(ctx, ip, port);
} catch (Throwable t) {
log.warn("Callback for {} threw.", eventType, t);
return Flow.ResultFlow.empty();
}
}
// Make testing easier by delegating equals
@Override
public boolean equals(Object obj) {
return callback.equals(obj);
}
};
case REQUEST_BODY_START_ID:
return (C)
new BiFunction<RequestContext, StoredBodySupplier, Void>() {
@Override
public Void apply(RequestContext ctx, StoredBodySupplier storedBodySupplier) {
try {
return ((BiFunction<RequestContext, StoredBodySupplier, Void>) callback)
.apply(ctx, storedBodySupplier);
} catch (Throwable t) {
log.warn("Callback for {} threw.", eventType, t);
return null;
}
}
};
case REQUEST_BODY_DONE_ID:
return (C)
new BiFunction<RequestContext, StoredBodySupplier, Flow<Void>>() {
@Override
public Flow<Void> apply(RequestContext ctx, StoredBodySupplier storedBodySupplier) {
try {
return ((BiFunction<RequestContext, StoredBodySupplier, Flow<Void>>) callback)
.apply(ctx, storedBodySupplier);
} catch (Throwable t) {
log.warn("Callback for {} threw.", eventType, t);
return Flow.ResultFlow.empty();
}
}
};
case RESPONSE_STARTED_ID:
return (C)
new BiConsumer<RequestContext, Integer>() {
@Override
public void accept(RequestContext ctx, Integer status) {
try {
((BiConsumer<RequestContext, Integer>) callback).accept(ctx, status);
} catch (Throwable t) {
log.warn("Callback for {} threw.", eventType, t);
}
}
};
default:
log.warn("Unwrapped callback for {}", eventType);
return callback;
}
} | Attack reports with response status code and blocking status | https://github.com/DataDog/dd-trace-java/commit/6d11f4eedda162989cc2e6b4b91180d8fd619728 | null | null | internal-api/src/main/java/datadog/trace/api/gateway/InstrumentationGateway.java | 0 | java | false | 2021-10-03T21:22:04Z |
AuthenticationToken newSyntheticPassword(int userId) {
clearSidForUser(userId);
AuthenticationToken result = AuthenticationToken.create();
saveEscrowData(result, userId);
return result;
} | SyntheticPassword newSyntheticPassword(int userId) {
clearSidForUser(userId);
SyntheticPassword result = SyntheticPassword.create();
saveEscrowData(result, userId);
return result;
} | Update synthetic password terminology to match new design doc
Update terminology to eliminate ambiguity and to match
http://go/android-locksettings-design :
- The class that represents a synthetic password is now called
SyntheticPassword instead of AuthenticationToken. This eliminates an
inconsistency and avoids ambiguity with the other types of
authentication tokens (HardwareAuthTokens and escrow tokens).
- "LSKF" is now used in preference to "password", which could be
confused with LSKFs of type password and with the many other types of
password (synthetic, Keystore, Gatekeeper). "Password" is still used
in places like "password data", "password metrics", and "password
history"; renaming those in the design doc and code is left for later.
- The things that protect the SP are now called "SP protectors", or just
"protectors" when SP is clear from context. Previously these were
called "synthetic passwords" (ambiguous with the SP) or "SP blobs"
(ambiguous with the spblob file, which is just part of a protector).
- The 64-bit integers that identify protectors are now called "protector
IDs" instead of "synthetic password handles". This avoids ambiguity
with the SP's Gatekeeper password handle (which in the code is just
called a "synthetic password handle"; a later CL might clarify that),
and it clarifies that the identified items are SP protectors, not SPs.
- The secret that each protector uses to protect the SP is now called
the "protector secret" instead of the application ID. This avoids
ambiguity with the Keystore application ID, which isn't being used and
is a less intuitive name.
No behavior changes intended, except for some changed log messages.
Test: atest com.android.server.locksettings
Test: Basic manual test of locksettings core functionality: upgraded a
device that has a pattern set, without wiping userdata; unlocked;
changed to PIN; rebooted; unlocked; changed to swipe; rebooted;
changed to password; rebooted; and unlocked.
Change-Id: I564a738119a47a31b4822d26c6405249f8ce1c06 | https://github.com/LineageOS/android_frameworks_base/commit/c37987fe0d097c3b76188ae14991634c3f8d0715 | null | null | services/core/java/com/android/server/locksettings/SyntheticPasswordManager.java | 0 | java | false | 2022-07-15T00:23:41Z |
public static Message deserialize(String messageJson) {
try {
RawResponse rawResponse = JsonHelper.toObject(messageJson, RawResponse.class);
if (rawResponse == null)
throw new ApiRuntimeException("Could not parse response");
long msgId = 0;
if (rawResponse.getReflection() != null) {
msgId = rawResponse.getReflection().get("msg_id").asLong();
}
if (rawResponse.getType().equals("jsonwsp/fault")) {
Error error = new Error(msgId);
error.setFault(rawResponse.getFault());
} else {
MethodType methodType = MethodType.convert(rawResponse.getMethodname());
switch (Objects.requireNonNull(methodType)) {
case SUBMIT_TX:
return SubmitTxResponse.deserialize(msgId, rawResponse.getResult());
case EVALUATE_TX:
return EvaluateTxResponse.deserialize(msgId, rawResponse.getResult());
}
}
} catch (JsonProcessingException e) {
log.warn("Cannot deserialize message. Message does not contain \"reflection\" parameter", e);
}
return null;
} | public static Message deserialize(String messageJson) {
try {
RawResponse rawResponse = JsonHelper.toObject(messageJson, RawResponse.class);
if (rawResponse == null)
throw new ApiRuntimeException("Could not parse response");
long msgId = 0;
if (rawResponse.getReflection() != null) {
msgId = rawResponse.getReflection().get("msg_id").asLong();
}
if (rawResponse.getType().equals("jsonwsp/fault")) {
Error error = new Error(msgId);
error.setFault(rawResponse.getFault());
} else {
MethodType methodType = MethodType.convert(rawResponse.getMethodname());
switch (Objects.requireNonNull(methodType)) {
case SUBMIT_TX:
return SubmitTxResponse.deserialize(msgId, rawResponse.getResult());
case EVALUATE_TX:
return EvaluateTxResponse.deserialize(msgId, rawResponse.getResult());
case QUERY: {
QueryType queryType = QueryType.convert(rawResponse.getReflection().get("object").asText());
return QueryResponse.parse(queryType, msgId, rawResponse.getResult());
}
}
}
} catch (JsonProcessingException e) {
log.warn("Cannot deserialize message. Message does not contain \"reflection\" parameter", e);
}
return null;
} | README updated to include latest beta. Bump version to next SNAPSHOT | https://github.com/bloxbean/cardano-client-lib/commit/6f850abb946a576673e3d9d50cbc53e944026a5a | null | null | backend-modules/ogmios/src/main/java/com/bloxbean/cardano/client/backend/ogmios/model/base/Message.java | 0 | java | false | 2022-08-08T15:27:44Z |
@SubscribeEvent
public void onItemTooltip(ItemTooltipEvent event) {
if (!neu.isOnSkyblock()) return;
if (event.toolTip == null) return;
if (event.toolTip.size() > 2 && NotEnoughUpdates.INSTANCE.config.tooltipTweaks.hideDefaultReforgeStats) {
String secondLine = StringUtils.stripControlCodes(event.toolTip.get(1));
if (secondLine.equals("Reforge Stone")) {
Integer startIndex = null;
Integer cutoffIndex = null;
//loop from the back of the List to find the wanted index sooner
for (int i = event.toolTip.size() - 1; i >= 0; i--) {
//rarity or mining level requirement
String line = StringUtils.stripControlCodes(event.toolTip.get(i));
if (line.contains("REFORGE STONE") || line.contains("Requires Mining Skill Level")) {
cutoffIndex = i;
}
//The line where the Hypixel stats start
if (line.contains("(Legendary):")) {
startIndex = i;
break;
}
}
if (startIndex != null && cutoffIndex != null && startIndex < cutoffIndex) {
event.toolTip.subList(startIndex, cutoffIndex).clear();
}
}
}
if (Keyboard.isKeyDown(Keyboard.KEY_LCONTROL) && NotEnoughUpdates.INSTANCE.config.hidden.dev &&
event.toolTip.size() > 0 && event.toolTip.get(event.toolTip.size() - 1).startsWith(
EnumChatFormatting.DARK_GRAY + "NBT: ")) {
event.toolTip.remove(event.toolTip.size() - 1);
StringBuilder sb = new StringBuilder();
String nbt = event.itemStack.getTagCompound().toString();
int indent = 0;
for (char c : nbt.toCharArray()) {
boolean newline = false;
if (c == '{' || c == '[') {
indent++;
newline = true;
} else if (c == '}' || c == ']') {
indent--;
sb.append("\n");
for (int i = 0; i < indent; i++) sb.append(" ");
} else if (c == ',') {
newline = true;
} else if (c == '\"') {
sb.append(EnumChatFormatting.RESET).append(EnumChatFormatting.GRAY);
}
sb.append(c);
if (newline) {
sb.append("\n");
for (int i = 0; i < indent; i++) sb.append(" ");
}
}
event.toolTip.add(sb.toString());
if (Keyboard.isKeyDown(Keyboard.KEY_H)) {
if (!copied) {
copied = true;
StringSelection selection = new StringSelection(sb.toString());
Toolkit.getDefaultToolkit().getSystemClipboard().setContents(selection, selection);
}
} else {
copied = false;
}
} else if (NotEnoughUpdates.INSTANCE.packDevEnabled) {
event.toolTip.add("");
event.toolTip.add(EnumChatFormatting.AQUA + "NEU Pack Dev Info:");
event.toolTip.add(
EnumChatFormatting.GRAY + "Press " + EnumChatFormatting.GOLD + "[KEY]" + EnumChatFormatting.GRAY +
" to copy line");
String internal = NotEnoughUpdates.INSTANCE.manager.getInternalNameForItem(event.itemStack);
boolean k = Keyboard.isKeyDown(Keyboard.KEY_K);
boolean m = Keyboard.isKeyDown(Keyboard.KEY_M);
boolean n = Keyboard.isKeyDown(Keyboard.KEY_N);
event.toolTip.add(
EnumChatFormatting.AQUA + "Internal Name: " + EnumChatFormatting.GRAY + internal + EnumChatFormatting.GOLD +
" [K]");
if (!copied && k) {
MiscUtils.copyToClipboard(internal);
}
if (event.itemStack.getTagCompound() != null) {
NBTTagCompound tag = event.itemStack.getTagCompound();
if (tag.hasKey("SkullOwner", 10)) {
GameProfile gameprofile = NBTUtil.readGameProfileFromNBT(tag.getCompoundTag("SkullOwner"));
if (gameprofile != null) {
event.toolTip.add(EnumChatFormatting.AQUA + "Skull UUID: " + EnumChatFormatting.GRAY + gameprofile.getId() +
EnumChatFormatting.GOLD + " [M]");
if (!copied && m) {
MiscUtils.copyToClipboard(gameprofile.getId().toString());
}
Map<MinecraftProfileTexture.Type, MinecraftProfileTexture> map =
Minecraft.getMinecraft().getSkinManager().loadSkinFromCache(gameprofile);
if (map.containsKey(MinecraftProfileTexture.Type.SKIN)) {
MinecraftProfileTexture profTex = map.get(MinecraftProfileTexture.Type.SKIN);
event.toolTip.add(
EnumChatFormatting.AQUA + "Skull Texture Link: " + EnumChatFormatting.GRAY + profTex.getUrl() +
EnumChatFormatting.GOLD + " [N]");
if (!copied && n) {
MiscUtils.copyToClipboard(profTex.getUrl());
}
}
}
}
}
copied = k || m || n;
}
} | @SubscribeEvent
public void onItemTooltip(ItemTooltipEvent event) {
if (!neu.isOnSkyblock()) return;
if (event.toolTip == null) return;
if (event.toolTip.size() > 2 && NotEnoughUpdates.INSTANCE.config.tooltipTweaks.hideDefaultReforgeStats) {
String secondLine = StringUtils.stripControlCodes(event.toolTip.get(1));
if (secondLine.equals("Reforge Stone")) {
Integer startIndex = null;
Integer cutoffIndex = null;
//loop from the back of the List to find the wanted index sooner
for (int i = event.toolTip.size() - 1; i >= 0; i--) {
//rarity or mining level requirement
String line = StringUtils.stripControlCodes(event.toolTip.get(i));
if (line.contains("REFORGE STONE") || line.contains("Requires Mining Skill Level")) {
cutoffIndex = i;
}
//The line where the Hypixel stats start
if (line.contains("(Legendary):")) {
startIndex = i;
break;
}
}
if (startIndex != null && cutoffIndex != null && startIndex < cutoffIndex) {
event.toolTip.subList(startIndex, cutoffIndex).clear();
}
}
}
if (Keyboard.isKeyDown(Keyboard.KEY_LCONTROL) && NotEnoughUpdates.INSTANCE.config.hidden.dev &&
event.toolTip.size() > 0 && event.toolTip.get(event.toolTip.size() - 1).startsWith(
EnumChatFormatting.DARK_GRAY + "NBT: ")) {
event.toolTip.remove(event.toolTip.size() - 1);
StringBuilder sb = new StringBuilder();
String nbt = event.itemStack.getTagCompound().toString();
int indent = 0;
for (char c : nbt.toCharArray()) {
boolean newline = false;
if (c == '{' || c == '[') {
indent++;
newline = true;
} else if (c == '}' || c == ']') {
indent--;
sb.append("\n");
for (int i = 0; i < indent; i++) sb.append(" ");
} else if (c == ',') {
newline = true;
} else if (c == '\"') {
sb.append(EnumChatFormatting.RESET).append(EnumChatFormatting.GRAY);
}
sb.append(c);
if (newline) {
sb.append("\n");
for (int i = 0; i < indent; i++) sb.append(" ");
}
}
event.toolTip.add(sb.toString());
if (Keyboard.isKeyDown(Keyboard.KEY_H)) {
if (!copied) {
copied = true;
StringSelection selection = new StringSelection(sb.toString());
Toolkit.getDefaultToolkit().getSystemClipboard().setContents(selection, selection);
}
} else {
copied = false;
}
} else if (NotEnoughUpdates.INSTANCE.packDevEnabled) {
event.toolTip.add("");
event.toolTip.add(EnumChatFormatting.AQUA + "NEU Pack Dev Info:");
event.toolTip.add(
EnumChatFormatting.GRAY + "Press " + EnumChatFormatting.GOLD + "[KEY]" + EnumChatFormatting.GRAY +
" to copy line");
String internal = NotEnoughUpdates.INSTANCE.manager.getInternalNameForItem(event.itemStack);
boolean k = Keyboard.isKeyDown(Keyboard.KEY_K);
boolean m = Keyboard.isKeyDown(Keyboard.KEY_M);
boolean n = Keyboard.isKeyDown(Keyboard.KEY_N);
boolean f = Keyboard.isKeyDown(Keyboard.KEY_F);
if (!copied && f && NotEnoughUpdates.INSTANCE.config.hidden.dev) {
MiscUtils.copyToClipboard(NotEnoughUpdates.INSTANCE.manager.getSkullValueForItem(event.itemStack));
}
event.toolTip.add(
EnumChatFormatting.AQUA + "Internal Name: " + EnumChatFormatting.GRAY + internal + EnumChatFormatting.GOLD +
" [K]");
if (!copied && k) {
MiscUtils.copyToClipboard(internal);
}
if (event.itemStack.getTagCompound() != null) {
NBTTagCompound tag = event.itemStack.getTagCompound();
if (tag.hasKey("SkullOwner", 10)) {
GameProfile gameprofile = NBTUtil.readGameProfileFromNBT(tag.getCompoundTag("SkullOwner"));
if (gameprofile != null) {
event.toolTip.add(EnumChatFormatting.AQUA + "Skull UUID: " + EnumChatFormatting.GRAY + gameprofile.getId() +
EnumChatFormatting.GOLD + " [M]");
if (!copied && m) {
MiscUtils.copyToClipboard(gameprofile.getId().toString());
}
Map<MinecraftProfileTexture.Type, MinecraftProfileTexture> map =
Minecraft.getMinecraft().getSkinManager().loadSkinFromCache(gameprofile);
if (map.containsKey(MinecraftProfileTexture.Type.SKIN)) {
MinecraftProfileTexture profTex = map.get(MinecraftProfileTexture.Type.SKIN);
event.toolTip.add(
EnumChatFormatting.AQUA + "Skull Texture Link: " + EnumChatFormatting.GRAY + profTex.getUrl() +
EnumChatFormatting.GOLD + " [N]");
if (!copied && n) {
MiscUtils.copyToClipboard(profTex.getUrl());
}
}
}
}
}
copied = k || m || n || f;
}
} | Added bestiary tab to pv (#177)
* some progress - missing be mobs from crimson and dungeons
* fix id's
* works :tm:
* working mobs levels (for hub and island because lazy
* idk seems right
* Added bestiary texture, fixed name bug, added changelog
i hope i didnt mess up anything.
* i did smth
* use 2.1.md from master because yes
* L europe
* Finished be tab
* hi im just putting it here because too lazy to open a pr for this
k, thanks bye
* unbelievable
* fix crash
* fix crash with old repo
* stfu infer
* europe wins
* l o n g
Co-authored-by: jani270 <jani270@gmx.de> | https://github.com/Moulberry/NotEnoughUpdates/commit/6e40cd5d2426eee681b091acc6e0822d4d7c45b5 | null | null | src/main/java/io/github/moulberry/notenoughupdates/listener/ItemTooltipListener.java | 0 | java | false | 2022-07-07T16:07:38Z |
@Override
public int onStartCommand(Intent intent, int flags, int startId) {
Log.e("WallpaperService", "onStartCommand");
startForeground(3000, new NotificationCompat.Builder(this, "4000")
.setSmallIcon(dev.oneuiproject.oneui.R.drawable.ic_oui_wallpaper_outline)
.setContentTitle(getString(R.string.wallpaper_service))
.setContentText(getString(R.string.noti_desc))
.setContentIntent(PendingIntent.getActivity(
WallpaperService.this,
0,
new Intent(Settings.ACTION_CHANNEL_NOTIFICATION_SETTINGS).putExtra(Settings.EXTRA_APP_PACKAGE, getPackageName()).putExtra(Settings.EXTRA_CHANNEL_ID, "4000"),
PendingIntent.FLAG_UPDATE_CURRENT | PendingIntent.FLAG_IMMUTABLE))
.setVisibility(NotificationCompat.VISIBILITY_SECRET)
.build());
return START_STICKY;
} | @Override
public int onStartCommand(Intent intent, int flags, int startId) {
Log.e("WallpaperService", "onStartCommand");
startForeground(3000, new NotificationCompat.Builder(this, "4000")
.setSmallIcon(R.drawable.ic_oui_wallpaper_outline)
.setContentTitle(getString(R.string.wallpaper_service))
.setContentText(getString(R.string.noti_desc))
.setContentIntent(PendingIntent.getActivity(
WallpaperService.this,
0,
new Intent(Settings.ACTION_CHANNEL_NOTIFICATION_SETTINGS).putExtra(Settings.EXTRA_APP_PACKAGE, getPackageName()).putExtra(Settings.EXTRA_CHANNEL_ID, "4000"),
PendingIntent.FLAG_UPDATE_CURRENT | PendingIntent.FLAG_IMMUTABLE))
.setVisibility(NotificationCompat.VISIBILITY_SECRET)
.build());
return START_STICKY;
} | app: v1.1.1
- fix crash with big images
- bump dependencies | https://github.com/Yanndroid/DualWallpaper/commit/e0581767be1ff0030ef182545dcb22f72dbf2bec | null | null | app/src/main/java/de/dlyt/yanndroid/dualwallpaper/WallpaperService.java | 0 | java | false | 2022-07-29T13:14:03Z |
@Override
public boolean gatherDictionaryValuesIndex(@NotNull final ReadOnlyIndex.SearchIterator keysToVisit,
@NotNull final OrderedKeys.Iterator knownKeys,
@NotNull final Index.SequentialBuilder sequentialBuilder) {
final long pageFirstKey = firstRow(keysToVisit.currentValue());
final long pageLastKey = pageFirstKey + dictionary.size() - 1;
if (knownKeys.peekNextKey() != pageFirstKey) {
// We need to add the entire page
sequentialBuilder.appendRange(pageFirstKey, pageLastKey);
advanceToNextPage(knownKeys);
} else {
final long knownSize = advanceToNextPageAndGetPositionDistance(knownKeys);
if (knownSize != dictionary.size()) {
sequentialBuilder.appendRange(pageFirstKey + knownSize, pageLastKey);
}
}
return advanceToNextPage(keysToVisit);
} | @Override
public boolean gatherDictionaryValuesIndex(
@NotNull final ReadOnlyIndex.SearchIterator keysToVisit,
@NotNull final OrderedKeys.Iterator knownKeys,
@NotNull final Index.SequentialBuilder sequentialBuilder) {
final long dictSize = getDictionaryChunk().size();
final long pageFirstKey = firstRow(keysToVisit.currentValue());
final long pageLastKey = pageFirstKey + dictSize - 1;
if (knownKeys.peekNextKey() != pageFirstKey) {
// We need to add the entire page
sequentialBuilder.appendRange(pageFirstKey, pageLastKey);
advanceToNextPage(knownKeys);
} else {
final long knownSize = advanceToNextPageAndGetPositionDistance(knownKeys);
if (knownSize != dictSize) {
sequentialBuilder.appendRange(pageFirstKey + knownSize, pageLastKey);
}
}
return advanceToNextPage(keysToVisit);
} | QST to graphviz DOT format; and SVG, PNG, and others (#935) | https://github.com/deephaven/deephaven-core/commit/7b2eacfb6aeb9ec889ca6bba02146d737856ed4c | null | null | DB/src/main/java/io/deephaven/db/v2/sources/regioned/ColumnRegionChunkDictionary.java | 0 | java | false | 2021-08-26T19:49:30Z |
public synchronized List<InformationLeakTest<DirectRaccoonOracleTestInfo>>
getRaccoonTestResultList() {
@SuppressWarnings("unchecked")
ListResult<InformationLeakTest<DirectRaccoonOracleTestInfo>> listResult =
(ListResult<InformationLeakTest<DirectRaccoonOracleTestInfo>>)
getListResult(TlsAnalyzedProperty.DIRECTRACCOON_TEST_RESULT);
return listResult == null ? null : listResult.getList();
} | public synchronized List<InformationLeakTest<DirectRaccoonOracleTestInfo>>
getRaccoonTestResultList() {
@SuppressWarnings("unchecked")
ListResult<InformationLeakTest<DirectRaccoonOracleTestInfo>> listResult =
(ListResult<InformationLeakTest<DirectRaccoonOracleTestInfo>>)
getListResult(TlsAnalyzedProperty.DIRECT_RACCOON_TEST_RESULT);
return listResult == null ? null : listResult.getList();
} | renaming of attack properties in respective classes | https://github.com/tls-attacker/TLS-Scanner/commit/6bb6a655d6e49315120585db5ee7bd1c0baf7683 | null | null | TLS-Server-Scanner/src/main/java/de/rub/nds/tlsscanner/serverscanner/report/ServerReport.java | 0 | java | false | null |
static void alpha_perf_event_irq_handler(unsigned long la_ptr,
struct pt_regs *regs)
{
struct cpu_hw_events *cpuc;
struct perf_sample_data data;
struct perf_event *event;
struct hw_perf_event *hwc;
int idx, j;
__get_cpu_var(irq_pmi_count)++;
cpuc = &__get_cpu_var(cpu_hw_events);
/* Completely counting through the PMC's period to trigger a new PMC
* overflow interrupt while in this interrupt routine is utterly
* disastrous! The EV6 and EV67 counters are sufficiently large to
* prevent this but to be really sure disable the PMCs.
*/
wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
/* la_ptr is the counter that overflowed. */
if (unlikely(la_ptr >= alpha_pmu->num_pmcs)) {
/* This should never occur! */
irq_err_count++;
pr_warning("PMI: silly index %ld\n", la_ptr);
wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
return;
}
idx = la_ptr;
perf_sample_data_init(&data, 0);
for (j = 0; j < cpuc->n_events; j++) {
if (cpuc->current_idx[j] == idx)
break;
}
if (unlikely(j == cpuc->n_events)) {
/* This can occur if the event is disabled right on a PMC overflow. */
wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
return;
}
event = cpuc->event[j];
if (unlikely(!event)) {
/* This should never occur! */
irq_err_count++;
pr_warning("PMI: No event at index %d!\n", idx);
wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
return;
}
hwc = &event->hw;
alpha_perf_event_update(event, hwc, idx, alpha_pmu->pmc_max_period[idx]+1);
data.period = event->hw.last_period;
if (alpha_perf_event_set_period(event, hwc, idx)) {
if (perf_event_overflow(event, 1, &data, regs)) {
/* Interrupts coming too quickly; "throttle" the
* counter, i.e., disable it for a little while.
*/
alpha_pmu_stop(event, 0);
}
}
wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
return;
} | static void alpha_perf_event_irq_handler(unsigned long la_ptr,
struct pt_regs *regs)
{
struct cpu_hw_events *cpuc;
struct perf_sample_data data;
struct perf_event *event;
struct hw_perf_event *hwc;
int idx, j;
__get_cpu_var(irq_pmi_count)++;
cpuc = &__get_cpu_var(cpu_hw_events);
/* Completely counting through the PMC's period to trigger a new PMC
* overflow interrupt while in this interrupt routine is utterly
* disastrous! The EV6 and EV67 counters are sufficiently large to
* prevent this but to be really sure disable the PMCs.
*/
wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
/* la_ptr is the counter that overflowed. */
if (unlikely(la_ptr >= alpha_pmu->num_pmcs)) {
/* This should never occur! */
irq_err_count++;
pr_warning("PMI: silly index %ld\n", la_ptr);
wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
return;
}
idx = la_ptr;
perf_sample_data_init(&data, 0);
for (j = 0; j < cpuc->n_events; j++) {
if (cpuc->current_idx[j] == idx)
break;
}
if (unlikely(j == cpuc->n_events)) {
/* This can occur if the event is disabled right on a PMC overflow. */
wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
return;
}
event = cpuc->event[j];
if (unlikely(!event)) {
/* This should never occur! */
irq_err_count++;
pr_warning("PMI: No event at index %d!\n", idx);
wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
return;
}
hwc = &event->hw;
alpha_perf_event_update(event, hwc, idx, alpha_pmu->pmc_max_period[idx]+1);
data.period = event->hw.last_period;
if (alpha_perf_event_set_period(event, hwc, idx)) {
if (perf_event_overflow(event, &data, regs)) {
/* Interrupts coming too quickly; "throttle" the
* counter, i.e., disable it for a little while.
*/
alpha_pmu_stop(event, 0);
}
}
wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
return;
} | perf: Remove the nmi parameter from the swevent and overflow interface
The nmi parameter indicated if we could do wakeups from the current
context, if not, we would set some state and self-IPI and let the
resulting interrupt do the wakeup.
For the various event classes:
- hardware: nmi=0; PMI is in fact an NMI or we run irq_work_run from
the PMI-tail (ARM etc.)
- tracepoint: nmi=0; since tracepoint could be from NMI context.
- software: nmi=[0,1]; some, like the schedule thing cannot
perform wakeups, and hence need 0.
As one can see, there is very little nmi=1 usage, and the down-side of
not using it is that on some platforms some software events can have a
jiffy delay in wakeup (when arch_irq_work_raise isn't implemented).
The up-side however is that we can remove the nmi parameter and save a
bunch of conditionals in fast paths.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Michael Cree <mcree@orcon.net.nz>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: Anton Blanchard <anton@samba.org>
Cc: Eric B Munson <emunson@mgebm.net>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jason Wessel <jason.wessel@windriver.com>
Cc: Don Zickus <dzickus@redhat.com>
Link: http://lkml.kernel.org/n/tip-agjev8eu666tvknpb3iaj0fg@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu> | https://github.com/torvalds/linux/commit/a8b0ca17b80e92faab46ee7179ba9e99ccb61233 | null | null | arch/alpha/kernel/perf_event.c | 0 | c | false | null |
def __init__(self):
super().__init__()
self._scopes = []
# FLAG cyclic imports!
from vyper.builtin_functions.functions import get_builtin_functions
from vyper.semantics import environment
from vyper.semantics.types import get_types
self.update(get_types())
self.update(environment.get_constant_vars())
self.update(get_builtin_functions()) | def __init__(self):
super().__init__()
self._scopes = []
# NOTE cyclic imports!
from vyper.builtin_functions.functions import get_builtin_functions
from vyper.semantics import environment
from vyper.semantics.types import get_types
self.update(get_types())
self.update(environment.get_constant_vars())
self.update(get_builtin_functions()) | Merge branch 'master' into fix/dup_storage_slots | https://github.com/vyperlang/vyper/commit/7fc2d1cd39560543bc1133862085f9d8e245d859 | null | null | vyper/semantics/namespace.py | 0 | py | false | 2021-10-24T01:39:16Z |
def get_fields(self, table_data: TableData, path_spec: PathSpec) -> List:
if self.is_s3_platform():
if self.source_config.aws_config is None:
raise ValueError("AWS config is required for S3 file sources")
s3_client = self.source_config.aws_config.get_s3_client(
self.source_config.verify_ssl
)
file = smart_open(
table_data.full_path, "rb", transport_params={"client": s3_client}
)
else:
# We still use smart_open here to take advantage of the compression
# capabilities of smart_open.
file = smart_open(table_data.full_path, "rb")
fields = []
extension = pathlib.Path(table_data.full_path).suffix
from datahub.ingestion.source.data_lake_common.path_spec import (
SUPPORTED_COMPRESSIONS,
)
if path_spec.enable_compression and (extension[1:] in SUPPORTED_COMPRESSIONS):
# Removing the compression extension and using the one before that like .json.gz -> .json
extension = pathlib.Path(table_data.full_path).with_suffix("").suffix
if extension == "" and path_spec.default_extension:
extension = f".{path_spec.default_extension}"
try:
if extension == ".parquet":
fields = parquet.ParquetInferrer().infer_schema(file)
elif extension == ".csv":
fields = csv_tsv.CsvInferrer(
max_rows=self.source_config.max_rows
).infer_schema(file)
elif extension == ".tsv":
fields = csv_tsv.TsvInferrer(
max_rows=self.source_config.max_rows
).infer_schema(file)
elif extension == ".json":
fields = json.JsonInferrer().infer_schema(file)
elif extension == ".avro":
fields = avro.AvroInferrer().infer_schema(file)
else:
self.report.report_warning(
table_data.full_path,
f"file {table_data.full_path} has unsupported extension",
)
file.close()
except Exception as e:
self.report.report_warning(
table_data.full_path,
f"could not infer schema for file {table_data.full_path}: {e}",
)
file.close()
logger.debug(f"Extracted fields in schema: {fields}")
fields = sorted(fields, key=lambda f: f.fieldPath)
return fields | def get_fields(self, table_data: TableData, path_spec: PathSpec) -> List:
if self.is_s3_platform():
if self.source_config.aws_config is None:
raise ValueError("AWS config is required for S3 file sources")
s3_client = self.source_config.aws_config.get_s3_client(
self.source_config.verify_ssl
)
file = smart_open(
table_data.full_path, "rb", transport_params={"client": s3_client}
)
else:
# We still use smart_open here to take advantage of the compression
# capabilities of smart_open.
file = smart_open(table_data.full_path, "rb")
fields = []
extension = pathlib.Path(table_data.full_path).suffix
from datahub.ingestion.source.data_lake_common.path_spec import (
SUPPORTED_COMPRESSIONS,
)
if path_spec.enable_compression and (extension[1:] in SUPPORTED_COMPRESSIONS):
# Removing the compression extension and using the one before that like .json.gz -> .json
extension = pathlib.Path(table_data.full_path).with_suffix("").suffix
if extension == "" and path_spec.default_extension:
extension = f".{path_spec.default_extension}"
try:
if extension == ".parquet":
fields = parquet.ParquetInferrer().infer_schema(file)
elif extension == ".csv":
fields = csv_tsv.CsvInferrer(
max_rows=self.source_config.max_rows
).infer_schema(file)
elif extension == ".tsv":
fields = csv_tsv.TsvInferrer(
max_rows=self.source_config.max_rows
).infer_schema(file)
elif extension == ".json":
fields = json.JsonInferrer().infer_schema(file)
elif extension == ".avro":
fields = avro.AvroInferrer().infer_schema(file)
else:
self.report.report_warning(
table_data.full_path,
f"file {table_data.full_path} has unsupported extension",
)
file.close()
except Exception as e:
self.report.report_warning(
table_data.full_path,
f"could not infer schema for file {table_data.full_path}: {e}",
)
file.close()
logger.debug(f"Extracted fields in schema: {fields}")
fields = sorted(fields, key=lambda f: f.fieldPath)
if self.source_config.add_partition_columns_to_schema:
self.add_partition_columns_to_schema(
fields=fields, path_spec=path_spec, full_path=table_data.full_path
)
return fields | Merge branch 'master' into feat/policyFixes | https://github.com/datahub-project/datahub/commit/395a1bdeb8dfc62df94a10a3bc38b085d082feca | null | null | metadata-ingestion/src/datahub/ingestion/source/s3/source.py | 0 | py | false | 2023-11-03T15:19:26Z |
function setUsedToken(authzToken) {
try {
const { meetingId, requesterUserId } = extractCredentials(this.userId);
check(meetingId, String);
check(requesterUserId, String);
check(authzToken, String);
const payload = {
$set: {
used: true,
},
};
const numberAffected = PresentationUploadToken.update({
meetingId,
userId: requesterUserId,
authzToken,
}, payload);
if (numberAffected) {
Logger.info(`Token: ${authzToken} has been set as used in meeting=${meetingId}`);
}
} catch (err) {
Logger.error(`Exception while invoking method setUsedToken ${err.stack}`);
}
} | async function setUsedToken(authzToken) {
try {
const { meetingId, requesterUserId } = extractCredentials(this.userId);
check(meetingId, String);
check(requesterUserId, String);
check(authzToken, String);
const payload = {
$set: {
used: true,
},
};
const numberAffected = await PresentationUploadToken.updateAsync({
meetingId,
userId: requesterUserId,
authzToken,
}, payload);
if (numberAffected) {
Logger.info(`Token: ${authzToken} has been set as used in meeting=${meetingId}`);
}
} catch (err) {
Logger.error(`Exception while invoking method setUsedToken ${err.stack}`);
}
} | Merge branch 'v2.6.x-release' of github.com:bigbluebutton/bigbluebutton into ssrf-fix | https://github.com/bigbluebutton/bigbluebutton/commit/22de2b49a5d218910923a1048bb73395e53c99bf | CVE-2023-33176 | ['CWE-918'] | bigbluebutton-html5/imports/api/presentation-upload-token/server/methods/setUsedToken.js | 0 | js | false | 2023-04-13T12:40:07Z |
@Override
public Class<?> visitArray(Type.Array t, Void aVoid) {
if (t.kind() == Type.Array.Kind.VECTOR) {
throw new UnsupportedOperationException();
} else {
return MemorySegment.class;
}
} | @Override
public Class<?> visitArray(Type.Array t, Void aVoid) {
if (t.kind() == Type.Array.Kind.VECTOR) {
throw new UnsupportedOperationException("vector");
} else {
return MemorySegment.class;
}
} | Automatic merge of foreign-memaccess+abi into foreign-jextract | https://github.com/openjdk/panama-foreign/commit/7ef6be5d209b9074a4b10c0d34fc9075e380f8be | null | null | src/jdk.incubator.jextract/share/classes/jdk/internal/jextract/impl/TypeTranslator.java | 0 | java | false | 2021-02-18T13:25:53Z |
function Model() {
var _this = this;
Object(_var_www_lime25_limesurvey_assets_packages_questiongroup_node_modules_babel_runtime_corejs2_helpers_esm_classCallCheck__WEBPACK_IMPORTED_MODULE_5__["default"])(this, Model);
/**
* Model's marker collection.
*
* @readonly
* @member {module:engine/model/markercollection~MarkerCollection}
*/
this.markers = new _markercollection__WEBPACK_IMPORTED_MODULE_11__["default"]();
/**
* Model's document.
*
* @readonly
* @member {module:engine/model/document~Document}
*/
this.document = new _document__WEBPACK_IMPORTED_MODULE_10__["default"](this);
/**
* Model's schema.
*
* @readonly
* @member {module:engine/model/schema~Schema}
*/
this.schema = new _schema__WEBPACK_IMPORTED_MODULE_9__["default"]();
/**
* All callbacks added by {@link module:engine/model/model~Model#change} or
* {@link module:engine/model/model~Model#enqueueChange} methods waiting to be executed.
*
* @private
* @type {Array.<Function>}
*/
this._pendingChanges = [];
/**
* The last created and currently used writer instance.
*
* @private
* @member {module:engine/model/writer~Writer}
*/
this._currentWriter = null;
['insertContent', 'deleteContent', 'modifySelection', 'getSelectedContent', 'applyOperation'].forEach(function (methodName) {
return _this.decorate(methodName);
}); // Adding operation validation with `highest` priority, so it is called before any other feature would like
// to do anything with the operation. If the operation has incorrect parameters it should throw on the earliest occasion.
this.on('applyOperation', function (evt, args) {
var operation = args[0];
operation._validate();
}, {
priority: 'highest'
}); // Register some default abstract entities.
this.schema.register('$root', {
isLimit: true
});
this.schema.register('$block', {
allowIn: '$root',
isBlock: true
});
this.schema.register('$text', {
allowIn: '$block',
isInline: true
});
this.schema.register('$clipboardHolder', {
allowContentOf: '$root',
isLimit: true
});
this.schema.extend('$text', {
allowIn: '$clipboardHolder'
}); // An element needed by the `upcastElementToMarker` converter.
// This element temporarily represents a marker boundary during the conversion process and is removed
// at the end of the conversion. `UpcastDispatcher` or at least `Conversion` class looks like a
// better place for this registration but both know nothing about `Schema`.
this.schema.register('$marker');
this.schema.addChildCheck(function (context, childDefinition) {
if (childDefinition.name === '$marker') {
return true;
}
});
Object(_utils_selection_post_fixer__WEBPACK_IMPORTED_MODULE_22__["injectSelectionPostFixer"])(this);
} | function Model() {
var _this = this;
Object(_var_www_limedev_assets_packages_questiongroup_node_modules_babel_runtime_corejs2_helpers_esm_classCallCheck__WEBPACK_IMPORTED_MODULE_5__["default"])(this, Model);
/**
* Model's marker collection.
*
* @readonly
* @member {module:engine/model/markercollection~MarkerCollection}
*/
this.markers = new _markercollection__WEBPACK_IMPORTED_MODULE_11__["default"]();
/**
* Model's document.
*
* @readonly
* @member {module:engine/model/document~Document}
*/
this.document = new _document__WEBPACK_IMPORTED_MODULE_10__["default"](this);
/**
* Model's schema.
*
* @readonly
* @member {module:engine/model/schema~Schema}
*/
this.schema = new _schema__WEBPACK_IMPORTED_MODULE_9__["default"]();
/**
* All callbacks added by {@link module:engine/model/model~Model#change} or
* {@link module:engine/model/model~Model#enqueueChange} methods waiting to be executed.
*
* @private
* @type {Array.<Function>}
*/
this._pendingChanges = [];
/**
* The last created and currently used writer instance.
*
* @private
* @member {module:engine/model/writer~Writer}
*/
this._currentWriter = null;
['insertContent', 'deleteContent', 'modifySelection', 'getSelectedContent', 'applyOperation'].forEach(function (methodName) {
return _this.decorate(methodName);
}); // Adding operation validation with `highest` priority, so it is called before any other feature would like
// to do anything with the operation. If the operation has incorrect parameters it should throw on the earliest occasion.
this.on('applyOperation', function (evt, args) {
var operation = args[0];
operation._validate();
}, {
priority: 'highest'
}); // Register some default abstract entities.
this.schema.register('$root', {
isLimit: true
});
this.schema.register('$block', {
allowIn: '$root',
isBlock: true
});
this.schema.register('$text', {
allowIn: '$block',
isInline: true
});
this.schema.register('$clipboardHolder', {
allowContentOf: '$root',
isLimit: true
});
this.schema.extend('$text', {
allowIn: '$clipboardHolder'
}); // An element needed by the `upcastElementToMarker` converter.
// This element temporarily represents a marker boundary during the conversion process and is removed
// at the end of the conversion. `UpcastDispatcher` or at least `Conversion` class looks like a
// better place for this registration but both know nothing about `Schema`.
this.schema.register('$marker');
this.schema.addChildCheck(function (context, childDefinition) {
if (childDefinition.name === '$marker') {
return true;
}
});
Object(_utils_selection_post_fixer__WEBPACK_IMPORTED_MODULE_22__["injectSelectionPostFixer"])(this);
} | DEV: changed routes in vue.js part questiongroupedit.js and deleted old controller+views | https://github.com/LimeSurvey/LimeSurvey/commit/d7a309bf1a73e95187528982b9db7ec03fee6913 | CVE-2020-23710 | ['CWE-79'] | assets/packages/questiongroup/build/js/questiongroupedit.js | 0 | js | false | 2020-06-08T09:23:29Z |
def _process_sql_expression(
expression: Optional[str],
database_id: int,
schema: str,
template_processor: Optional[BaseTemplateProcessor] = None,
) -> Optional[str]:
if template_processor and expression:
expression = template_processor.process_template(expression)
if expression:
try:
expression = validate_adhoc_subquery(
expression,
database_id,
schema,
)
expression = sanitize_clause(expression)
except (QueryClauseValidationException, SupersetSecurityException) as ex:
raise QueryObjectValidationError(ex.message) from ex
return expression | def _process_sql_expression(
expression: str | None,
database_id: int,
schema: str,
template_processor: BaseTemplateProcessor | None = None,
) -> str | None:
if template_processor and expression:
expression = template_processor.process_template(expression)
if expression:
try:
expression = validate_adhoc_subquery(
expression,
database_id,
schema,
)
expression = sanitize_clause(expression)
except (QueryClauseValidationException, SupersetSecurityException) as ex:
raise QueryObjectValidationError(ex.message) from ex
return expression | Merge branch 'master' into fix/db-val-param-perms | https://github.com/apache/superset/commit/4e2fd6f4f04c61e8c1d3ec3f233581a05f8b6213 | null | null | superset/connectors/sqla/models.py | 0 | py | false | 2023-06-05T08:42:54Z |
@SubscribeEvent(priority = EventPriority.LOWEST)
public static void onGlideTick(TickEvent.PlayerTickEvent event){
if(ArsNouveau.caelusLoaded && event.player.hasEffect(ModPotions.GLIDE_EFFECT))
CaelusHandler.setFlying(event.player);
} | @SubscribeEvent(priority = EventPriority.LOWEST)
public static void onGlideTick(TickEvent.PlayerTickEvent event){
if(ArsNouveau.caelusLoaded && event.player.hasEffect(ModPotions.GLIDE_EFFECT))
CaelusHandler.setFlying(event.player);
// if(!event.player.level.isClientSide && event.player.hasEffect(ModPotions.SHOCKED_EFFECT)){
// event.player.hurt(DamageSource.LIGHTNING_BOLT.bypassArmor().bypassMagic().bypassInvul(), 1.0f);
// }
} | Block piston movement for quark | https://github.com/baileyholl/Ars-Nouveau/commit/a49fac12cbf6cf7c65c2dc85901803e44b35bb03 | null | null | src/main/java/com/hollingsworth/arsnouveau/common/event/EventHandler.java | 0 | java | false | 2021-06-23T01:12:10Z |
@Override
public CatalogContext catalogUpdate(
String diffCommands,
int expectedCatalogVersion,
int nextCatalogVersion,
long genId,
boolean isForReplay,
boolean requireCatalogDiffCmdsApplyToEE,
boolean hasSchemaChange,
boolean requiresNewExportGeneration,
boolean hasSecurityUserChange,
Consumer<Map<Byte, String[]>> replicableTablesConsumer)
{
try {
/*
* Synchronize updates of catalog contexts across the multiple sites on this host. Ensure that catalogUpdate() is
* only performed after all sites reach catalogUpdate(). Once all sites have reached this point the first site to
* execute will perform the actual update while the others wait.
*/
final UpdatableSiteCoordinationBarrier sysProcBarrier = VoltDB.getSiteCountBarrier();
sysProcBarrier.await();
synchronized (sysProcBarrier) {
if (m_catalogContext.catalogVersion != expectedCatalogVersion) {
if (m_catalogContext.catalogVersion < expectedCatalogVersion) {
throw new RuntimeException("Trying to update main catalog context with diff " +
"commands generated for an out-of date catalog. Expected catalog version: " +
expectedCatalogVersion + " does not match actual version: " + m_catalogContext.catalogVersion);
}
assert(m_catalogContext.catalogVersion == nextCatalogVersion);
}
else {
final NodeState prevNodeState = m_statusTracker.set(NodeState.UPDATING);
try {
doCatalogUpdate(diffCommands, nextCatalogVersion, genId, isForReplay,
requireCatalogDiffCmdsApplyToEE, hasSchemaChange,
requiresNewExportGeneration, hasSecurityUserChange,
replicableTablesConsumer);
}
finally {
m_statusTracker.set(prevNodeState);
}
}
}
} catch (InterruptedException | BrokenBarrierException e) {
throw VoltDB.crashLocalVoltDB("Error waiting for barrier", true, e);
}
return m_catalogContext;
} | @Override
public CatalogContext catalogUpdate(
String diffCommands,
int expectedCatalogVersion,
int nextCatalogVersion,
long genId,
boolean isForReplay,
boolean requireCatalogDiffCmdsApplyToEE,
boolean hasSchemaChange,
boolean requiresNewExportGeneration,
boolean hasSecurityUserChange,
Consumer<Map<Byte, String[]>> replicableTablesConsumer)
{
try {
/*
* Synchronize updates of catalog contexts across the multiple sites on this host. Ensure that catalogUpdate() is
* only performed after all sites reach catalogUpdate(). Once all sites have reached this point the first site to
* execute will perform the actual update while the others wait.
*/
final UpdatableSiteCoordinationBarrier sysProcBarrier = VoltDB.getSiteCountBarrier();
sysProcBarrier.await();
synchronized (sysProcBarrier) {
if (m_catalogContext.catalogVersion != expectedCatalogVersion) {
if (m_catalogContext.catalogVersion < expectedCatalogVersion) {
throw new RuntimeException("Trying to update main catalog context with diff " +
"commands generated for an out-of date catalog. Expected catalog version: " +
expectedCatalogVersion + " does not match actual version: " + m_catalogContext.catalogVersion);
}
assert(m_catalogContext.catalogVersion == nextCatalogVersion);
}
else {
final NodeState prevNodeState = m_statusTracker.set(NodeState.UPDATING);
try {
doCatalogUpdate(diffCommands, nextCatalogVersion, genId, isForReplay,
requireCatalogDiffCmdsApplyToEE, hasSchemaChange,
requiresNewExportGeneration, hasSecurityUserChange,
replicableTablesConsumer);
}
finally {
m_statusTracker.set(prevNodeState);
}
}
}
} catch (InterruptedException | BrokenBarrierException e) {
VoltDB.crashLocalVoltDB("Error waiting for barrier", true, e);
}
return m_catalogContext;
} | ENG-22241, skip crash file in junit test (#1172)
VoltDB.crashLocalVoltDB said it was declining to write a crash file in a junit test, and then did it anyway.
Fixed now. We actually decline to write the crash file in a junit test. All other crash actions still apply.
Other sundry cleanup.
- Move crash file writer to a separate private method (makes skipping the call easier to read)
- Fix bogus claim that crashLocalVoltDB returns an exception object
- Replace a few '3-arg' calls with a simpler '1-arg' call where there's only a message text
- Add the missing 2-arg variant to complement the 1, 3, and 4-arg ones we have
- Replace the "get all thread stacktraces and then ignore all but the current thread" call
. Make private some methods that had no need to be public | https://github.com/VoltDB/voltdb/commit/11c1626969f246f2c426db29a2c7b5f46b4d4780 | null | null | src/frontend/org/voltdb/RealVoltDB.java | 0 | java | false | 2022-03-21T17:28:28Z |
def generate_openlineage_events_from_dbt_cloud_run(
operator: DbtCloudRunJobOperator | DbtCloudJobRunSensor, task_instance: TaskInstance
) -> OperatorLineage:
"""
Generate OpenLineage events from the DBT Cloud run.
This function retrieves information about a DBT Cloud run, including the associated job,
project, and execution details. It processes the run's artifacts, such as the manifest and run results,
in parallel for many steps.
Then it generates and emits OpenLineage events based on the executed DBT tasks.
:param operator: Instance of DBT Cloud operator that executed DBT tasks.
It already should have run_id and dbt cloud hook.
:param task_instance: Currently executed task instance
:return: An empty OperatorLineage object indicating the completion of events generation.
"""
from openlineage.common.provider.dbt import DbtCloudArtifactProcessor, ParentRunMetadata
from airflow.providers.openlineage.extractors import OperatorLineage
from airflow.providers.openlineage.plugins.adapter import (
_DAG_NAMESPACE,
_PRODUCER,
OpenLineageAdapter,
)
from airflow.providers.openlineage.plugins.listener import get_openlineage_listener
# if no account_id set this will fallback
job_run = operator.hook.get_job_run(
run_id=operator.run_id, account_id=operator.account_id, include_related=["run_steps,job"]
).json()["data"]
job = job_run["job"]
# retrieve account_id from job and use that starting from this line
account_id = job["account_id"]
project = operator.hook.get_project(project_id=job["project_id"], account_id=account_id).json()["data"]
connection = project["connection"]
execute_steps = job["execute_steps"]
run_steps = job_run["run_steps"]
# filter only dbt invocation steps
steps = []
for run_step in run_steps:
name = run_step["name"]
if name.startswith("Invoke dbt with `"):
regex_pattern = "Invoke dbt with `([^`.]*)`"
m = re.search(regex_pattern, name)
if m and m.group(1) in execute_steps:
steps.append(run_step["index"])
# catalog is available only if docs are generated
catalog = None
with suppress(Exception):
catalog = operator.hook.get_job_run_artifact(operator.run_id, path="catalog.json").json()["data"]
async def get_artifacts_for_steps(steps, artifacts):
"""Get artifacts for a list of steps concurrently."""
tasks = [
operator.hook.get_job_run_artifacts_concurrently(
run_id=operator.run_id,
account_id=account_id,
step=step,
artifacts=artifacts,
)
for step in steps
]
return await asyncio.gather(*tasks)
# get artifacts for steps concurrently
step_artifacts = asyncio.run(
get_artifacts_for_steps(steps=steps, artifacts=["manifest.json", "run_results.json"])
)
# process each step in loop, sending generated events in the same order as steps
for artifacts in step_artifacts:
# process manifest
manifest = artifacts["manifest.json"]
if not artifacts.get("run_results.json", None):
continue
processor = DbtCloudArtifactProcessor(
producer=_PRODUCER,
job_namespace=_DAG_NAMESPACE,
skip_errors=False,
logger=operator.log,
manifest=manifest,
run_result=artifacts["run_results.json"],
profile=connection,
catalog=catalog,
)
# generate same run id of current task instance
parent_run_id = OpenLineageAdapter.build_task_instance_run_id(
operator.task_id, task_instance.execution_date, task_instance.try_number - 1
)
parent_job = ParentRunMetadata(
run_id=parent_run_id,
job_name=f"{task_instance.dag_id}.{task_instance.task_id}",
job_namespace=_DAG_NAMESPACE,
)
processor.dbt_run_metadata = parent_job
events = processor.parse().events()
client = get_openlineage_listener().adapter.get_or_create_openlineage_client()
for event in events:
client.emit(event=event)
return OperatorLineage() | def generate_openlineage_events_from_dbt_cloud_run(
operator: DbtCloudRunJobOperator | DbtCloudJobRunSensor, task_instance: TaskInstance
) -> OperatorLineage:
"""
Generate OpenLineage events from the DBT Cloud run.
This function retrieves information about a DBT Cloud run, including the associated job,
project, and execution details. It processes the run's artifacts, such as the manifest and run results,
in parallel for many steps.
Then it generates and emits OpenLineage events based on the executed DBT tasks.
:param operator: Instance of DBT Cloud operator that executed DBT tasks.
It already should have run_id and dbt cloud hook.
:param task_instance: Currently executed task instance
:return: An empty OperatorLineage object indicating the completion of events generation.
"""
from openlineage.common.provider.dbt import DbtCloudArtifactProcessor, ParentRunMetadata
from airflow.providers.openlineage.extractors import OperatorLineage
from airflow.providers.openlineage.plugins.adapter import (
_DAG_NAMESPACE,
_PRODUCER,
OpenLineageAdapter,
)
from airflow.providers.openlineage.plugins.listener import get_openlineage_listener
# if no account_id set this will fallback
job_run = operator.hook.get_job_run(
run_id=operator.run_id, account_id=operator.account_id, include_related=["run_steps,job"]
).json()["data"]
job = job_run["job"]
# retrieve account_id from job and use that starting from this line
account_id = job["account_id"]
project = operator.hook.get_project(project_id=job["project_id"], account_id=account_id).json()["data"]
connection = project["connection"]
execute_steps = job["execute_steps"]
run_steps = job_run["run_steps"]
# filter only dbt invocation steps
steps = []
for run_step in run_steps:
name = run_step["name"]
if name.startswith("Invoke dbt with `"):
regex_pattern = "Invoke dbt with `([^`.]*)`"
m = re.search(regex_pattern, name)
if m and m.group(1) in execute_steps:
steps.append(run_step["index"])
# catalog is available only if docs are generated
catalog = None
with suppress(Exception):
catalog = operator.hook.get_job_run_artifact(operator.run_id, path="catalog.json").json()["data"]
async def get_artifacts_for_steps(steps, artifacts):
"""Get artifacts for a list of steps concurrently."""
tasks = [
operator.hook.get_job_run_artifacts_concurrently(
run_id=operator.run_id,
account_id=account_id,
step=step,
artifacts=artifacts,
)
for step in steps
]
return await asyncio.gather(*tasks)
# get artifacts for steps concurrently
step_artifacts = asyncio.run(
get_artifacts_for_steps(steps=steps, artifacts=["manifest.json", "run_results.json"])
)
# process each step in loop, sending generated events in the same order as steps
for artifacts in step_artifacts:
# process manifest
manifest = artifacts["manifest.json"]
if not artifacts.get("run_results.json", None):
continue
processor = DbtCloudArtifactProcessor(
producer=_PRODUCER,
job_namespace=_DAG_NAMESPACE,
skip_errors=False,
logger=operator.log,
manifest=manifest,
run_result=artifacts["run_results.json"],
profile=connection,
catalog=catalog,
)
# generate same run id of current task instance
parent_run_id = OpenLineageAdapter.build_task_instance_run_id(
dag_id=task_instance.dag_id,
task_id=operator.task_id,
execution_date=task_instance.execution_date,
try_number=task_instance.try_number - 1,
)
parent_job = ParentRunMetadata(
run_id=parent_run_id,
job_name=f"{task_instance.dag_id}.{task_instance.task_id}",
job_namespace=_DAG_NAMESPACE,
)
processor.dbt_run_metadata = parent_job
events = processor.parse().events()
client = get_openlineage_listener().adapter.get_or_create_openlineage_client()
for event in events:
client.emit(event=event)
return OperatorLineage() | Merge branch 'main' into trigger_encryption | https://github.com/apache/airflow/commit/5396e4a482cce2d283bed13cf8039e3239d98438 | null | null | airflow/providers/dbt/cloud/utils/openlineage.py | 0 | py | false | 2024-01-09T22:13:52Z |
private RSyntaxTextArea createSmartTextEditor(String templateYaml) {
// TODO https://github.com/bobbylight/RSyntaxTextArea/issues/269
JTextComponent.removeKeymap("RTextAreaKeymap");
UIManager.put("RSyntaxTextAreaUI.actionMap", null);
UIManager.put("RSyntaxTextAreaUI.inputMap", null);
UIManager.put("RTextAreaUI.actionMap", null);
UIManager.put("RTextAreaUI.inputMap", null);
final boolean experimental = true; // TODO move to settings
final RSyntaxTextArea textEditor;
if (experimental) {
final AbstractTokenMakerFactory nucleiTokenMakerFactory = new NucleiTokenMakerFactory(this.yamlFieldDescriptionMap.keySet());
textEditor = new RSyntaxTextArea(new RSyntaxDocument(nucleiTokenMakerFactory, NucleiTokenMaker.NUCLEI_YAML_SYNTAX));
} else {
textEditor = new RSyntaxTextArea();
textEditor.setSyntaxEditingStyle(SyntaxConstants.SYNTAX_STYLE_YAML);
}
textEditor.setEditable(true);
textEditor.setAntiAliasingEnabled(true);
textEditor.setTabsEmulated(true);
textEditor.setAutoIndentEnabled(true);
textEditor.setTabSize(2);
textEditor.setText(templateYaml);
setupAutoCompletion(textEditor);
return textEditor;
} | private RSyntaxTextArea createSmartTextEditor(String templateYaml) {
// TODO https://github.com/bobbylight/RSyntaxTextArea/issues/269
JTextComponent.removeKeymap("RTextAreaKeymap");
UIManager.put("RSyntaxTextAreaUI.actionMap", null);
UIManager.put("RSyntaxTextAreaUI.inputMap", null);
UIManager.put("RTextAreaUI.actionMap", null);
UIManager.put("RTextAreaUI.inputMap", null);
final boolean experimental = true; // TODO move to settings
final RSyntaxTextArea textEditor;
if (experimental) {
final AbstractTokenMakerFactory nucleiTokenMakerFactory = new NucleiTokenMakerFactory(this.yamlFieldDescriptionMap.keySet());
textEditor = new RSyntaxTextArea(new RSyntaxDocument(nucleiTokenMakerFactory, NucleiTokenMaker.NUCLEI_YAML_SYNTAX));
} else {
textEditor = new RSyntaxTextArea();
textEditor.setSyntaxEditingStyle(SyntaxConstants.SYNTAX_STYLE_YAML);
}
textEditor.setEditable(true);
textEditor.setAntiAliasingEnabled(true);
textEditor.setTabsEmulated(true);
textEditor.setAutoIndentEnabled(true);
textEditor.setTabSize(2);
textEditor.setText(templateYaml);
textEditor.getPopupMenu().add(createTemplateEditorMenuItems());
setupAutoCompletion(textEditor);
return textEditor;
} | Auto-complete classification information based on CVE id #12 | https://github.com/projectdiscovery/nuclei-burp-plugin/commit/ee7bebf9a6977858a9ea377eaa76c819a0f5053c | null | null | src/main/java/io/projectdiscovery/nuclei/gui/TemplateGeneratorTab.java | 0 | java | false | null |
public FutureJdbi initializeFutureJdbi(DatabaseConfig databaseConfig, String poolName) {
final Jdbi jdbi = initializeJdbi(databaseConfig, poolName);
final Executor dbExecutor = FutureGrpc.initializeExecutor(databaseConfig.threadCount);
return new FutureJdbi(jdbi, dbExecutor);
} | public FutureJdbi initializeFutureJdbi(DatabaseConfig databaseConfig, String poolName) {
final var jdbi = initializeJdbi(databaseConfig, poolName);
final var dbExecutor = FutureGrpc.initializeExecutor(databaseConfig.threadCount);
return new FutureJdbi(jdbi, dbExecutor);
} | Fix code smells suggested by sonarqube part - 6 (#2580)
* Fix security hotspot suggested by sonarqube
* Fix all remaining medium and low security hotspots
* Fix remaining security hotspots
* https://jenkins.dev.verta.ai/job/build/job/autoformat/job/modeldb-common/114/
* Fix code smells suggested by sonarqube part - 1
* Fix code smells suggested by sonarqube part - 2
* Fix code smells suggested by sonarqube part - 3
* Fix code smells suggested by sonarqube part - 4
* Fix code smells suggested by sonarqube part - 5
* Fix code smells suggested by sonarqube part - 6
* fix var at one missing place
* https://jenkins.dev.verta.ai/job/build/job/autoformat/job/modeldb-common/375/
Co-authored-by: Jenkins <jenkins@verta.ai> | https://github.com/VertaAI/modeldb/commit/0be544c24f0b11198f47639e66244cd93bd656d0 | null | null | backend/common/src/main/java/ai/verta/modeldb/common/config/Config.java | 0 | java | false | 2021-09-16T05:41:21Z |
def get_physical_table_metadata(
database: Database,
table_name: str,
schema_name: Optional[str] = None,
) -> List[Dict[str, Any]]:
"""Use SQLAlchemy inspector to get table metadata"""
db_engine_spec = database.db_engine_spec
db_dialect = database.get_dialect()
# ensure empty schema
_schema_name = schema_name if schema_name else None
# Table does not exist or is not visible to a connection.
if not (
database.has_table_by_name(table_name=table_name, schema=_schema_name)
or database.has_view_by_name(view_name=table_name, schema=_schema_name)
):
raise NoSuchTableError
cols = database.get_columns(table_name, schema=_schema_name)
for col in cols:
try:
if isinstance(col["type"], TypeEngine):
db_type = db_engine_spec.column_datatype_to_string(
col["type"], db_dialect
)
type_spec = db_engine_spec.get_column_spec(
db_type, db_extra=database.get_extra()
)
col.update(
{
"type": db_type,
"type_generic": type_spec.generic_type if type_spec else None,
"is_dttm": type_spec.is_dttm if type_spec else None,
}
)
# Broad exception catch, because there are multiple possible exceptions
# from different drivers that fall outside CompileError
except Exception: # pylint: disable=broad-except
col.update(
{
"type": "UNKNOWN",
"type_generic": None,
"is_dttm": None,
}
)
return cols | def get_physical_table_metadata(
database: Database,
table_name: str,
schema_name: str | None = None,
) -> list[dict[str, Any]]:
"""Use SQLAlchemy inspector to get table metadata"""
db_engine_spec = database.db_engine_spec
db_dialect = database.get_dialect()
# ensure empty schema
_schema_name = schema_name if schema_name else None
# Table does not exist or is not visible to a connection.
if not (
database.has_table_by_name(table_name=table_name, schema=_schema_name)
or database.has_view_by_name(view_name=table_name, schema=_schema_name)
):
raise NoSuchTableError
cols = database.get_columns(table_name, schema=_schema_name)
for col in cols:
try:
if isinstance(col["type"], TypeEngine):
db_type = db_engine_spec.column_datatype_to_string(
col["type"], db_dialect
)
type_spec = db_engine_spec.get_column_spec(
db_type, db_extra=database.get_extra()
)
col.update(
{
"type": db_type,
"type_generic": type_spec.generic_type if type_spec else None,
"is_dttm": type_spec.is_dttm if type_spec else None,
}
)
# Broad exception catch, because there are multiple possible exceptions
# from different drivers that fall outside CompileError
except Exception: # pylint: disable=broad-except
col.update(
{
"type": "UNKNOWN",
"type_generic": None,
"is_dttm": None,
}
)
return cols | Merge branch 'master' into fix/db-val-param-perms | https://github.com/apache/superset/commit/4e2fd6f4f04c61e8c1d3ec3f233581a05f8b6213 | null | null | superset/connectors/sqla/utils.py | 0 | py | false | 2023-06-05T08:42:54Z |
function addWebcamSync(meetingId, videoStream) {
check(videoStream, {
userId: String,
stream: String,
name: String,
pin: Boolean,
floor: Boolean,
lastFloorTime: String,
});
const {
stream, userId, name, pin, floor, lastFloorTime,
} = videoStream;
const deviceId = getDeviceId(stream);
const selector = {
meetingId,
userId,
deviceId,
};
const modifier = {
$set: {
stream,
name,
lastFloorTime,
floor,
pin,
},
};
try {
const { insertedId } = VideoStreams.upsert(selector, modifier);
if (insertedId) {
Logger.info(`Synced stream=${stream} meeting=${meetingId}`);
}
} catch (err) {
Logger.error(`Error setting sync stream: ${err}`);
}
} | async function addWebcamSync(meetingId, videoStream) {
check(videoStream, {
userId: String,
stream: String,
name: String,
pin: Boolean,
floor: Boolean,
lastFloorTime: String,
});
const {
stream, userId, name, pin, floor, lastFloorTime,
} = videoStream;
const deviceId = getDeviceId(stream);
const selector = {
meetingId,
userId,
deviceId,
};
const modifier = {
$set: {
stream,
name,
lastFloorTime,
floor,
pin,
},
};
try {
const { insertedId } = await VideoStreams.upsertAsync(selector, modifier);
if (insertedId) {
Logger.info(`Synced stream=${stream} meeting=${meetingId}`);
}
} catch (err) {
Logger.error(`Error setting sync stream: ${err}`);
}
} | Merge branch 'v2.6.x-release' of github.com:bigbluebutton/bigbluebutton into ssrf-fix | https://github.com/bigbluebutton/bigbluebutton/commit/22de2b49a5d218910923a1048bb73395e53c99bf | CVE-2023-33176 | ['CWE-918'] | bigbluebutton-html5/imports/api/video-streams/server/modifiers/sharedWebcam.js | 0 | js | false | 2023-04-13T12:40:07Z |
public boolean isHanging() {
return entity.getAttributeInstance(PlayerAttributes.ENTITY_GRAVTY_MODIFIER).hasModifier(PlayerAttributes.BAT_HANGING);
} | public boolean isHanging() {
return entity.getAttributeInstance(UEntityAttributes.ENTITY_GRAVTY_MODIFIER).hasModifier(PlayerAttributes.BAT_HANGING);
} | Fix server crashes due to late initialisation of registered values | https://github.com/Sollace/Unicopia/commit/cf07f5d341843c669d19760183c9d7254741abd1 | null | null | src/main/java/com/minelittlepony/unicopia/entity/player/Pony.java | 0 | java | false | 2022-08-27T14:37:37Z |
function e(e,i,n){var s=n.get("tags"),o=n.get("createTag");void 0!==o&&(this.createTag=o);var r=n.get("insertTag");if(void 0!==r&&(this.insertTag=r),e.call(this,i,n),t.isArray(s))for(var a=0;a<s.length;a++){var l=s[a],u=this._normalizeItem(l),c=this.option(u);this.$element.append(c)}} | function e(e,i,n){var s=n.get("tags"),r=n.get("createTag");void 0!==r&&(this.createTag=r);var o=n.get("insertTag");if(void 0!==o&&(this.insertTag=o),e.call(this,i,n),t.isArray(s))for(var a=0;a<s.length;a++){var l=s[a],u=this._normalizeItem(l),c=this.option(u);this.$element.append(c)}} | Compiled production assets | https://github.com/snipe/snipe-it/commit/8cfd00d203af61b9140155dab3b8b6e54ba17d8c | CVE-2019-10118 | ['CWE-79'] | public/js/build/all.js | 0 | js | false | null |
function PasteFromOffice() {
Object(_var_www_lime25_limesurvey_assets_packages_questiongroup_node_modules_babel_runtime_corejs2_helpers_esm_classCallCheck__WEBPACK_IMPORTED_MODULE_1__["default"])(this, PasteFromOffice);
return Object(_var_www_lime25_limesurvey_assets_packages_questiongroup_node_modules_babel_runtime_corejs2_helpers_esm_possibleConstructorReturn__WEBPACK_IMPORTED_MODULE_3__["default"])(this, Object(_var_www_lime25_limesurvey_assets_packages_questiongroup_node_modules_babel_runtime_corejs2_helpers_esm_getPrototypeOf__WEBPACK_IMPORTED_MODULE_4__["default"])(PasteFromOffice).apply(this, arguments));
} | function PasteFromOffice() {
Object(_var_www_limedev_assets_packages_questiongroup_node_modules_babel_runtime_corejs2_helpers_esm_classCallCheck__WEBPACK_IMPORTED_MODULE_1__["default"])(this, PasteFromOffice);
return Object(_var_www_limedev_assets_packages_questiongroup_node_modules_babel_runtime_corejs2_helpers_esm_possibleConstructorReturn__WEBPACK_IMPORTED_MODULE_3__["default"])(this, Object(_var_www_limedev_assets_packages_questiongroup_node_modules_babel_runtime_corejs2_helpers_esm_getPrototypeOf__WEBPACK_IMPORTED_MODULE_4__["default"])(PasteFromOffice).apply(this, arguments));
} | DEV: changed routes in vue.js part questiongroupedit.js and deleted old controller+views | https://github.com/LimeSurvey/LimeSurvey/commit/d7a309bf1a73e95187528982b9db7ec03fee6913 | CVE-2020-23710 | ['CWE-79'] | assets/packages/questiongroup/build/js/questiongroupedit.js | 0 | js | false | 2020-06-08T09:23:29Z |
function xr(t,e,n){e=bi(e,t),t=Vo(t,e);var r=null==t?t:t[Jo(ga(e))];return null==r?nt:o(r,t,n)} | function xr(t,e,n){var r=Js;return function i(){null!==e.apply(null,arguments)&&$r(t,i,n,r)}} | Build assets | https://github.com/PrestaShop/PrestaShop/commit/011d8831a9a7b619aecb49208db5d0a36b9677d1 | CVE-2020-6632 | ['CWE-79'] | admin-dev/themes/new-theme/public/translations.bundle.js | 0 | js | false | null |
def from_object(cls, trigger: BaseTrigger) -> Trigger:
"""Alternative constructor that creates a trigger row based directly off of a Trigger object."""
from airflow.models.crypto import get_fernet
classpath, kwargs = trigger.serialize()
secure_kwargs = {}
fernet = get_fernet()
for k, v in kwargs.items():
if k.startswith("encrypted__"):
secure_kwargs[k] = fernet.encrypt(v.encode("utf-8")).decode("utf-8")
else:
secure_kwargs[k] = v
return cls(classpath=classpath, kwargs=secure_kwargs) | def from_object(cls, trigger: BaseTrigger) -> Trigger:
"""Alternative constructor that creates a trigger row based directly off of a Trigger object."""
from airflow.models.crypto import get_fernet
classpath, kwargs = trigger.serialize()
secure_kwargs = {}
fernet = get_fernet()
for k, v in kwargs.items():
if k.startswith(ENCRYPTED_KWARGS_PREFIX):
secure_kwargs[k] = fernet.encrypt(v.encode("utf-8")).decode("utf-8")
else:
secure_kwargs[k] = v
return cls(classpath=classpath, kwargs=secure_kwargs) | Create a variable for encryption prefix | https://github.com/apache/airflow/commit/66e9bfe4262988f2071aa9b54efc5af5b2e4b6db | null | null | airflow/models/trigger.py | 0 | py | false | 2024-01-08T23:53:17Z |
public void onDrawScreenEventPost(ScreenEvent.DrawScreenEvent.Post event) {
Screen screen = event.getScreen();
Minecraft minecraft = screen.getMinecraft();
PoseStack poseStack = event.getPoseStack();
ingredientListOverlay.updateScreen(screen, false);
leftAreaDispatcher.updateScreen(screen, false);
if (!drawnOnBackground) {
if (screen instanceof AbstractContainerScreen) {
String guiName = screen.getClass().getName();
missingBackgroundLogger.log(Level.WARN, guiName, "GUI did not draw the dark background layer behind itself, this may result in display issues: {}", guiName);
}
ingredientListOverlay.drawScreen(minecraft, poseStack, event.getMouseX(), event.getMouseY(), minecraft.getFrameTime());
leftAreaDispatcher.drawScreen(minecraft, poseStack, event.getMouseX(), event.getMouseY(), minecraft.getFrameTime());
}
drawnOnBackground = false;
if (screen instanceof AbstractContainerScreen<?> guiContainer) {
IGuiClickableArea guiClickableArea = guiScreenHelper.getGuiClickableArea(guiContainer, event.getMouseX() - guiContainer.getGuiLeft(), event.getMouseY() - guiContainer.getGuiTop());
if (guiClickableArea != null) {
List<Component> tooltipStrings = guiClickableArea.getTooltipStrings();
if (tooltipStrings.isEmpty()) {
tooltipStrings = Collections.singletonList(new TranslatableComponent("jei.tooltip.show.recipes"));
}
TooltipRenderer.drawHoveringText(poseStack, tooltipStrings, event.getMouseX(), event.getMouseY(), minecraft.font);
}
}
ingredientListOverlay.drawTooltips(minecraft, poseStack, event.getMouseX(), event.getMouseY());
leftAreaDispatcher.drawTooltips(minecraft, poseStack, event.getMouseX(), event.getMouseY());
} | public void onDrawScreenEventPost(ScreenEvent.DrawScreenEvent.Post event) {
Screen screen = event.getScreen();
Minecraft minecraft = screen.getMinecraft();
PoseStack poseStack = event.getPoseStack();
ingredientListOverlay.updateScreen(screen, false);
leftAreaDispatcher.updateScreen(screen, false);
if (!drawnOnBackground) {
if (screen instanceof AbstractContainerScreen) {
String guiName = screen.getClass().getName();
missingBackgroundLogger.log(Level.WARN, guiName, "GUI did not draw the dark background layer behind itself, this may result in display issues: {}", guiName);
}
ingredientListOverlay.drawScreen(minecraft, poseStack, event.getMouseX(), event.getMouseY(), minecraft.getFrameTime());
leftAreaDispatcher.drawScreen(minecraft, poseStack, event.getMouseX(), event.getMouseY(), minecraft.getFrameTime());
}
drawnOnBackground = false;
if (screen instanceof AbstractContainerScreen<?> guiContainer) {
IGuiClickableArea guiClickableArea = guiScreenHelper.getGuiClickableArea(guiContainer, event.getMouseX() - guiContainer.getGuiLeft(), event.getMouseY() - guiContainer.getGuiTop());
if (guiClickableArea != null) {
List<Component> tooltipStrings = guiClickableArea.getTooltipStrings();
if (tooltipStrings.isEmpty()) {
tooltipStrings = Collections.singletonList(new TranslatableComponent("jei.tooltip.show.recipes"));
}
TooltipRenderer.drawHoveringText(poseStack, tooltipStrings, event.getMouseX(), event.getMouseY());
}
}
ingredientListOverlay.drawTooltips(minecraft, poseStack, event.getMouseX(), event.getMouseY());
leftAreaDispatcher.drawTooltips(minecraft, poseStack, event.getMouseX(), event.getMouseY());
} | Fix crash from "Show Recipes" tooltip | https://github.com/mezz/JustEnoughItems/commit/dcd7d2bb9a65ea4204a0a790a05f26fc63106ef1 | null | null | src/main/java/mezz/jei/gui/GuiEventHandler.java | 0 | java | false | 2022-01-03T06:18:00Z |
def trigger(self, dag_id: str, session: Session = NEW_SESSION):
"""Triggers DAG Run."""
run_id = request.values.get("run_id", "").replace(" ", "+")
origin = get_safe_url(request.values.get("origin"))
unpause = request.values.get("unpause")
request_conf = request.values.get("conf")
request_execution_date = request.values.get("execution_date", default=timezone.utcnow().isoformat())
is_dag_run_conf_overrides_params = conf.getboolean("core", "dag_run_conf_overrides_params")
dag = get_airflow_app().dag_bag.get_dag(dag_id)
dag_orm: DagModel = session.scalar(select(DagModel).where(DagModel.dag_id == dag_id).limit(1))
# Prepare form fields with param struct details to render a proper form with schema information
form_fields = {}
for k, v in dag.params.items():
form_fields[k] = v.dump()
# If no schema is provided, auto-detect on default values
if "schema" not in form_fields[k]:
form_fields[k]["schema"] = {}
if "type" not in form_fields[k]["schema"]:
if isinstance(form_fields[k]["value"], bool):
form_fields[k]["schema"]["type"] = "boolean"
elif isinstance(form_fields[k]["value"], int):
form_fields[k]["schema"]["type"] = ["integer", "null"]
elif isinstance(form_fields[k]["value"], list):
form_fields[k]["schema"]["type"] = ["array", "null"]
elif isinstance(form_fields[k]["value"], dict):
form_fields[k]["schema"]["type"] = ["object", "null"]
# Mark markup fields as safe
if (
"description_html" in form_fields[k]["schema"]
and form_fields[k]["schema"]["description_html"]
):
form_fields[k]["description"] = Markup(form_fields[k]["schema"]["description_html"])
if "custom_html_form" in form_fields[k]["schema"]:
form_fields[k]["schema"]["custom_html_form"] = Markup(
form_fields[k]["schema"]["custom_html_form"]
)
ui_fields_defined = any("const" not in f["schema"] for f in form_fields.values())
if not dag_orm:
flash(f"Cannot find dag {dag_id}")
return redirect(origin)
if dag_orm.has_import_errors:
flash(f"Cannot create dagruns because the dag {dag_id} has import errors", "error")
return redirect(origin)
recent_runs = session.execute(
select(DagRun.conf, func.max(DagRun.run_id).label("run_id"), func.max(DagRun.execution_date))
.where(
DagRun.dag_id == dag_id,
DagRun.run_type == DagRunType.MANUAL,
DagRun.conf.isnot(None),
)
.group_by(DagRun.conf)
.order_by(func.max(DagRun.execution_date).desc())
.limit(5)
)
recent_confs = {
run_id: json.dumps(run_conf)
for run_id, run_conf in ((run.run_id, run.conf) for run in recent_runs)
if isinstance(run_conf, dict) and any(run_conf)
}
if request.method == "GET" and ui_fields_defined:
# Populate conf textarea with conf requests parameter, or dag.params
default_conf = ""
doc_md = wwwutils.wrapped_markdown(getattr(dag, "doc_md", None))
form = DateTimeForm(data={"execution_date": request_execution_date})
if request_conf:
default_conf = request_conf
else:
try:
default_conf = json.dumps(
{str(k): v.resolve(suppress_exception=True) for k, v in dag.params.items()},
indent=4,
ensure_ascii=False,
)
except TypeError:
flash("Could not pre-populate conf field due to non-JSON-serializable data-types")
return self.render_template(
"airflow/trigger.html",
form_fields=form_fields,
dag=dag,
dag_id=dag_id,
origin=origin,
conf=default_conf,
doc_md=doc_md,
form=form,
is_dag_run_conf_overrides_params=is_dag_run_conf_overrides_params,
recent_confs=recent_confs,
)
try:
execution_date = timezone.parse(request_execution_date)
except ParserError:
flash("Invalid execution date", "error")
form = DateTimeForm(data={"execution_date": timezone.utcnow().isoformat()})
return self.render_template(
"airflow/trigger.html",
form_fields=form_fields,
dag=dag,
dag_id=dag_id,
origin=origin,
conf=request_conf if request_conf else {},
form=form,
is_dag_run_conf_overrides_params=is_dag_run_conf_overrides_params,
recent_confs=recent_confs,
)
dr = DagRun.find_duplicate(dag_id=dag_id, run_id=run_id, execution_date=execution_date)
if dr:
if dr.run_id == run_id:
message = f"The run ID {run_id} already exists"
else:
message = f"The logical date {execution_date} already exists"
flash(message, "error")
return redirect(origin)
regex = conf.get("scheduler", "allowed_run_id_pattern")
if run_id and not re.match(RUN_ID_REGEX, run_id):
if not regex.strip() or not re.match(regex.strip(), run_id):
flash(
f"The provided run ID '{run_id}' is invalid. It does not match either "
f"the configured pattern: '{regex}' or the built-in pattern: '{RUN_ID_REGEX}'",
"error",
)
form = DateTimeForm(data={"execution_date": execution_date})
return self.render_template(
"airflow/trigger.html",
form_fields=form_fields,
dag=dag,
dag_id=dag_id,
origin=origin,
conf=request_conf,
form=form,
is_dag_run_conf_overrides_params=is_dag_run_conf_overrides_params,
recent_confs=recent_confs,
)
run_conf = {}
if request_conf:
try:
run_conf = json.loads(request_conf)
if not isinstance(run_conf, dict):
flash("Invalid JSON configuration, must be a dict", "error")
form = DateTimeForm(data={"execution_date": execution_date})
return self.render_template(
"airflow/trigger.html",
form_fields=form_fields,
dag=dag,
dag_id=dag_id,
origin=origin,
conf=request_conf,
form=form,
is_dag_run_conf_overrides_params=is_dag_run_conf_overrides_params,
recent_confs=recent_confs,
)
except json.decoder.JSONDecodeError:
flash("Invalid JSON configuration, not parseable", "error")
form = DateTimeForm(data={"execution_date": execution_date})
return self.render_template(
"airflow/trigger.html",
form_fields=form_fields,
dag=dag,
dag_id=dag_id,
origin=origin,
conf=request_conf,
form=form,
is_dag_run_conf_overrides_params=is_dag_run_conf_overrides_params,
recent_confs=recent_confs,
)
if unpause and dag.get_is_paused():
dag_model = models.DagModel.get_dagmodel(dag_id)
if dag_model is not None:
dag_model.set_is_paused(is_paused=False)
try:
dag.create_dagrun(
run_type=DagRunType.MANUAL,
execution_date=execution_date,
data_interval=dag.timetable.infer_manual_data_interval(run_after=execution_date),
state=State.QUEUED,
conf=run_conf,
external_trigger=True,
dag_hash=get_airflow_app().dag_bag.dags_hash.get(dag_id),
run_id=run_id,
)
except (ValueError, ParamValidationError) as ve:
flash(f"{ve}", "error")
form = DateTimeForm(data={"execution_date": execution_date})
# Take over "bad" submitted fields for new form display
for k, v in form_fields.items():
form_fields[k]["value"] = run_conf[k]
return self.render_template(
"airflow/trigger.html",
form_fields=form_fields,
dag=dag,
dag_id=dag_id,
origin=origin,
conf=request_conf,
form=form,
is_dag_run_conf_overrides_params=is_dag_run_conf_overrides_params,
)
flash(f"Triggered {dag_id}, it should start any moment now.")
return redirect(origin) | def trigger(self, dag_id: str, session: Session = NEW_SESSION):
"""Triggers DAG Run."""
run_id = request.values.get("run_id", "").replace(" ", "+")
origin = get_safe_url(request.values.get("origin"))
unpause = request.values.get("unpause")
request_conf = request.values.get("conf")
request_execution_date = request.values.get("execution_date", default=timezone.utcnow().isoformat())
is_dag_run_conf_overrides_params = conf.getboolean("core", "dag_run_conf_overrides_params")
dag = get_airflow_app().dag_bag.get_dag(dag_id)
dag_orm: DagModel = session.query(DagModel).filter(DagModel.dag_id == dag_id).first()
# Prepare form fields with param struct details to render a proper form with schema information
form_fields = {}
for k, v in dag.params.items():
form_fields[k] = v.dump()
# If no schema is provided, auto-detect on default values
if "schema" not in form_fields[k]:
form_fields[k]["schema"] = {}
if "type" not in form_fields[k]["schema"]:
if isinstance(form_fields[k]["value"], bool):
form_fields[k]["schema"]["type"] = "boolean"
elif isinstance(form_fields[k]["value"], int):
form_fields[k]["schema"]["type"] = ["integer", "null"]
elif isinstance(form_fields[k]["value"], list):
form_fields[k]["schema"]["type"] = ["array", "null"]
elif isinstance(form_fields[k]["value"], dict):
form_fields[k]["schema"]["type"] = ["object", "null"]
# Mark markup fields as safe
if (
"description_html" in form_fields[k]["schema"]
and form_fields[k]["schema"]["description_html"]
):
form_fields[k]["description"] = Markup(form_fields[k]["schema"]["description_html"])
if "custom_html_form" in form_fields[k]["schema"]:
form_fields[k]["schema"]["custom_html_form"] = Markup(
form_fields[k]["schema"]["custom_html_form"]
)
ui_fields_defined = any("const" not in f["schema"] for f in form_fields.values())
if not dag_orm:
flash(f"Cannot find dag {dag_id}")
return redirect(origin)
if dag_orm.has_import_errors:
flash(f"Cannot create dagruns because the dag {dag_id} has import errors", "error")
return redirect(origin)
recent_runs = (
session.query(
DagRun.conf, func.max(DagRun.run_id).label("run_id"), func.max(DagRun.execution_date)
)
.filter(
DagRun.dag_id == dag_id,
DagRun.run_type == DagRunType.MANUAL,
DagRun.conf.isnot(None),
)
.group_by(DagRun.conf)
.order_by(func.max(DagRun.execution_date).desc())
.limit(5)
)
recent_confs = {
run_id: json.dumps(run_conf)
for run_id, run_conf in ((run.run_id, run.conf) for run in recent_runs)
if isinstance(run_conf, dict) and any(run_conf)
}
if request.method == "GET" and ui_fields_defined:
# Populate conf textarea with conf requests parameter, or dag.params
default_conf = ""
doc_md = wwwutils.wrapped_markdown(getattr(dag, "doc_md", None))
form = DateTimeForm(data={"execution_date": request_execution_date})
if request_conf:
default_conf = request_conf
else:
try:
default_conf = json.dumps(
{str(k): v.resolve(suppress_exception=True) for k, v in dag.params.items()},
indent=4,
ensure_ascii=False,
)
except TypeError:
flash("Could not pre-populate conf field due to non-JSON-serializable data-types")
return self.render_template(
"airflow/trigger.html",
form_fields=form_fields,
dag=dag,
dag_id=dag_id,
origin=origin,
conf=default_conf,
doc_md=doc_md,
form=form,
is_dag_run_conf_overrides_params=is_dag_run_conf_overrides_params,
recent_confs=recent_confs,
)
try:
execution_date = timezone.parse(request_execution_date)
except ParserError:
flash("Invalid execution date", "error")
form = DateTimeForm(data={"execution_date": timezone.utcnow().isoformat()})
return self.render_template(
"airflow/trigger.html",
form_fields=form_fields,
dag=dag,
dag_id=dag_id,
origin=origin,
conf=request_conf if request_conf else {},
form=form,
is_dag_run_conf_overrides_params=is_dag_run_conf_overrides_params,
recent_confs=recent_confs,
)
dr = DagRun.find_duplicate(dag_id=dag_id, run_id=run_id, execution_date=execution_date)
if dr:
if dr.run_id == run_id:
message = f"The run ID {run_id} already exists"
else:
message = f"The logical date {execution_date} already exists"
flash(message, "error")
return redirect(origin)
regex = conf.get("scheduler", "allowed_run_id_pattern")
if run_id and not re.match(RUN_ID_REGEX, run_id):
if not regex.strip() or not re.match(regex.strip(), run_id):
flash(
f"The provided run ID '{run_id}' is invalid. It does not match either "
f"the configured pattern: '{regex}' or the built-in pattern: '{RUN_ID_REGEX}'",
"error",
)
form = DateTimeForm(data={"execution_date": execution_date})
return self.render_template(
"airflow/trigger.html",
form_fields=form_fields,
dag=dag,
dag_id=dag_id,
origin=origin,
conf=request_conf,
form=form,
is_dag_run_conf_overrides_params=is_dag_run_conf_overrides_params,
recent_confs=recent_confs,
)
run_conf = {}
if request_conf:
try:
run_conf = json.loads(request_conf)
if not isinstance(run_conf, dict):
flash("Invalid JSON configuration, must be a dict", "error")
form = DateTimeForm(data={"execution_date": execution_date})
return self.render_template(
"airflow/trigger.html",
form_fields=form_fields,
dag=dag,
dag_id=dag_id,
origin=origin,
conf=request_conf,
form=form,
is_dag_run_conf_overrides_params=is_dag_run_conf_overrides_params,
recent_confs=recent_confs,
)
except json.decoder.JSONDecodeError:
flash("Invalid JSON configuration, not parseable", "error")
form = DateTimeForm(data={"execution_date": execution_date})
return self.render_template(
"airflow/trigger.html",
form_fields=form_fields,
dag=dag,
dag_id=dag_id,
origin=origin,
conf=request_conf,
form=form,
is_dag_run_conf_overrides_params=is_dag_run_conf_overrides_params,
recent_confs=recent_confs,
)
if unpause and dag.get_is_paused():
dag_model = models.DagModel.get_dagmodel(dag_id)
if dag_model is not None:
dag_model.set_is_paused(is_paused=False)
try:
dag.create_dagrun(
run_type=DagRunType.MANUAL,
execution_date=execution_date,
data_interval=dag.timetable.infer_manual_data_interval(run_after=execution_date),
state=State.QUEUED,
conf=run_conf,
external_trigger=True,
dag_hash=get_airflow_app().dag_bag.dags_hash.get(dag_id),
run_id=run_id,
)
except (ValueError, ParamValidationError) as ve:
flash(f"{ve}", "error")
form = DateTimeForm(data={"execution_date": execution_date})
# Take over "bad" submitted fields for new form display
for k, v in form_fields.items():
form_fields[k]["value"] = run_conf[k]
return self.render_template(
"airflow/trigger.html",
form_fields=form_fields,
dag=dag,
dag_id=dag_id,
origin=origin,
conf=request_conf,
form=form,
is_dag_run_conf_overrides_params=is_dag_run_conf_overrides_params,
)
flash(f"Triggered {dag_id}, it should start any moment now.")
return redirect(origin) | Merge branch 'main' into disable-default-test-connection-functionality-on-ui | https://github.com/apache/airflow/commit/c62ed8a0a2f51fabc0033839d7c3b8296b620db4 | null | null | airflow/www/views.py | 0 | py | false | 2023-07-04T13:09:43Z |
MeshProber.Determination buildClusterMesh(ReadDeploymentResults readDepl) {
final boolean bareAtStartup = m_config.m_forceVoltdbCreate
|| pathsWithRecoverableArtifacts(readDepl.deployment).isEmpty();
setBare(bareAtStartup);
final Supplier<Integer> hostCountSupplier = new Supplier<Integer>() {
@Override
public Integer get() {
return getHostCount();
}
};
ClusterType clusterType = readDepl.deployment.getCluster();
MeshProber criteria = MeshProber.builder()
.coordinators(m_config.m_coordinators)
.versionChecker(m_versionChecker)
.enterprise(m_config.m_isEnterprise)
.startAction(m_config.m_startAction)
.bare(bareAtStartup)
.configHash(CatalogUtil.makeDeploymentHashForConfig(readDepl.deploymentBytes))
.hostCountSupplier(hostCountSupplier)
.kfactor(clusterType.getKfactor())
.paused(m_config.m_isPaused)
.nodeStateSupplier(m_statusTracker.getSupplier())
.addAllowed(m_config.m_enableAdd)
.safeMode(m_config.m_safeMode)
.terminusNonce(getTerminusNonce())
.licenseHash(m_licensing.getLicenseHash())
.missingHostCount(m_config.m_missingHostCount)
.build();
m_meshProbe.set(criteria);
HostAndPort hostAndPort = criteria.getLeader();
String hostname = hostAndPort.getHost();
int port = hostAndPort.getPort();
org.voltcore.messaging.HostMessenger.Config hmconfig;
hmconfig = new org.voltcore.messaging.HostMessenger.Config(hostname, port, m_config.m_isPaused);
if (m_config.m_placementGroup != null) {
hmconfig.group = m_config.m_placementGroup;
}
hmconfig.internalPort = m_config.m_internalPort;
hmconfig.internalInterface = m_config.m_internalInterface;
hmconfig.zkPort = m_config.m_zkPort;
hmconfig.zkInterface = m_config.m_zkInterface;
hmconfig.deadHostTimeout = m_config.m_deadHostTimeoutMS;
hmconfig.factory = new VoltDbMessageFactory();
hmconfig.coreBindIds = m_config.m_networkCoreBindings;
hmconfig.acceptor = criteria;
hmconfig.localSitesCount = m_config.m_sitesperhost;
// OpsAgents can handle unknown site ID response so register those sites for that response
hmconfig.respondUnknownSite = Stream.of(OpsSelector.values()).map(OpsSelector::getSiteId)
.collect(Collectors.toSet());
if (!StringUtils.isEmpty(m_config.m_recoveredPartitions)) {
hmconfig.recoveredPartitions = m_config.m_recoveredPartitions;
}
//if SSL needs to be enabled for internal communication, SSL context has to be setup before starting HostMessenger
SslSetup.setupSSL(m_config, readDepl.deployment);
if (m_config.m_sslInternal) {
m_messenger = new org.voltcore.messaging.HostMessenger(hmconfig, this, m_config.m_sslServerContext,
m_config.m_sslClientContext);
} else {
m_messenger = new org.voltcore.messaging.HostMessenger(hmconfig, this);
}
hostLog.infoFmt("Beginning inter-node communication on port %d.", m_config.m_internalPort);
try {
m_messenger.start();
} catch (Exception e) {
boolean printStackTrace = true;
// do not log fatal exception message in these cases
if (e.getMessage() != null) {
if (e.getMessage().indexOf(SocketJoiner.FAIL_ESTABLISH_MESH_MSG) > -1 ||
e.getMessage().indexOf(MeshProber.MESH_ONE_REJOIN_MSG )> -1) {
printStackTrace = false;
}
}
VoltDB.crashLocalVoltDB(e.getMessage(), printStackTrace, e);
}
VoltZK.createPersistentZKNodes(m_messenger.getZK());
// Use the host messenger's hostId.
m_myHostId = m_messenger.getHostId();
hostLog.infoFmt("Host id of this node is: %d", m_myHostId);
consoleLog.infoFmt("Host id of this node is: %d", m_myHostId);
// This is where we wait
MeshProber.Determination determination = criteria.waitForDetermination();
m_meshProbe.set(null);
if (determination.startAction == null) {
VoltDB.crashLocalVoltDB("Shutdown invoked before Cluster Mesh was established.", false, null);
}
// paused is determined in the mesh formation exchanged
if (determination.paused) {
m_messenger.pause();
} else {
m_messenger.unpause();
}
// Semi-hacky check to see if we're attempting to rejoin to ourselves.
// The leader node gets assigned host ID 0, always, so if we're the
// leader and we're rejoining, this is clearly bad.
if (m_myHostId == 0 && determination.startAction.doesJoin()) {
VoltDB.crashLocalVoltDB("Unable to rejoin a node to itself. " +
"Please check your command line and start action and try again.", false, null);
}
// load or store settings form/to zookeeper
if (determination.startAction.doesJoin()) {
m_clusterSettings.load(m_messenger.getZK());
m_clusterSettings.get().store();
} else if (m_myHostId == 0) {
if (hostLog.isDebugEnabled()) {
hostLog.debug("Writing initial hostcount " +
m_clusterSettings.get().getProperty(ClusterSettings.HOST_COUNT) +
" to ZK");
}
m_clusterSettings.store(m_messenger.getZK());
}
m_clusterCreateTime = m_messenger.getInstanceId().getTimestamp();
return determination;
} | MeshProber.Determination buildClusterMesh(ReadDeploymentResults readDepl) {
final boolean bareAtStartup = m_config.m_forceVoltdbCreate
|| pathsWithRecoverableArtifacts(readDepl.deployment).isEmpty();
setBare(bareAtStartup);
final Supplier<Integer> hostCountSupplier = new Supplier<Integer>() {
@Override
public Integer get() {
return getHostCount();
}
};
ClusterType clusterType = readDepl.deployment.getCluster();
MeshProber criteria = MeshProber.builder()
.coordinators(m_config.m_coordinators)
.versionChecker(m_versionChecker)
.enterprise(m_config.m_isEnterprise)
.startAction(m_config.m_startAction)
.bare(bareAtStartup)
.configHash(CatalogUtil.makeDeploymentHashForConfig(readDepl.deploymentBytes))
.hostCountSupplier(hostCountSupplier)
.kfactor(clusterType.getKfactor())
.paused(m_config.m_isPaused)
.nodeStateSupplier(m_statusTracker.getSupplier())
.addAllowed(m_config.m_enableAdd)
.safeMode(m_config.m_safeMode)
.terminusNonce(getTerminusNonce())
.licenseHash(m_licensing.getLicenseHash())
.missingHostCount(m_config.m_missingHostCount)
.build();
m_meshProbe.set(criteria);
HostAndPort hostAndPort = criteria.getLeader();
String hostname = hostAndPort.getHost();
int port = hostAndPort.getPort();
org.voltcore.messaging.HostMessenger.Config hmconfig;
hmconfig = new org.voltcore.messaging.HostMessenger.Config(hostname, port, m_config.m_isPaused);
if (m_config.m_placementGroup != null) {
hmconfig.group = m_config.m_placementGroup;
}
hmconfig.internalPort = m_config.m_internalPort;
hmconfig.internalInterface = m_config.m_internalInterface;
hmconfig.zkPort = m_config.m_zkPort;
hmconfig.zkInterface = m_config.m_zkInterface;
hmconfig.deadHostTimeout = m_config.m_deadHostTimeoutMS;
hmconfig.factory = new VoltDbMessageFactory();
hmconfig.coreBindIds = m_config.m_networkCoreBindings;
hmconfig.acceptor = criteria;
hmconfig.localSitesCount = m_config.m_sitesperhost;
// OpsAgents can handle unknown site ID response so register those sites for that response
hmconfig.respondUnknownSite = Stream.of(OpsSelector.values()).map(OpsSelector::getSiteId)
.collect(Collectors.toSet());
if (!StringUtils.isEmpty(m_config.m_recoveredPartitions)) {
hmconfig.recoveredPartitions = m_config.m_recoveredPartitions;
}
//if SSL needs to be enabled for internal communication, SSL context has to be setup before starting HostMessenger
SslSetup.setupSSL(m_config, readDepl.deployment);
if (m_config.m_sslInternal) {
m_messenger = new org.voltcore.messaging.HostMessenger(hmconfig, this, m_config.m_sslServerContext,
m_config.m_sslClientContext);
} else {
m_messenger = new org.voltcore.messaging.HostMessenger(hmconfig, this);
}
hostLog.infoFmt("Beginning inter-node communication on port %d.", m_config.m_internalPort);
try {
m_messenger.start();
} catch (Exception e) {
boolean printStackTrace = true;
// do not log fatal exception message in these cases
if (e.getMessage() != null) {
if (e.getMessage().indexOf(SocketJoiner.FAIL_ESTABLISH_MESH_MSG) > -1 ||
e.getMessage().indexOf(MeshProber.MESH_ONE_REJOIN_MSG )> -1) {
printStackTrace = false;
}
}
VoltDB.crashLocalVoltDB(e.getMessage(), printStackTrace, e);
}
VoltZK.createPersistentZKNodes(m_messenger.getZK());
// Use the host messenger's hostId.
m_myHostId = m_messenger.getHostId();
hostLog.infoFmt("Host id of this node is: %d", m_myHostId);
consoleLog.infoFmt("Host id of this node is: %d", m_myHostId);
// This is where we wait
MeshProber.Determination determination = criteria.waitForDetermination();
m_meshProbe.set(null);
if (determination.startAction == null) {
VoltDB.crashLocalVoltDB("Shutdown invoked before Cluster Mesh was established.");
}
// paused is determined in the mesh formation exchanged
if (determination.paused) {
m_messenger.pause();
} else {
m_messenger.unpause();
}
// Semi-hacky check to see if we're attempting to rejoin to ourselves.
// The leader node gets assigned host ID 0, always, so if we're the
// leader and we're rejoining, this is clearly bad.
if (m_myHostId == 0 && determination.startAction.doesJoin()) {
VoltDB.crashLocalVoltDB("Unable to rejoin a node to itself. " +
"Please check your command line and start action and try again.");
}
// load or store settings form/to zookeeper
if (determination.startAction.doesJoin()) {
m_clusterSettings.load(m_messenger.getZK());
m_clusterSettings.get().store();
} else if (m_myHostId == 0) {
if (hostLog.isDebugEnabled()) {
hostLog.debug("Writing initial hostcount " +
m_clusterSettings.get().getProperty(ClusterSettings.HOST_COUNT) +
" to ZK");
}
m_clusterSettings.store(m_messenger.getZK());
}
m_clusterCreateTime = m_messenger.getInstanceId().getTimestamp();
return determination;
} | ENG-22241, skip crash file in junit test (#1172)
VoltDB.crashLocalVoltDB said it was declining to write a crash file in a junit test, and then did it anyway.
Fixed now. We actually decline to write the crash file in a junit test. All other crash actions still apply.
Other sundry cleanup.
- Move crash file writer to a separate private method (makes skipping the call easier to read)
- Fix bogus claim that crashLocalVoltDB returns an exception object
- Replace a few '3-arg' calls with a simpler '1-arg' call where there's only a message text
- Add the missing 2-arg variant to complement the 1, 3, and 4-arg ones we have
- Replace the "get all thread stacktraces and then ignore all but the current thread" call
. Make private some methods that had no need to be public | https://github.com/VoltDB/voltdb/commit/11c1626969f246f2c426db29a2c7b5f46b4d4780 | null | null | src/frontend/org/voltdb/RealVoltDB.java | 0 | java | false | 2022-03-21T17:28:28Z |
@Override
public AnimationPose sample(LocatorSkeleton locatorSkeleton, AnimationDataContainer.CachedPoseContainer cachedPoseContainer) {
if(this.activeStates.size() > 0){
AnimationPose animationPose = this.getPoseFromState(this.activeStates.get(0), locatorSkeleton);
if(this.activeStates.size() > 1){
for(String stateIdentifier : this.activeStates){
animationPose = AnimationPose.blend(this.getPoseFromState(stateIdentifier, locatorSkeleton), animationPose, this.statesHashMap.get(stateIdentifier).getWeight(), this.statesHashMap.get(stateIdentifier).getCurrentTransition().getEasing());
}
}
//AnimationOverhaulMain.LOGGER.info(this.activeStates.toString());
return animationPose;
}
AnimationOverhaulMain.LOGGER.warn("No active states in state machine {}", this.getIdentifier());
return new AnimationPose(locatorSkeleton);
} | @Override
public AnimationPose sample(LocatorSkeleton locatorSkeleton, AnimationDataContainer.CachedPoseContainer cachedPoseContainer) {
if(this.activeStates.size() > 0){
AnimationPose animationPose = this.getPoseFromState(this.activeStates.get(0), locatorSkeleton);
if(this.activeStates.size() > 1){
for(String stateIdentifier : this.activeStates){
animationPose.blend(
this.getPoseFromState(stateIdentifier, locatorSkeleton),
this.statesHashMap.get(stateIdentifier).getWeight(),
this.statesHashMap.get(stateIdentifier).getCurrentTransition().getEasing());
}
}
//AnimationOverhaulMain.LOGGER.info(this.activeStates.toString());
return animationPose;
}
AnimationOverhaulMain.LOGGER.warn("No active states in state machine {}", this.getIdentifier());
return new AnimationPose(locatorSkeleton);
} | First person walking and general improvements
- Adjusted how attack animations are triggered (WIP), mining and item use no longer drive the attack animation (except at the end of mining)
- Improved blend function to actually make sense from an interfacing perspective
- Added function for getting native animation timeline groups, using the mod's namespace as a shortcut
- Extended more of incrementing animation variables
- Changed the first person item variable to be based off the item stack's default value instead of the item stack (to keep it from updating based on durability or stack changes, currently does not work properly with empty hands)
- Implemented easing into animation montages
- Added first person walking
- Added better mirroring functions. To remove mirroring functions from the animation pose get from timeline group function. | https://github.com/Trainguy9512/trainguys-animation-overhaul/commit/ccad69c6587e3f6e24887e546a035b78bdf4a074 | null | null | src/main/java/com/trainguy9512/animationoverhaul/animation/pose/sample/AnimationStateMachine.java | 0 | java | false | null |
private void startGame() {
StartGamePacket startGamePacket = new StartGamePacket();
startGamePacket.setUniqueEntityId(playerEntity.getGeyserId());
startGamePacket.setRuntimeEntityId(playerEntity.getGeyserId());
startGamePacket.setPlayerGameType(switch (gameMode) {
case CREATIVE -> GameType.CREATIVE;
case ADVENTURE -> GameType.ADVENTURE;
default -> GameType.SURVIVAL;
});
startGamePacket.setPlayerPosition(Vector3f.from(0, 69, 0));
startGamePacket.setRotation(Vector2f.from(1, 1));
startGamePacket.setSeed(-1L);
startGamePacket.setDimensionId(DimensionUtils.javaToBedrock(dimension));
startGamePacket.setGeneratorId(1);
startGamePacket.setLevelGameType(GameType.SURVIVAL);
startGamePacket.setDifficulty(1);
startGamePacket.setDefaultSpawn(Vector3i.ZERO);
startGamePacket.setAchievementsDisabled(!geyser.getConfig().isXboxAchievementsEnabled());
startGamePacket.setCurrentTick(-1);
startGamePacket.setEduEditionOffers(0);
startGamePacket.setEduFeaturesEnabled(false);
startGamePacket.setRainLevel(0);
startGamePacket.setLightningLevel(0);
startGamePacket.setMultiplayerGame(true);
startGamePacket.setBroadcastingToLan(true);
startGamePacket.setPlatformBroadcastMode(GamePublishSetting.PUBLIC);
startGamePacket.setXblBroadcastMode(GamePublishSetting.PUBLIC);
startGamePacket.setCommandsEnabled(!geyser.getConfig().isXboxAchievementsEnabled());
startGamePacket.setTexturePacksRequired(false);
startGamePacket.setBonusChestEnabled(false);
startGamePacket.setStartingWithMap(false);
startGamePacket.setTrustingPlayers(true);
startGamePacket.setDefaultPlayerPermission(PlayerPermission.MEMBER);
startGamePacket.setServerChunkTickRange(4);
startGamePacket.setBehaviorPackLocked(false);
startGamePacket.setResourcePackLocked(false);
startGamePacket.setFromLockedWorldTemplate(false);
startGamePacket.setUsingMsaGamertagsOnly(false);
startGamePacket.setFromWorldTemplate(false);
startGamePacket.setWorldTemplateOptionLocked(false);
String serverName = geyser.getConfig().getBedrock().serverName();
startGamePacket.setLevelId(serverName);
startGamePacket.setLevelName(serverName);
startGamePacket.setPremiumWorldTemplateId("00000000-0000-0000-0000-000000000000");
// startGamePacket.setCurrentTick(0);
startGamePacket.setEnchantmentSeed(0);
startGamePacket.setMultiplayerCorrelationId("");
startGamePacket.setItemEntries(this.itemMappings.getItemEntries());
startGamePacket.setVanillaVersion("*");
startGamePacket.setInventoriesServerAuthoritative(true);
startGamePacket.setServerEngine(""); // Do we want to fill this in?
startGamePacket.setPlayerPropertyData(NbtMap.EMPTY);
startGamePacket.setWorldTemplateId(UUID.randomUUID());
startGamePacket.setChatRestrictionLevel(ChatRestrictionLevel.NONE);
SyncedPlayerMovementSettings settings = new SyncedPlayerMovementSettings();
settings.setMovementMode(AuthoritativeMovementMode.CLIENT);
settings.setRewindHistorySize(0);
settings.setServerAuthoritativeBlockBreaking(false);
startGamePacket.setPlayerMovementSettings(settings);
upstream.sendPacket(startGamePacket);
} | private void startGame() {
StartGamePacket startGamePacket = new StartGamePacket();
startGamePacket.setUniqueEntityId(playerEntity.getGeyserId());
startGamePacket.setRuntimeEntityId(playerEntity.getGeyserId());
startGamePacket.setPlayerGameType(switch (gameMode) {
case CREATIVE -> GameType.CREATIVE;
case ADVENTURE -> GameType.ADVENTURE;
default -> GameType.SURVIVAL;
});
startGamePacket.setPlayerPosition(Vector3f.from(0, 69, 0));
startGamePacket.setRotation(Vector2f.from(1, 1));
startGamePacket.setSeed(-1L);
startGamePacket.setDimensionId(DimensionUtils.javaToBedrock(chunkCache.getBedrockDimension()));
startGamePacket.setGeneratorId(1);
startGamePacket.setLevelGameType(GameType.SURVIVAL);
startGamePacket.setDifficulty(1);
startGamePacket.setDefaultSpawn(Vector3i.ZERO);
startGamePacket.setAchievementsDisabled(!geyser.getConfig().isXboxAchievementsEnabled());
startGamePacket.setCurrentTick(-1);
startGamePacket.setEduEditionOffers(0);
startGamePacket.setEduFeaturesEnabled(false);
startGamePacket.setRainLevel(0);
startGamePacket.setLightningLevel(0);
startGamePacket.setMultiplayerGame(true);
startGamePacket.setBroadcastingToLan(true);
startGamePacket.setPlatformBroadcastMode(GamePublishSetting.PUBLIC);
startGamePacket.setXblBroadcastMode(GamePublishSetting.PUBLIC);
startGamePacket.setCommandsEnabled(!geyser.getConfig().isXboxAchievementsEnabled());
startGamePacket.setTexturePacksRequired(false);
startGamePacket.setBonusChestEnabled(false);
startGamePacket.setStartingWithMap(false);
startGamePacket.setTrustingPlayers(true);
startGamePacket.setDefaultPlayerPermission(PlayerPermission.MEMBER);
startGamePacket.setServerChunkTickRange(4);
startGamePacket.setBehaviorPackLocked(false);
startGamePacket.setResourcePackLocked(false);
startGamePacket.setFromLockedWorldTemplate(false);
startGamePacket.setUsingMsaGamertagsOnly(false);
startGamePacket.setFromWorldTemplate(false);
startGamePacket.setWorldTemplateOptionLocked(false);
String serverName = geyser.getConfig().getBedrock().serverName();
startGamePacket.setLevelId(serverName);
startGamePacket.setLevelName(serverName);
startGamePacket.setPremiumWorldTemplateId("00000000-0000-0000-0000-000000000000");
// startGamePacket.setCurrentTick(0);
startGamePacket.setEnchantmentSeed(0);
startGamePacket.setMultiplayerCorrelationId("");
startGamePacket.setItemEntries(this.itemMappings.getItemEntries());
startGamePacket.setVanillaVersion("*");
startGamePacket.setInventoriesServerAuthoritative(true);
startGamePacket.setServerEngine(""); // Do we want to fill this in?
startGamePacket.setPlayerPropertyData(NbtMap.EMPTY);
startGamePacket.setWorldTemplateId(UUID.randomUUID());
startGamePacket.setChatRestrictionLevel(ChatRestrictionLevel.NONE);
SyncedPlayerMovementSettings settings = new SyncedPlayerMovementSettings();
settings.setMovementMode(AuthoritativeMovementMode.CLIENT);
settings.setRewindHistorySize(0);
settings.setServerAuthoritativeBlockBreaking(false);
startGamePacket.setPlayerMovementSettings(settings);
upstream.sendPacket(startGamePacket);
} | Fix crashes when joining a server in the Nether | https://github.com/GeyserMC/Geyser/commit/886d7e5b4b44e84a7cf91139ac9305b2180c8c88 | null | null | core/src/main/java/org/geysermc/geyser/session/GeyserSession.java | 0 | java | false | 2022-11-12T15:28:53Z |
function Ni(t,e,i,n,s){return t===e||(null==t||null==e||!eo(t)&&!eo(e)?t!=t&&e!=e:function(t,e,i,n,s,o){var r=Ya(t),a=Ya(e),l=r?wt:ia(t),u=a?wt:ia(e),c=(l=l==bt?Ot:l)==Ot,h=(u=u==bt?Ot:u)==Ot,d=l==u;if(d&&Ka(t)){if(!Ka(e))return!1;r=!0,c=!1}if(d&&!c)return o||(o=new Me),r||tl(t)?ts(t,e,i,n,s,o):function(t,e,i,n,s,o,r){switch(i){case zt:if(t.byteLength!=e.byteLength||t.byteOffset!=e.byteOffset)return!1;t=t.buffer,e=e.buffer;case Rt:return!(t.byteLength!=e.byteLength||!o(new er(t),new er(e)));case Ct:case kt:case $t:return Ys(+t,+e);case Tt:return t.name==e.name&&t.message==e.message;case Mt:case jt:return t==e+"";case Et:var a=j;case Nt:var l=n&Q;if(a||(a=R),t.size!=e.size&&!l)return!1;var u=r.get(t);if(u)return u==e;n|=J,r.set(t,e);var c=ts(a(t),a(e),n,s,o,r);return r.delete(t),c;case Ft:if(zr)return zr.call(t)==zr.call(e)}return!1}(t,e,l,i,n,s,o);if(!(i&Q)){var p=c&&Vo.call(t,"__wrapped__"),f=h&&Vo.call(e,"__wrapped__");if(p||f){var g=p?t.value():t,m=f?e.value():e;return o||(o=new Me),s(g,m,i,n,o)}}return!!d&&(o||(o=new Me),function(t,e,i,n,s,o){var r=i&Q,a=is(t),l=a.length;if(l!=is(e).length&&!r)return!1;for(var u=l;u--;){var c=a[u];if(!(r?c in e:Vo.call(e,c)))return!1}var h=o.get(t);if(h&&o.get(e))return h==e;var d=!0;o.set(t,e),o.set(e,t);for(var p=r;++u<l;){c=a[u];var f=t[c],g=e[c];if(n)var m=r?n(g,f,c,e,t,o):n(f,g,c,t,e,o);if(!(m===B?f===g||s(f,g,i,n,o):m)){d=!1;break}p||(p="constructor"==c)}if(d&&!p){var v=t.constructor,_=e.constructor;v!=_&&"constructor"in t&&"constructor"in e&&!("function"==typeof v&&v instanceof v&&"function"==typeof _&&_ instanceof _)&&(d=!1)}return o.delete(t),o.delete(e),d}(t,e,i,n,s,o))}(t,e,i,n,Ni,s))} | function Ni(t,e,i,n,s){var r=t.def&&t.def[e];if(r)try{r(i.elm,t,i,n,s)}catch(n){q(n,i.context,"directive "+t.name+" "+e+" hook")}} | Compiled production assets | https://github.com/snipe/snipe-it/commit/8cfd00d203af61b9140155dab3b8b6e54ba17d8c | CVE-2019-10118 | ['CWE-79'] | public/js/build/all.js | 0 | js | false | null |
def execute(self, context: Context):
vertica = VerticaHook(vertica_conn_id=self.vertica_conn_id)
mysql = MySqlHook(mysql_conn_id=self.mysql_conn_id)
if self.bulk_load:
self._bulk_load_transfer(mysql, vertica)
else:
self._non_bulk_load_transfer(mysql, vertica)
if self.mysql_postoperator:
self.log.info("Running MySQL postoperator...")
mysql.run(self.mysql_postoperator)
self.log.info("Done") | def execute(self, context: Context):
vertica = VerticaHook(vertica_conn_id=self.vertica_conn_id)
mysql = MySqlHook(mysql_conn_id=self.mysql_conn_id, local_infile=self.bulk_load)
if self.bulk_load:
self._bulk_load_transfer(mysql, vertica)
else:
self._non_bulk_load_transfer(mysql, vertica)
if self.mysql_postoperator:
self.log.info("Running MySQL postoperator...")
mysql.run(self.mysql_postoperator)
self.log.info("Done") | Move local_infile option from extra to hook parameter
This change is to move local_infile parameter from connection
extra to Hook. Since this feature is only used for very specific
cases, it belongs to the "action" it executes not to the connection
defined in general. For example in Hive and Vertica transfers, the
capability of local_inline is simply exnabled by bulk_load
parameter - and it allows to use the same connection in both cases. | https://github.com/apache/airflow/commit/a8c0e9c9655ccc6bd5cc1fe5de8b228996b23bf5 | null | null | airflow/providers/mysql/transfers/vertica_to_mysql.py | 0 | py | false | 2023-01-09T18:52:43Z |
function ListItemView(locale) {
var _this;
Object(_var_www_lime25_limesurvey_assets_packages_questiongroup_node_modules_babel_runtime_corejs2_helpers_esm_classCallCheck__WEBPACK_IMPORTED_MODULE_0__["default"])(this, ListItemView);
_this = Object(_var_www_lime25_limesurvey_assets_packages_questiongroup_node_modules_babel_runtime_corejs2_helpers_esm_possibleConstructorReturn__WEBPACK_IMPORTED_MODULE_2__["default"])(this, Object(_var_www_lime25_limesurvey_assets_packages_questiongroup_node_modules_babel_runtime_corejs2_helpers_esm_getPrototypeOf__WEBPACK_IMPORTED_MODULE_3__["default"])(ListItemView).call(this, locale));
/**
* Collection of the child views inside of the list item {@link #element}.
*
* @readonly
* @member {module:ui/viewcollection~ViewCollection}
*/
_this.children = _this.createCollection();
_this.setTemplate({
tag: 'li',
attributes: {
class: ['ck', 'ck-list__item']
},
children: _this.children
});
return _this;
} | function ListItemView(locale) {
var _this;
Object(_var_www_limedev_assets_packages_questiongroup_node_modules_babel_runtime_corejs2_helpers_esm_classCallCheck__WEBPACK_IMPORTED_MODULE_0__["default"])(this, ListItemView);
_this = Object(_var_www_limedev_assets_packages_questiongroup_node_modules_babel_runtime_corejs2_helpers_esm_possibleConstructorReturn__WEBPACK_IMPORTED_MODULE_2__["default"])(this, Object(_var_www_limedev_assets_packages_questiongroup_node_modules_babel_runtime_corejs2_helpers_esm_getPrototypeOf__WEBPACK_IMPORTED_MODULE_3__["default"])(ListItemView).call(this, locale));
/**
* Collection of the child views inside of the list item {@link #element}.
*
* @readonly
* @member {module:ui/viewcollection~ViewCollection}
*/
_this.children = _this.createCollection();
_this.setTemplate({
tag: 'li',
attributes: {
class: ['ck', 'ck-list__item']
},
children: _this.children
});
return _this;
} | DEV: changed routes in vue.js part questiongroupedit.js and deleted old controller+views | https://github.com/LimeSurvey/LimeSurvey/commit/d7a309bf1a73e95187528982b9db7ec03fee6913 | CVE-2020-23710 | ['CWE-79'] | assets/packages/questiongroup/build/js/questiongroupedit.js | 0 | js | false | 2020-06-08T09:23:29Z |
void
dirpool_make_dirtraverse(Dirpool *dp)
{
Id parent, i, *dirtraverse;
if (!dp->ndirs)
return;
dp->dirs = solv_extend_resize(dp->dirs, dp->ndirs, sizeof(Id), DIR_BLOCK);
dirtraverse = solv_calloc_block(dp->ndirs, sizeof(Id), DIR_BLOCK);
for (parent = 0, i = 0; i < dp->ndirs; i++)
{
if (dp->dirs[i] > 0)
continue;
parent = -dp->dirs[i];
dirtraverse[i] = dirtraverse[parent];
dirtraverse[parent] = i + 1;
}
dp->dirtraverse = dirtraverse;
} | void
dirpool_make_dirtraverse(Dirpool *dp)
{
Id parent, i, *dirtraverse;
if (!dp->ndirs)
return;
dp->dirs = solv_extend_resize(dp->dirs, dp->ndirs, sizeof(Id), DIR_BLOCK);
dirtraverse = solv_calloc_block(dp->ndirs, sizeof(Id), DIR_BLOCK);
for (i = 0; i < dp->ndirs; i++)
{
if (dp->dirs[i] > 0)
continue;
parent = -dp->dirs[i];
dirtraverse[i] = dirtraverse[parent];
dirtraverse[parent] = i + 1;
}
dp->dirtraverse = dirtraverse;
} | Don't set values that are never read | https://github.com/openSUSE/libsolv/commit/2d7b115fbbe6b2f2894221e010cd75638a8eaa37 | null | null | src/dirpool.c | 0 | c | false | null |
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
r"""Call out to GPT4All's generate method.
Args:
prompt: The prompt to pass into the model.
stop: A list of strings to stop generation when encountered.
Returns:
The string generated by the model.
Example:
.. code-block:: python
prompt = "Once upon a time, "
response = model(prompt, n_predict=55)
"""
text_callback = None
if run_manager:
text_callback = partial(run_manager.on_llm_new_token, verbose=self.verbose)
text = ""
for token in self.client.generate(prompt, **self._default_params()):
if text_callback:
text_callback(token)
text += token
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text | def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
r"""Call out to GPT4All's generate method.
Args:
prompt: The prompt to pass into the model.
stop: A list of strings to stop generation when encountered.
Returns:
The string generated by the model.
Example:
.. code-block:: python
prompt = "Once upon a time, "
response = model(prompt, n_predict=55)
"""
text_callback = None
if run_manager:
text_callback = partial(run_manager.on_llm_new_token, verbose=self.verbose)
text = ""
params = {**self._default_params(), **kwargs}
for token in self.client.generate(prompt, **params):
if text_callback:
text_callback(token)
text += token
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text | Merge remote-tracking branch 'upstream/master' | https://github.com/hwchase17/langchain/commit/e12294f00cb3c6d3afd6eaf0541dc3056029fc10 | null | null | langchain/llms/gpt4all.py | 0 | py | false | 2023-06-21T06:45:39Z |
function handleGuestWaitingLeft({ body }, meetingId) {
const { userId } = body;
check(meetingId, String);
check(userId, String);
return removeGuest(meetingId, userId);
} | async function handleGuestWaitingLeft({ body }, meetingId) {
const { userId } = body;
check(meetingId, String);
check(userId, String);
const result = await removeGuest(meetingId, userId);
return result;
} | Merge branch 'v2.6.x-release' of github.com:bigbluebutton/bigbluebutton into ssrf-fix | https://github.com/bigbluebutton/bigbluebutton/commit/22de2b49a5d218910923a1048bb73395e53c99bf | CVE-2023-33176 | ['CWE-918'] | bigbluebutton-html5/imports/api/guest-users/server/handlers/guestWaitingLeft.js | 0 | js | false | 2023-04-13T12:40:07Z |
public int weight() {
int result = 0;
if (oldPath().isPresent()) {
result += stringSize(oldPath().get());
}
if (newPath().isPresent()) {
result += stringSize(newPath().get());
}
result += 20 + 20; // old and new commit IDs
result += 4; // comparison type
result += 4; // changeType
if (patchType().isPresent()) {
result += 4;
}
result += 4 + 4; // insertions and deletions
result += 4 + 4; // size and size delta
result += 20 * edits().size(); // each edit is 4 Integers + boolean = 4 * 4 + 4 = 20
for (String s : headerLines()) {
s += stringSize(s);
}
return result;
} | public int weight() {
int result = 0;
if (oldPath().isPresent()) {
result += stringSize(oldPath().get());
}
if (newPath().isPresent()) {
result += stringSize(newPath().get());
}
result += 20 + 20; // old and new commit IDs
result += 4; // comparison type
result += 4; // changeType
if (patchType().isPresent()) {
result += 4;
}
result += 4 + 4; // insertions and deletions
result += 4 + 4; // size and size delta
result += 20 * edits().size(); // each edit is 4 Integers + boolean = 4 * 4 + 4 = 20
for (String s : headerLines()) {
s += stringSize(s);
}
if (negative().isPresent()) {
result += 1;
}
return result;
} | Use faster fallback diff algorithm in case of timeouts
Current logic uses the default diff algorithm "Histogram Diff". In case
of timeouts, it throws an exception that is propagated to the caller.
In this change, we adjust the logic to follow what was implemented in
PatchListLoader (the old diff cache): If the diff execution times out,
we fallback to the faster histogram algorithm without myers diff and log
a warning message. Notice that in DiffOperations, the fallback is done
outside the cache, i.e. we are requesting the diff from the cache with
new keys that specify which algorithm should be used. In old diff cache,
this fallback was done inside the cache loader. This is one of the
advantages of the new diff cache that we can explicitly specify which
algorithm to use as part of the cache key.
This change implies that slow requests will always have to go through
the timeout before requesting the diffs using the faster algorithm. In a
follow up change, we can enhance the logic by caching negative results
for the histogram diff keys to quickly fallback without having to
hit the timeout.
Change-Id: I34fe29dc166534d835c97beab85661facafac31f | https://github.com/GerritCodeReview/gerrit/commit/541ac10c62fbfa3ad2204e9171c99c499ede46e7 | null | null | java/com/google/gerrit/server/patch/filediff/FileDiffOutput.java | 0 | java | false | null |
@Override
public void start(String strSql) {
this.sql = strSql;
if (this.checkPartition(strSql)) {
service.writeErrMessage(ErrorCode.ER_UNSUPPORTED_PS, " unsupported load data with Partition");
clear();
return;
}
try {
statement = (MySqlLoadDataInFileStatement) new MySqlStatementParser(strSql).parseStatement();
SchemaUtil.SchemaInfo schemaInfo = SchemaUtil.getSchemaInfo(service.getUser(), service.getSchema(), statement.getTableName(), null);
tableName = schemaInfo.getTable();
schema = schemaInfo.getSchemaConfig();
} catch (SQLException e) {
clear();
service.writeErrMessage(e.getSQLState(), e.getMessage(), e.getErrorCode());
return;
}
// if there are sharding in sql, remove it.
if (statement.getTableName() instanceof SQLPropertyExpr) {
statement.setTableName(new SQLIdentifierExpr(tableName));
}
tableConfig = schema.getTables().get(tableName);
if (!ProxyMeta.getInstance().getTmManager().checkTableExists(schema.getName(), tableName)) {
String msg = "Table '" + schema.getName() + "." + tableName + "' or table mata doesn't exist";
clear();
service.writeErrMessage("42S02", msg, ErrorCode.ER_NO_SUCH_TABLE);
return;
}
fileName = parseFileName(strSql);
if (fileName == null) {
service.writeErrMessage(ErrorCode.ER_FILE_NOT_FOUND, " file name is null !");
clear();
return;
}
tempPath = SystemConfig.getInstance().getHomePath() + File.separator + "temp" + File.separator + service.getConnection().getId() + File.separator;
tempFile = tempPath + "clientTemp.txt";
tempByteBuffer = new ByteArrayOutputStream();
if (!trySetPartitionOrAutoIncrementColumnIndex(statement)) {
return;
}
if (tableConfig != null && autoIncrementIndex == -1) {
final String incrementColumn = getIncrementColumn();
if (incrementColumn != null) {
statement.getColumns().add(new SQLIdentifierExpr(incrementColumn));
autoIncrementIndex = statement.getColumns().size() - 1;
appendAutoIncrementColumn = true;
sql = SQLUtils.toMySqlString(statement);
if (incrementColumn.equalsIgnoreCase(getPartitionColumn())) {
partitionColumnIndex = autoIncrementIndex;
}
}
}
if (tableConfig != null &&
(tableConfig instanceof ShardingTableConfig || tableConfig instanceof ChildTableConfig) &&
partitionColumnIndex == -1) {
service.writeErrMessage(ErrorCode.ER_KEY_COLUMN_DOES_NOT_EXITS, "can't find partition column.");
clear();
return;
}
parseLoadDataPram();
if (statement.isLocal()) {
//request file from client
ByteBuffer buffer = service.allocate();
RequestFilePacket filePacket = new RequestFilePacket();
filePacket.setFileName(fileName.getBytes());
filePacket.setPacketId(1);
filePacket.write(buffer, service, true);
} else {
if (!new File(fileName).exists()) {
String msg = fileName + " is not found!";
clear();
service.writeErrMessage(ErrorCode.ER_FILE_NOT_FOUND, msg);
} else {
if (parseFileByLine(fileName, loadData.getCharset())) {
RouteResultset rrs = buildResultSet(routeResultMap);
if (rrs != null) {
flushDataToFile();
service.getSession2().execute(rrs);
}
}
}
}
} | @Override
/**
 * Handles a LOAD DATA [LOCAL] INFILE statement: validates the statement,
 * resolves the target schema/table, prepares per-connection temp buffers,
 * then either requests the file from the client (LOCAL) or reads it from a
 * server-side path and executes the routed result.
 */
public void start(String strSql) {
    this.sql = strSql;
    // LOAD DATA with an explicit PARTITION clause is not supported.
    if (this.checkPartition(strSql)) {
        service.writeErrMessage(ErrorCode.ER_UNSUPPORTED_PS, " unsupported load data with Partition");
        clear();
        return;
    }
    try {
        // Parse the SQL and resolve the schema/table the rows are loaded into.
        statement = (MySqlLoadDataInFileStatement) new MySqlStatementParser(strSql).parseStatement();
        SchemaUtil.SchemaInfo schemaInfo = SchemaUtil.getSchemaInfo(service.getUser(), service.getSchema(), statement.getTableName(), null);
        tableName = schemaInfo.getTable();
        schema = schemaInfo.getSchemaConfig();
    } catch (SQLException e) {
        clear();
        service.writeErrMessage(e.getSQLState(), e.getMessage(), e.getErrorCode());
        return;
    }
    // if there are sharding in sql, remove it.
    if (statement.getTableName() instanceof SQLPropertyExpr) {
        statement.setTableName(new SQLIdentifierExpr(tableName));
    }
    tableConfig = schema.getTables().get(tableName);
    if (!ProxyMeta.getInstance().getTmManager().checkTableExists(schema.getName(), tableName)) {
        String msg = "Table '" + schema.getName() + "." + tableName + "' or table mata doesn't exist";
        clear();
        service.writeErrMessage("42S02", msg, ErrorCode.ER_NO_SUCH_TABLE);
        return;
    }
    fileName = parseFileName(strSql);
    if (fileName == null) {
        service.writeErrMessage(ErrorCode.ER_FILE_NOT_FOUND, " file name is null !");
        clear();
        return;
    }
    // Per-connection temp location used to spool the incoming rows.
    tempPath = SystemConfig.getInstance().getHomePath() + File.separator + "temp" + File.separator + service.getConnection().getId() + File.separator;
    tempFile = tempPath + "clientTemp.txt";
    tempByteBuffer = new ByteArrayOutputStream();
    if (!trySetPartitionOrAutoIncrementColumnIndex(statement)) {
        return;
    }
    // If the table has an auto-increment column that the statement does not
    // list, append it so generated values can be filled in during loading.
    if (tableConfig != null && autoIncrementIndex == -1) {
        final String incrementColumn = getIncrementColumn();
        if (incrementColumn != null) {
            statement.getColumns().add(new SQLIdentifierExpr(incrementColumn));
            autoIncrementIndex = statement.getColumns().size() - 1;
            appendAutoIncrementColumn = true;
            sql = SQLUtils.toMySqlString(statement);
            if (incrementColumn.equalsIgnoreCase(getPartitionColumn())) {
                partitionColumnIndex = autoIncrementIndex;
            }
        }
    }
    // Sharded (or child) tables require a resolvable partition column.
    if (tableConfig != null &&
            (tableConfig instanceof ShardingTableConfig || tableConfig instanceof ChildTableConfig) &&
            partitionColumnIndex == -1) {
        service.writeErrMessage(ErrorCode.ER_KEY_COLUMN_DOES_NOT_EXITS, "can't find partition column.");
        clear();
        return;
    }
    parseLoadDataPram();
    if (statement.isLocal()) {
        //request file from client
        ByteBuffer buffer = service.allocate();
        RequestFilePacket filePacket = new RequestFilePacket();
        filePacket.setFileName(fileName.getBytes());
        filePacket.setPacketId(1);
        filePacket.write(buffer, service, true);
    } else {
        // Server-side file: read and route it directly.
        if (!new File(fileName).exists()) {
            String msg = fileName + " is not found!";
            clear();
            service.writeErrMessage(ErrorCode.ER_FILE_NOT_FOUND, msg);
        } else {
            if (parseFileByLine(fileName, loadData.getCharset())) {
                RouteResultset rrs = buildResultSet(routeResultMap);
                if (rrs != null) {
                    flushDataToFile();
                    // End the parse stage on the session before executing the route.
                    service.getSession2().endParse();
                    service.getSession2().execute(rrs);
                }
            }
        }
    }
} | fix bug that "set autocommit=1" doesn't work,eg set autocommit=0;set autocommit=1;dml;rollback; dml is rollback | https://github.com/actiontech/dble/commit/9d7a16c06fe4b5e9188bc1403058c2821023bb36 | null | null | src/main/java/com/actiontech/dble/server/handler/ServerLoadDataInfileHandler.java | 0 | java | false | 2021-05-12T07:58:46Z
/**
 * Fat binary search on the handle lengths of {@code v}, narrowing the
 * tracked interval (a..b] and keeping the last internal node whose extent
 * is a proper prefix of {@code v}.
 *
 * @return the last matching internal node found, or {@code null} if no
 *         probe in the searched interval matched
 */
protected InternalNode<T> fatBinarySearchExact(final LongArrayBitVector v, final long[] state, long a, long b) {
    if (DDDEBUG) System.err.println("fatBinarySearchExact(" + v + ", [" + a + ".." + b + "])");
    // We actually keep track of (a..b]
    a--;
    assert a <= b : a + " >= " + b;
    final long length = v.length();
    InternalNode<T> top = null;
    // Mask isolating the highest bit position on which a and b may differ.
    long checkMask = -1L << Fast.ceilLog2(b - a);
    while (a < b) {
        assert checkMask != 0;
        // NOTE(review): the tracked interval is (a..b], so this debug message
        // should probably end with "]" rather than ")" — confirm.
        if (DDDEBUG) System.err.println("[" + (a + 1) + ".." + b + ")");
        final long f = b & checkMask;
        if ((a & checkMask) != f) {
            if (DDDEBUG) System.err.println("Inquiring with key " + v.subVector(0, f) + " (" + f + ")");
            final InternalNode<T> n = handle2Node.findExact(v, f, state);
            // A hit counts only if the node's extent is a proper prefix of v;
            // otherwise the upper bound is shrunk below the probed length.
            if (n != null && n.extentLength < length && n.extent(transform).isPrefix(v)) {
                if (DDDEBUG) System.err.println("Found extent of length " + n.extentLength);
                a = n.extentLength;
                top = n;
            } else {
                if (DDDEBUG) System.err.println("Missing");
                b = f - 1;
            }
        }
        checkMask >>= 1;
    }
    if (DDDEBUG) System.err.println("Final interval: [" + (a + 1) + ".." + b + "]; top: " + top);
    return top;
} | protected InternalNode<T> fatBinarySearchExact(final LongArrayBitVector v, final long[] state, long a, long b) {
if (DDDEBUG) System.err.println("fatBinarySearchExact(" + v + ", [" + a + ".." + b + "])");
// We actually keep track of (a..b]
a--;
assert a <= b : a + " >= " + b;
final long length = v.length();
InternalNode<T> top = null;
long checkMask = -1L << Fast.ceilLog2(b - a);
while (a < b) {
assert checkMask != 0;
if (DDDEBUG) System.err.println("[" + (a + 1) + ".." + b + "]");
final long f = b & checkMask;
if ((a & checkMask) != f) {
if (DDDEBUG) System.err.println("Inquiring with key " + v.subVector(0, f) + " (" + f + ")");
final InternalNode<T> n = handle2Node.findExact(v, f, state);
if (n != null && n.extentLength < length && n.extent(transform).isPrefix(v)) {
if (DDDEBUG) System.err.println("Found extent of length " + n.extentLength);
a = n.extentLength;
top = n;
} else {
if (DDDEBUG) System.err.println("Missing");
b = f - 1;
}
}
checkMask >>= 1;
}
if (DDDEBUG) System.err.println("Final interval: [" + (a + 1) + ".." + b + "]; top: " + top);
return top;
} | Reinstated check to avoid infinite loops | https://github.com/vigna/Sux4J/commit/ba927153e2b53cb41df2b74dd3deb8a6708a53e4 | null | null | src/it/unimi/dsi/sux4j/util/ZFastTrie.java | 0 | java | false | 2021-03-26T00:13:26Z |
def _llm_type(self) -> str:
"""Return type of llm."""
return "alpeh_alpha" | def _llm_type(self) -> str:
"""Return type of llm."""
return "aleph_alpha" | merge | https://github.com/hwchase17/langchain/commit/37f4f246d797db6eecfab0e35748101a33667b12 | null | null | langchain/llms/aleph_alpha.py | 0 | py | false | 2023-06-20T08:05:46Z |
/*
 * Attempts to read a MIFARE Classic card.
 *
 * Two passes: first, the table of supported card types is tried
 * (verify/read/parse handlers); if none succeeds, a cached key set obtained
 * via the worker callback is used to read the card sector by sector.
 *
 * Returns true only if a supported-card handler succeeded or every sector
 * could be read with the cached keys.
 */
static bool nfc_worker_read_mf_classic(NfcWorker* nfc_worker, FuriHalNfcTxRxContext* tx_rx) {
    furi_assert(nfc_worker->callback);
    bool read_success = false;
    /* Hook the tx/rx context up to the debug pcap worker — presumably to
     * record the exchanged frames; confirm against nfc_debug_pcap docs. */
    nfc_debug_pcap_prepare_tx_rx(nfc_worker->debug_pcap_worker, tx_rx, false);
    do {
        // Try to read supported card
        FURI_LOG_I(TAG, "Try read supported card ...");
        for(size_t i = 0; i < NfcSupportedCardTypeEnd; i++) {
            /* Only consider entries registered for the MIFARE Classic protocol. */
            if(nfc_supported_card[i].protocol == NfcDeviceProtocolMifareClassic) {
                if(nfc_supported_card[i].verify(nfc_worker, tx_rx)) {
                    if(nfc_supported_card[i].read(nfc_worker, tx_rx)) {
                        read_success = true;
                        nfc_supported_card[i].parse(nfc_worker->dev_data);
                    }
                }
            }
        }
        if(read_success) break;
        // Try to read card with key cache
        FURI_LOG_I(TAG, "Search for key cache ...");
        if(nfc_worker->callback(NfcWorkerEventReadMfClassicLoadKeyCache, nfc_worker->context)) {
            FURI_LOG_I(TAG, "Load keys cache success. Start reading");
            uint8_t sectors_read =
                mf_classic_update_card(tx_rx, &nfc_worker->dev_data->mf_classic_data);
            uint8_t sectors_total =
                mf_classic_get_total_sectors_num(nfc_worker->dev_data->mf_classic_data.type);
            FURI_LOG_I(TAG, "Read %d sectors out of %d total", sectors_read, sectors_total);
            /* Success requires every sector of the card type to be readable. */
            read_success = (sectors_read == sectors_total);
        }
    } while(false);
    return read_success;
} | static bool nfc_worker_read_mf_classic(NfcWorker* nfc_worker, FuriHalNfcTxRxContext* tx_rx) {
furi_assert(nfc_worker->callback);
bool read_success = false;
if(furi_hal_rtc_is_flag_set(FuriHalRtcFlagDebug)) {
reader_analyzer_prepare_tx_rx(nfc_worker->reader_analyzer, tx_rx, false);
reader_analyzer_start(nfc_worker->reader_analyzer, ReaderAnalyzerModeDebugLog);
}
do {
// Try to read supported card
FURI_LOG_I(TAG, "Try read supported card ...");
for(size_t i = 0; i < NfcSupportedCardTypeEnd; i++) {
if(nfc_supported_card[i].protocol == NfcDeviceProtocolMifareClassic) {
if(nfc_supported_card[i].verify(nfc_worker, tx_rx)) {
if(nfc_supported_card[i].read(nfc_worker, tx_rx)) {
read_success = true;
nfc_supported_card[i].parse(nfc_worker->dev_data);
}
}
}
}
if(read_success) break;
// Try to read card with key cache
FURI_LOG_I(TAG, "Search for key cache ...");
if(nfc_worker->callback(NfcWorkerEventReadMfClassicLoadKeyCache, nfc_worker->context)) {
FURI_LOG_I(TAG, "Load keys cache success. Start reading");
uint8_t sectors_read =
mf_classic_update_card(tx_rx, &nfc_worker->dev_data->mf_classic_data);
uint8_t sectors_total =
mf_classic_get_total_sectors_num(nfc_worker->dev_data->mf_classic_data.type);
FURI_LOG_I(TAG, "Read %d sectors out of %d total", sectors_read, sectors_total);
read_success = (sectors_read == sectors_total);
}
} while(false);
if(furi_hal_rtc_is_flag_set(FuriHalRtcFlagDebug)) {
reader_analyzer_stop(nfc_worker->reader_analyzer);
}
return read_success;
} | Merge branch 'dev' into mifare_ul_buffer_overflow | https://github.com/flipperdevices/flipperzero-firmware/commit/c3299268123d2767205cda8846635c43bf2f6d8a | null | null | lib/nfc/nfc_worker.c | 0 | c | false | 2022-09-05T10:12:03Z |
/**
 * Debug helper: checks that @p obj_p is the global object or one of the
 * builtin prototype objects whose class name is expected to be "Object".
 *
 * Fix: ECMA_BUILTIN_ID_BIGINT_PROTOTYPE was missing from the list, so the
 * check could fail for the BigInt prototype object when
 * JERRY_BUILTIN_BIGINT is enabled.
 *
 * In JERRY_NDEBUG builds the check is compiled out and always returns true.
 *
 * @param obj_p object to check
 * @return true - if the object is recognized (or checks are disabled)
 */
static inline bool
ecma_object_check_class_name_is_object (ecma_object_t *obj_p) /**< object */
{
#ifndef JERRY_NDEBUG
  return (ecma_builtin_is_global (obj_p)
#if JERRY_BUILTIN_TYPEDARRAY
          || ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_ARRAYBUFFER_PROTOTYPE)
#if JERRY_BUILTIN_SHAREDARRAYBUFFER
          || ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_SHARED_ARRAYBUFFER_PROTOTYPE)
#endif /* JERRY_BUILTIN_SHAREDARRAYBUFFER */
          || ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_TYPEDARRAY_PROTOTYPE)
          || ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_INT8ARRAY_PROTOTYPE)
          || ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_UINT8ARRAY_PROTOTYPE)
          || ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_INT16ARRAY_PROTOTYPE)
          || ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_UINT16ARRAY_PROTOTYPE)
          || ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_INT32ARRAY_PROTOTYPE)
          || ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_UINT32ARRAY_PROTOTYPE)
          || ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_FLOAT32ARRAY_PROTOTYPE)
          || ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_UINT8CLAMPEDARRAY_PROTOTYPE)
#if JERRY_NUMBER_TYPE_FLOAT64
          || ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_FLOAT64ARRAY_PROTOTYPE)
#endif /* JERRY_NUMBER_TYPE_FLOAT64 */
#if JERRY_BUILTIN_BIGINT
          /* Added: the BigInt prototype itself belongs in this list. */
          || ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_BIGINT_PROTOTYPE)
          || ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_BIGINT64ARRAY_PROTOTYPE)
          || ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_BIGUINT64ARRAY_PROTOTYPE)
#endif /* JERRY_BUILTIN_BIGINT */
#endif /* JERRY_BUILTIN_TYPEDARRAY */
#if JERRY_ESNEXT
          || ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_ARRAY_PROTOTYPE_UNSCOPABLES)
          || ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_ARRAY_ITERATOR_PROTOTYPE)
          || ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_ITERATOR_PROTOTYPE)
          || ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_STRING_ITERATOR_PROTOTYPE)
          || ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_REGEXP_STRING_ITERATOR_PROTOTYPE)
          || ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_EVAL_ERROR_PROTOTYPE)
          || ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_RANGE_ERROR_PROTOTYPE)
          || ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_REFERENCE_ERROR_PROTOTYPE)
          || ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_SYNTAX_ERROR_PROTOTYPE)
          || ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_GENERATOR_PROTOTYPE)
          || ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_TYPE_ERROR_PROTOTYPE)
          || ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_AGGREGATE_ERROR_PROTOTYPE)
          || ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_URI_ERROR_PROTOTYPE)
          || ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_ERROR_PROTOTYPE)
          || ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_DATE_PROTOTYPE)
          || ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_REGEXP_PROTOTYPE)
          || ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_SYMBOL_PROTOTYPE)
          || ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_ASYNC_FUNCTION_PROTOTYPE)
          || ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_PROMISE_PROTOTYPE)
#endif /* JERRY_ESNEXT */
#if JERRY_BUILTIN_CONTAINER
          || ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_MAP_PROTOTYPE)
          || ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_SET_PROTOTYPE)
          || ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_WEAKMAP_PROTOTYPE)
          || ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_WEAKSET_PROTOTYPE)
#if JERRY_ESNEXT
          || ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_MAP_ITERATOR_PROTOTYPE)
          || ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_SET_ITERATOR_PROTOTYPE)
#endif /* JERRY_ESNEXT */
#endif /* JERRY_BUILTIN_CONTAINER */
#if JERRY_BUILTIN_WEAKREF
          || ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_WEAKREF_PROTOTYPE)
#endif /* JERRY_BUILTIN_WEAKREF */
#if JERRY_BUILTIN_DATAVIEW
          || ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_DATAVIEW_PROTOTYPE)
#endif /* JERRY_BUILTIN_DATAVIEW */
          || ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_OBJECT_PROTOTYPE));
#else /* JERRY_NDEBUG */
  JERRY_UNUSED (obj_p);
  return true;
#endif /* !JERRY_NDEBUG */
}
ecma_object_check_class_name_is_object (ecma_object_t *obj_p) /**< object */
{
#ifndef JERRY_NDEBUG
return (ecma_builtin_is_global (obj_p)
#if JERRY_BUILTIN_TYPEDARRAY
|| ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_ARRAYBUFFER_PROTOTYPE)
#if JERRY_BUILTIN_SHAREDARRAYBUFFER
|| ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_SHARED_ARRAYBUFFER_PROTOTYPE)
#endif /* JERRY_BUILTIN_SHAREDARRAYBUFFER */
|| ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_TYPEDARRAY_PROTOTYPE)
|| ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_INT8ARRAY_PROTOTYPE)
|| ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_UINT8ARRAY_PROTOTYPE)
|| ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_INT16ARRAY_PROTOTYPE)
|| ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_UINT16ARRAY_PROTOTYPE)
|| ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_INT32ARRAY_PROTOTYPE)
|| ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_UINT32ARRAY_PROTOTYPE)
|| ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_FLOAT32ARRAY_PROTOTYPE)
|| ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_UINT8CLAMPEDARRAY_PROTOTYPE)
#if JERRY_NUMBER_TYPE_FLOAT64
|| ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_FLOAT64ARRAY_PROTOTYPE)
#endif /* JERRY_NUMBER_TYPE_FLOAT64 */
#if JERRY_BUILTIN_BIGINT
|| ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_BIGINT_PROTOTYPE)
|| ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_BIGINT64ARRAY_PROTOTYPE)
|| ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_BIGUINT64ARRAY_PROTOTYPE)
#endif /* JERRY_BUILTIN_BIGINT */
#endif /* JERRY_BUILTIN_TYPEDARRAY */
#if JERRY_ESNEXT
|| ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_ARRAY_PROTOTYPE_UNSCOPABLES)
|| ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_ARRAY_ITERATOR_PROTOTYPE)
|| ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_ITERATOR_PROTOTYPE)
|| ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_STRING_ITERATOR_PROTOTYPE)
|| ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_REGEXP_STRING_ITERATOR_PROTOTYPE)
|| ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_EVAL_ERROR_PROTOTYPE)
|| ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_RANGE_ERROR_PROTOTYPE)
|| ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_REFERENCE_ERROR_PROTOTYPE)
|| ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_SYNTAX_ERROR_PROTOTYPE)
|| ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_GENERATOR_PROTOTYPE)
|| ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_TYPE_ERROR_PROTOTYPE)
|| ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_AGGREGATE_ERROR_PROTOTYPE)
|| ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_URI_ERROR_PROTOTYPE)
|| ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_ERROR_PROTOTYPE)
|| ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_DATE_PROTOTYPE)
|| ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_REGEXP_PROTOTYPE)
|| ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_SYMBOL_PROTOTYPE)
|| ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_ASYNC_FUNCTION_PROTOTYPE)
|| ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_PROMISE_PROTOTYPE)
#endif /* JERRY_ESNEXT */
#if JERRY_BUILTIN_CONTAINER
|| ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_MAP_PROTOTYPE)
|| ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_SET_PROTOTYPE)
|| ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_WEAKMAP_PROTOTYPE)
|| ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_WEAKSET_PROTOTYPE)
#if JERRY_ESNEXT
|| ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_MAP_ITERATOR_PROTOTYPE)
|| ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_SET_ITERATOR_PROTOTYPE)
#endif /* JERRY_ESNEXT */
#endif /* JERRY_BUILTIN_CONTAINER */
#if JERRY_BUILTIN_WEAKREF
|| ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_WEAKREF_PROTOTYPE)
#endif /* JERRY_BUILTIN_WEAKREF */
#if JERRY_BUILTIN_DATAVIEW
|| ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_DATAVIEW_PROTOTYPE)
#endif /* JERRY_BUILTIN_DATAVIEW */
|| ecma_builtin_is (obj_p, ECMA_BUILTIN_ID_OBJECT_PROTOTYPE));
#else /* JERRY_NDEBUG */
JERRY_UNUSED (obj_p);
return true;
#endif /* !JERRY_NDEBUG */
} | Add missing object types for ecma_object_get_class_name
This patch fixes #4937 and fixes #4938.
JerryScript-DCO-1.0-Signed-off-by: Robert Fancsik robert.fancsik@h-lab.eu | https://github.com/jerryscript-project/jerryscript/commit/b0625e61819e1f5823f16f2463f5def4ab16bdfb | null | null | jerry-core/ecma/operations/ecma-objects.c | 0 | c | false | null |
/**
 * Shows or hides the map's control panels.
 *
 * @param topControlsVisible    whether the center info view and the left/right
 *                              widget panels are shown
 * @param bottomControlsVisible whether the bottom controls container is shown
 */
public void updateControlsVisibility(boolean topControlsVisible, boolean bottomControlsVisible) {
    AndroidUiHelper.updateVisibility(mapActivity.findViewById(R.id.map_center_info), topControlsVisible);
    AndroidUiHelper.updateVisibility(mapActivity.findViewById(R.id.map_left_widgets_panel), topControlsVisible);
    AndroidUiHelper.updateVisibility(mapActivity.findViewById(R.id.map_right_widgets_panel), topControlsVisible);
    AndroidUiHelper.updateVisibility(mapActivity.findViewById(R.id.bottom_controls_container), bottomControlsVisible);
} | public void updateControlsVisibility(boolean topControlsVisible, boolean bottomControlsVisible) {
int topControlsVisibility = topControlsVisible ? View.VISIBLE : View.GONE;
AndroidUiHelper.setVisibility(mapActivity, topControlsVisibility,
R.id.map_center_info,
R.id.map_left_widgets_panel,
R.id.map_right_widgets_panel);
int bottomControlsVisibility = bottomControlsVisible ? View.VISIBLE : View.GONE;
AndroidUiHelper.setVisibility(mapActivity, bottomControlsVisibility,
R.id.bottom_controls_container);
} | Fix #14118 Osmand crashes when detail button is clicked | https://github.com/osmandapp/OsmAnd/commit/0d508430bd9012e0cd43ff0d4b19fa8f2e2b51d4 | null | null | OsmAnd/src/net/osmand/plus/views/mapwidgets/WidgetsVisibilityHelper.java | 0 | java | false | null |
def read_config(self, config, **kwargs):
self.server_name = config["server_name"]
self.server_context = config.get("server_context", None)
try:
parse_and_validate_server_name(self.server_name)
except ValueError as e:
raise ConfigError(str(e))
self.pid_file = self.abspath(config.get("pid_file"))
self.web_client_location = config.get("web_client_location", None)
self.soft_file_limit = config.get("soft_file_limit", 0)
self.daemonize = config.get("daemonize")
self.print_pidfile = config.get("print_pidfile")
self.user_agent_suffix = config.get("user_agent_suffix")
self.use_frozen_dicts = config.get("use_frozen_dicts", False)
self.public_baseurl = config.get("public_baseurl") or "https://%s/" % (
self.server_name,
)
if self.public_baseurl[-1] != "/":
self.public_baseurl += "/"
# Whether to enable user presence.
self.use_presence = config.get("use_presence", True)
# Whether to update the user directory or not. This should be set to
# false only if we are updating the user directory in a worker
self.update_user_directory = config.get("update_user_directory", True)
# whether to enable the media repository endpoints. This should be set
# to false if the media repository is running as a separate endpoint;
# doing so ensures that we will not run cache cleanup jobs on the
# master, potentially causing inconsistency.
self.enable_media_repo = config.get("enable_media_repo", True)
# Whether to require authentication to retrieve profile data (avatars,
# display names) of other users through the client API.
self.require_auth_for_profile_requests = config.get(
"require_auth_for_profile_requests", False
)
# Whether to require sharing a room with a user to retrieve their
# profile data
self.limit_profile_requests_to_users_who_share_rooms = config.get(
"limit_profile_requests_to_users_who_share_rooms", False,
)
if "restrict_public_rooms_to_local_users" in config and (
"allow_public_rooms_without_auth" in config
or "allow_public_rooms_over_federation" in config
):
raise ConfigError(
"Can't use 'restrict_public_rooms_to_local_users' if"
" 'allow_public_rooms_without_auth' and/or"
" 'allow_public_rooms_over_federation' is set."
)
# Check if the legacy "restrict_public_rooms_to_local_users" flag is set. This
# flag is now obsolete but we need to check it for backward-compatibility.
if config.get("restrict_public_rooms_to_local_users", False):
self.allow_public_rooms_without_auth = False
self.allow_public_rooms_over_federation = False
else:
# If set to 'true', removes the need for authentication to access the server's
# public rooms directory through the client API, meaning that anyone can
# query the room directory. Defaults to 'false'.
self.allow_public_rooms_without_auth = config.get(
"allow_public_rooms_without_auth", False
)
# If set to 'true', allows any other homeserver to fetch the server's public
# rooms directory via federation. Defaults to 'false'.
self.allow_public_rooms_over_federation = config.get(
"allow_public_rooms_over_federation", False
)
default_room_version = config.get("default_room_version", DEFAULT_ROOM_VERSION)
# Ensure room version is a str
default_room_version = str(default_room_version)
if default_room_version not in KNOWN_ROOM_VERSIONS:
raise ConfigError(
"Unknown default_room_version: %s, known room versions: %s"
% (default_room_version, list(KNOWN_ROOM_VERSIONS.keys()))
)
# Get the actual room version object rather than just the identifier
self.default_room_version = KNOWN_ROOM_VERSIONS[default_room_version]
# whether to enable search. If disabled, new entries will not be inserted
# into the search tables and they will not be indexed. Users will receive
# errors when attempting to search for messages.
self.enable_search = config.get("enable_search", True)
self.filter_timeline_limit = config.get("filter_timeline_limit", 100)
# Whether we should block invites sent to users on this server
# (other than those sent by local server admins)
self.block_non_admin_invites = config.get("block_non_admin_invites", False)
# Whether to enable experimental MSC1849 (aka relations) support
self.experimental_msc1849_support_enabled = config.get(
"experimental_msc1849_support_enabled", True
)
# Options to control access by tracking MAU
self.limit_usage_by_mau = config.get("limit_usage_by_mau", False)
self.max_mau_value = 0
if self.limit_usage_by_mau:
self.max_mau_value = config.get("max_mau_value", 0)
self.mau_stats_only = config.get("mau_stats_only", False)
self.mau_limits_reserved_threepids = config.get(
"mau_limit_reserved_threepids", []
)
self.mau_trial_days = config.get("mau_trial_days", 0)
self.mau_limit_alerting = config.get("mau_limit_alerting", True)
# How long to keep redacted events in the database in unredacted form
# before redacting them.
redaction_retention_period = config.get("redaction_retention_period", "7d")
if redaction_retention_period is not None:
self.redaction_retention_period = self.parse_duration(
redaction_retention_period
)
else:
self.redaction_retention_period = None
# How long to keep entries in the `users_ips` table.
user_ips_max_age = config.get("user_ips_max_age", "28d")
if user_ips_max_age is not None:
self.user_ips_max_age = self.parse_duration(user_ips_max_age)
else:
self.user_ips_max_age = None
# Options to disable HS
self.hs_disabled = config.get("hs_disabled", False)
self.hs_disabled_message = config.get("hs_disabled_message", "")
# Admin uri to direct users at should their instance become blocked
# due to resource constraints
self.admin_contact = config.get("admin_contact", None)
ip_range_blacklist = config.get(
"ip_range_blacklist", DEFAULT_IP_RANGE_BLACKLIST
)
# Attempt to create an IPSet from the given ranges
try:
self.ip_range_blacklist = IPSet(ip_range_blacklist)
except Exception as e:
raise ConfigError("Invalid range(s) provided in ip_range_blacklist.") from e
# Always blacklist 0.0.0.0, ::
self.ip_range_blacklist.update(["0.0.0.0", "::"])
try:
self.ip_range_whitelist = IPSet(config.get("ip_range_whitelist", ()))
except Exception as e:
raise ConfigError("Invalid range(s) provided in ip_range_whitelist.") from e
# The federation_ip_range_blacklist is used for backwards-compatibility
# and only applies to federation and identity servers. If it is not given,
# default to ip_range_blacklist.
federation_ip_range_blacklist = config.get(
"federation_ip_range_blacklist", ip_range_blacklist
)
try:
self.federation_ip_range_blacklist = IPSet(federation_ip_range_blacklist)
except Exception as e:
raise ConfigError(
"Invalid range(s) provided in federation_ip_range_blacklist."
) from e
# Always blacklist 0.0.0.0, ::
self.federation_ip_range_blacklist.update(["0.0.0.0", "::"])
self.start_pushers = config.get("start_pushers", True)
# (undocumented) option for torturing the worker-mode replication a bit,
# for testing. The value defines the number of milliseconds to pause before
# sending out any replication updates.
self.replication_torture_level = config.get("replication_torture_level")
# Whether to require a user to be in the room to add an alias to it.
# Defaults to True.
self.require_membership_for_aliases = config.get(
"require_membership_for_aliases", True
)
# Whether to allow per-room membership profiles through the send of membership
# events with profile information that differ from the target's global profile.
self.allow_per_room_profiles = config.get("allow_per_room_profiles", True)
retention_config = config.get("retention")
if retention_config is None:
retention_config = {}
self.retention_enabled = retention_config.get("enabled", False)
retention_default_policy = retention_config.get("default_policy")
if retention_default_policy is not None:
self.retention_default_min_lifetime = retention_default_policy.get(
"min_lifetime"
)
if self.retention_default_min_lifetime is not None:
self.retention_default_min_lifetime = self.parse_duration(
self.retention_default_min_lifetime
)
self.retention_default_max_lifetime = retention_default_policy.get(
"max_lifetime"
)
if self.retention_default_max_lifetime is not None:
self.retention_default_max_lifetime = self.parse_duration(
self.retention_default_max_lifetime
)
if (
self.retention_default_min_lifetime is not None
and self.retention_default_max_lifetime is not None
and (
self.retention_default_min_lifetime
> self.retention_default_max_lifetime
)
):
raise ConfigError(
"The default retention policy's 'min_lifetime' can not be greater"
" than its 'max_lifetime'"
)
else:
self.retention_default_min_lifetime = None
self.retention_default_max_lifetime = None
if self.retention_enabled:
logger.info(
"Message retention policies support enabled with the following default"
" policy: min_lifetime = %s ; max_lifetime = %s",
self.retention_default_min_lifetime,
self.retention_default_max_lifetime,
)
self.retention_allowed_lifetime_min = retention_config.get(
"allowed_lifetime_min"
)
if self.retention_allowed_lifetime_min is not None:
self.retention_allowed_lifetime_min = self.parse_duration(
self.retention_allowed_lifetime_min
)
self.retention_allowed_lifetime_max = retention_config.get(
"allowed_lifetime_max"
)
if self.retention_allowed_lifetime_max is not None:
self.retention_allowed_lifetime_max = self.parse_duration(
self.retention_allowed_lifetime_max
)
if (
self.retention_allowed_lifetime_min is not None
and self.retention_allowed_lifetime_max is not None
and self.retention_allowed_lifetime_min
> self.retention_allowed_lifetime_max
):
raise ConfigError(
"Invalid retention policy limits: 'allowed_lifetime_min' can not be"
" greater than 'allowed_lifetime_max'"
)
self.retention_purge_jobs = [] # type: List[Dict[str, Optional[int]]]
for purge_job_config in retention_config.get("purge_jobs", []):
interval_config = purge_job_config.get("interval")
if interval_config is None:
raise ConfigError(
"A retention policy's purge jobs configuration must have the"
" 'interval' key set."
)
interval = self.parse_duration(interval_config)
shortest_max_lifetime = purge_job_config.get("shortest_max_lifetime")
if shortest_max_lifetime is not None:
shortest_max_lifetime = self.parse_duration(shortest_max_lifetime)
longest_max_lifetime = purge_job_config.get("longest_max_lifetime")
if longest_max_lifetime is not None:
longest_max_lifetime = self.parse_duration(longest_max_lifetime)
if (
shortest_max_lifetime is not None
and longest_max_lifetime is not None
and shortest_max_lifetime > longest_max_lifetime
):
raise ConfigError(
"A retention policy's purge jobs configuration's"
" 'shortest_max_lifetime' value can not be greater than its"
" 'longest_max_lifetime' value."
)
self.retention_purge_jobs.append(
{
"interval": interval,
"shortest_max_lifetime": shortest_max_lifetime,
"longest_max_lifetime": longest_max_lifetime,
}
)
if not self.retention_purge_jobs:
self.retention_purge_jobs = [
{
"interval": self.parse_duration("1d"),
"shortest_max_lifetime": None,
"longest_max_lifetime": None,
}
]
self.listeners = [parse_listener_def(x) for x in config.get("listeners", [])]
# no_tls is not really supported any more, but let's grandfather it in
# here.
if config.get("no_tls", False):
l2 = []
for listener in self.listeners:
if listener.tls:
logger.info(
"Ignoring TLS-enabled listener on port %i due to no_tls",
listener.port,
)
else:
l2.append(listener)
self.listeners = l2
if not self.web_client_location:
_warn_if_webclient_configured(self.listeners)
self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None))
@attr.s
class LimitRemoteRoomsConfig:
enabled = attr.ib(
validator=attr.validators.instance_of(bool), default=False
)
complexity = attr.ib(
validator=attr.validators.instance_of(
(float, int) # type: ignore[arg-type] # noqa
),
default=1.0,
)
complexity_error = attr.ib(
validator=attr.validators.instance_of(str),
default=ROOM_COMPLEXITY_TOO_GREAT,
)
admins_can_join = attr.ib(
validator=attr.validators.instance_of(bool), default=False
)
self.limit_remote_rooms = LimitRemoteRoomsConfig(
**(config.get("limit_remote_rooms") or {})
)
bind_port = config.get("bind_port")
if bind_port:
if config.get("no_tls", False):
raise ConfigError("no_tls is incompatible with bind_port")
self.listeners = []
bind_host = config.get("bind_host", "")
gzip_responses = config.get("gzip_responses", True)
http_options = HttpListenerConfig(
resources=[
HttpResourceConfig(names=["client"], compress=gzip_responses),
HttpResourceConfig(names=["federation"]),
],
)
self.listeners.append(
ListenerConfig(
port=bind_port,
bind_addresses=[bind_host],
tls=True,
type="http",
http_options=http_options,
)
)
unsecure_port = config.get("unsecure_port", bind_port - 400)
if unsecure_port:
self.listeners.append(
ListenerConfig(
port=unsecure_port,
bind_addresses=[bind_host],
tls=False,
type="http",
http_options=http_options,
)
)
manhole = config.get("manhole")
if manhole:
self.listeners.append(
ListenerConfig(
port=manhole, bind_addresses=["127.0.0.1"], type="manhole",
)
)
metrics_port = config.get("metrics_port")
if metrics_port:
logger.warning(METRICS_PORT_WARNING)
self.listeners.append(
ListenerConfig(
port=metrics_port,
bind_addresses=[config.get("metrics_bind_host", "127.0.0.1")],
type="http",
http_options=HttpListenerConfig(
resources=[HttpResourceConfig(names=["metrics"])]
),
)
)
self.cleanup_extremities_with_dummy_events = config.get(
"cleanup_extremities_with_dummy_events", True
)
# The number of forward extremities in a room needed to send a dummy event.
self.dummy_events_threshold = config.get("dummy_events_threshold", 10)
self.enable_ephemeral_messages = config.get("enable_ephemeral_messages", False)
# Inhibits the /requestToken endpoints from returning an error that might leak
# information about whether an e-mail address is in use or not on this
# homeserver, and instead return a 200 with a fake sid if this kind of error is
# met, without sending anything.
# This is a compromise between sending an email, which could be a spam vector,
# and letting the client know which email address is bound to an account and
# which one isn't.
self.request_token_inhibit_3pid_errors = config.get(
"request_token_inhibit_3pid_errors", False,
)
# List of users trialing the new experimental default push rules. This setting is
# not included in the sample configuration file on purpose as it's a temporary
# hack, so that some users can trial the new defaults without impacting every
# user on the homeserver.
users_new_default_push_rules = (
config.get("users_new_default_push_rules") or []
) # type: list
if not isinstance(users_new_default_push_rules, list):
raise ConfigError("'users_new_default_push_rules' must be a list")
# Turn the list into a set to improve lookup speed.
self.users_new_default_push_rules = set(
users_new_default_push_rules
) # type: set
# Whitelist of domain names that given next_link parameters must have
next_link_domain_whitelist = config.get(
"next_link_domain_whitelist"
) # type: Optional[List[str]]
self.next_link_domain_whitelist = None # type: Optional[Set[str]]
if next_link_domain_whitelist is not None:
if not isinstance(next_link_domain_whitelist, list):
raise ConfigError("'next_link_domain_whitelist' must be a list")
# Turn the list into a set to improve lookup speed.
self.next_link_domain_whitelist = set(next_link_domain_whitelist) | def read_config(self, config, **kwargs):
self.server_name = config["server_name"]
self.server_context = config.get("server_context", None)
try:
parse_and_validate_server_name(self.server_name)
except ValueError as e:
raise ConfigError(str(e))
self.pid_file = self.abspath(config.get("pid_file"))
self.web_client_location = config.get("web_client_location", None)
self.soft_file_limit = config.get("soft_file_limit", 0)
self.daemonize = config.get("daemonize")
self.print_pidfile = config.get("print_pidfile")
self.user_agent_suffix = config.get("user_agent_suffix")
self.use_frozen_dicts = config.get("use_frozen_dicts", False)
self.public_baseurl = config.get("public_baseurl") or "https://%s/" % (
self.server_name,
)
if self.public_baseurl[-1] != "/":
self.public_baseurl += "/"
# Whether to enable user presence.
self.use_presence = config.get("use_presence", True)
# Whether to update the user directory or not. This should be set to
# false only if we are updating the user directory in a worker
self.update_user_directory = config.get("update_user_directory", True)
# whether to enable the media repository endpoints. This should be set
# to false if the media repository is running as a separate endpoint;
# doing so ensures that we will not run cache cleanup jobs on the
# master, potentially causing inconsistency.
self.enable_media_repo = config.get("enable_media_repo", True)
# Whether to require authentication to retrieve profile data (avatars,
# display names) of other users through the client API.
self.require_auth_for_profile_requests = config.get(
"require_auth_for_profile_requests", False
)
# Whether to require sharing a room with a user to retrieve their
# profile data
self.limit_profile_requests_to_users_who_share_rooms = config.get(
"limit_profile_requests_to_users_who_share_rooms", False,
)
if "restrict_public_rooms_to_local_users" in config and (
"allow_public_rooms_without_auth" in config
or "allow_public_rooms_over_federation" in config
):
raise ConfigError(
"Can't use 'restrict_public_rooms_to_local_users' if"
" 'allow_public_rooms_without_auth' and/or"
" 'allow_public_rooms_over_federation' is set."
)
# Check if the legacy "restrict_public_rooms_to_local_users" flag is set. This
# flag is now obsolete but we need to check it for backward-compatibility.
if config.get("restrict_public_rooms_to_local_users", False):
self.allow_public_rooms_without_auth = False
self.allow_public_rooms_over_federation = False
else:
# If set to 'true', removes the need for authentication to access the server's
# public rooms directory through the client API, meaning that anyone can
# query the room directory. Defaults to 'false'.
self.allow_public_rooms_without_auth = config.get(
"allow_public_rooms_without_auth", False
)
# If set to 'true', allows any other homeserver to fetch the server's public
# rooms directory via federation. Defaults to 'false'.
self.allow_public_rooms_over_federation = config.get(
"allow_public_rooms_over_federation", False
)
default_room_version = config.get("default_room_version", DEFAULT_ROOM_VERSION)
# Ensure room version is a str
default_room_version = str(default_room_version)
if default_room_version not in KNOWN_ROOM_VERSIONS:
raise ConfigError(
"Unknown default_room_version: %s, known room versions: %s"
% (default_room_version, list(KNOWN_ROOM_VERSIONS.keys()))
)
# Get the actual room version object rather than just the identifier
self.default_room_version = KNOWN_ROOM_VERSIONS[default_room_version]
# whether to enable search. If disabled, new entries will not be inserted
# into the search tables and they will not be indexed. Users will receive
# errors when attempting to search for messages.
self.enable_search = config.get("enable_search", True)
self.filter_timeline_limit = config.get("filter_timeline_limit", 100)
# Whether we should block invites sent to users on this server
# (other than those sent by local server admins)
self.block_non_admin_invites = config.get("block_non_admin_invites", False)
# Whether to enable experimental MSC1849 (aka relations) support
self.experimental_msc1849_support_enabled = config.get(
"experimental_msc1849_support_enabled", True
)
# Options to control access by tracking MAU
self.limit_usage_by_mau = config.get("limit_usage_by_mau", False)
self.max_mau_value = 0
if self.limit_usage_by_mau:
self.max_mau_value = config.get("max_mau_value", 0)
self.mau_stats_only = config.get("mau_stats_only", False)
self.mau_limits_reserved_threepids = config.get(
"mau_limit_reserved_threepids", []
)
self.mau_trial_days = config.get("mau_trial_days", 0)
self.mau_limit_alerting = config.get("mau_limit_alerting", True)
# How long to keep redacted events in the database in unredacted form
# before redacting them.
redaction_retention_period = config.get("redaction_retention_period", "7d")
if redaction_retention_period is not None:
self.redaction_retention_period = self.parse_duration(
redaction_retention_period
)
else:
self.redaction_retention_period = None
# How long to keep entries in the `users_ips` table.
user_ips_max_age = config.get("user_ips_max_age", "28d")
if user_ips_max_age is not None:
self.user_ips_max_age = self.parse_duration(user_ips_max_age)
else:
self.user_ips_max_age = None
# Options to disable HS
self.hs_disabled = config.get("hs_disabled", False)
self.hs_disabled_message = config.get("hs_disabled_message", "")
# Admin uri to direct users at should their instance become blocked
# due to resource constraints
self.admin_contact = config.get("admin_contact", None)
ip_range_blacklist = config.get(
"ip_range_blacklist", DEFAULT_IP_RANGE_BLACKLIST
)
# Attempt to create an IPSet from the given ranges
try:
# Always blacklist 0.0.0.0, ::
self.ip_range_blacklist = generate_ip_set(
ip_range_blacklist, ["0.0.0.0", "::"]
)
except Exception as e:
raise ConfigError("Invalid range(s) provided in ip_range_blacklist.") from e
try:
self.ip_range_whitelist = generate_ip_set(
config.get("ip_range_whitelist", ())
)
except Exception as e:
raise ConfigError("Invalid range(s) provided in ip_range_whitelist.") from e
# The federation_ip_range_blacklist is used for backwards-compatibility
# and only applies to federation and identity servers. If it is not given,
# default to ip_range_blacklist.
federation_ip_range_blacklist = config.get(
"federation_ip_range_blacklist", ip_range_blacklist
)
try:
# Always blacklist 0.0.0.0, ::
self.federation_ip_range_blacklist = generate_ip_set(
federation_ip_range_blacklist, ["0.0.0.0", "::"]
)
except Exception as e:
raise ConfigError(
"Invalid range(s) provided in federation_ip_range_blacklist."
) from e
self.start_pushers = config.get("start_pushers", True)
# (undocumented) option for torturing the worker-mode replication a bit,
# for testing. The value defines the number of milliseconds to pause before
# sending out any replication updates.
self.replication_torture_level = config.get("replication_torture_level")
# Whether to require a user to be in the room to add an alias to it.
# Defaults to True.
self.require_membership_for_aliases = config.get(
"require_membership_for_aliases", True
)
# Whether to allow per-room membership profiles through the send of membership
# events with profile information that differ from the target's global profile.
self.allow_per_room_profiles = config.get("allow_per_room_profiles", True)
retention_config = config.get("retention")
if retention_config is None:
retention_config = {}
self.retention_enabled = retention_config.get("enabled", False)
retention_default_policy = retention_config.get("default_policy")
if retention_default_policy is not None:
self.retention_default_min_lifetime = retention_default_policy.get(
"min_lifetime"
)
if self.retention_default_min_lifetime is not None:
self.retention_default_min_lifetime = self.parse_duration(
self.retention_default_min_lifetime
)
self.retention_default_max_lifetime = retention_default_policy.get(
"max_lifetime"
)
if self.retention_default_max_lifetime is not None:
self.retention_default_max_lifetime = self.parse_duration(
self.retention_default_max_lifetime
)
if (
self.retention_default_min_lifetime is not None
and self.retention_default_max_lifetime is not None
and (
self.retention_default_min_lifetime
> self.retention_default_max_lifetime
)
):
raise ConfigError(
"The default retention policy's 'min_lifetime' can not be greater"
" than its 'max_lifetime'"
)
else:
self.retention_default_min_lifetime = None
self.retention_default_max_lifetime = None
if self.retention_enabled:
logger.info(
"Message retention policies support enabled with the following default"
" policy: min_lifetime = %s ; max_lifetime = %s",
self.retention_default_min_lifetime,
self.retention_default_max_lifetime,
)
self.retention_allowed_lifetime_min = retention_config.get(
"allowed_lifetime_min"
)
if self.retention_allowed_lifetime_min is not None:
self.retention_allowed_lifetime_min = self.parse_duration(
self.retention_allowed_lifetime_min
)
self.retention_allowed_lifetime_max = retention_config.get(
"allowed_lifetime_max"
)
if self.retention_allowed_lifetime_max is not None:
self.retention_allowed_lifetime_max = self.parse_duration(
self.retention_allowed_lifetime_max
)
if (
self.retention_allowed_lifetime_min is not None
and self.retention_allowed_lifetime_max is not None
and self.retention_allowed_lifetime_min
> self.retention_allowed_lifetime_max
):
raise ConfigError(
"Invalid retention policy limits: 'allowed_lifetime_min' can not be"
" greater than 'allowed_lifetime_max'"
)
self.retention_purge_jobs = [] # type: List[Dict[str, Optional[int]]]
for purge_job_config in retention_config.get("purge_jobs", []):
interval_config = purge_job_config.get("interval")
if interval_config is None:
raise ConfigError(
"A retention policy's purge jobs configuration must have the"
" 'interval' key set."
)
interval = self.parse_duration(interval_config)
shortest_max_lifetime = purge_job_config.get("shortest_max_lifetime")
if shortest_max_lifetime is not None:
shortest_max_lifetime = self.parse_duration(shortest_max_lifetime)
longest_max_lifetime = purge_job_config.get("longest_max_lifetime")
if longest_max_lifetime is not None:
longest_max_lifetime = self.parse_duration(longest_max_lifetime)
if (
shortest_max_lifetime is not None
and longest_max_lifetime is not None
and shortest_max_lifetime > longest_max_lifetime
):
raise ConfigError(
"A retention policy's purge jobs configuration's"
" 'shortest_max_lifetime' value can not be greater than its"
" 'longest_max_lifetime' value."
)
self.retention_purge_jobs.append(
{
"interval": interval,
"shortest_max_lifetime": shortest_max_lifetime,
"longest_max_lifetime": longest_max_lifetime,
}
)
if not self.retention_purge_jobs:
self.retention_purge_jobs = [
{
"interval": self.parse_duration("1d"),
"shortest_max_lifetime": None,
"longest_max_lifetime": None,
}
]
self.listeners = [parse_listener_def(x) for x in config.get("listeners", [])]
# no_tls is not really supported any more, but let's grandfather it in
# here.
if config.get("no_tls", False):
l2 = []
for listener in self.listeners:
if listener.tls:
logger.info(
"Ignoring TLS-enabled listener on port %i due to no_tls",
listener.port,
)
else:
l2.append(listener)
self.listeners = l2
if not self.web_client_location:
_warn_if_webclient_configured(self.listeners)
self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None))
@attr.s
class LimitRemoteRoomsConfig:
enabled = attr.ib(
validator=attr.validators.instance_of(bool), default=False
)
complexity = attr.ib(
validator=attr.validators.instance_of(
(float, int) # type: ignore[arg-type] # noqa
),
default=1.0,
)
complexity_error = attr.ib(
validator=attr.validators.instance_of(str),
default=ROOM_COMPLEXITY_TOO_GREAT,
)
admins_can_join = attr.ib(
validator=attr.validators.instance_of(bool), default=False
)
self.limit_remote_rooms = LimitRemoteRoomsConfig(
**(config.get("limit_remote_rooms") or {})
)
bind_port = config.get("bind_port")
if bind_port:
if config.get("no_tls", False):
raise ConfigError("no_tls is incompatible with bind_port")
self.listeners = []
bind_host = config.get("bind_host", "")
gzip_responses = config.get("gzip_responses", True)
http_options = HttpListenerConfig(
resources=[
HttpResourceConfig(names=["client"], compress=gzip_responses),
HttpResourceConfig(names=["federation"]),
],
)
self.listeners.append(
ListenerConfig(
port=bind_port,
bind_addresses=[bind_host],
tls=True,
type="http",
http_options=http_options,
)
)
unsecure_port = config.get("unsecure_port", bind_port - 400)
if unsecure_port:
self.listeners.append(
ListenerConfig(
port=unsecure_port,
bind_addresses=[bind_host],
tls=False,
type="http",
http_options=http_options,
)
)
manhole = config.get("manhole")
if manhole:
self.listeners.append(
ListenerConfig(
port=manhole, bind_addresses=["127.0.0.1"], type="manhole",
)
)
metrics_port = config.get("metrics_port")
if metrics_port:
logger.warning(METRICS_PORT_WARNING)
self.listeners.append(
ListenerConfig(
port=metrics_port,
bind_addresses=[config.get("metrics_bind_host", "127.0.0.1")],
type="http",
http_options=HttpListenerConfig(
resources=[HttpResourceConfig(names=["metrics"])]
),
)
)
self.cleanup_extremities_with_dummy_events = config.get(
"cleanup_extremities_with_dummy_events", True
)
# The number of forward extremities in a room needed to send a dummy event.
self.dummy_events_threshold = config.get("dummy_events_threshold", 10)
self.enable_ephemeral_messages = config.get("enable_ephemeral_messages", False)
# Inhibits the /requestToken endpoints from returning an error that might leak
# information about whether an e-mail address is in use or not on this
# homeserver, and instead return a 200 with a fake sid if this kind of error is
# met, without sending anything.
# This is a compromise between sending an email, which could be a spam vector,
# and letting the client know which email address is bound to an account and
# which one isn't.
self.request_token_inhibit_3pid_errors = config.get(
"request_token_inhibit_3pid_errors", False,
)
# List of users trialing the new experimental default push rules. This setting is
# not included in the sample configuration file on purpose as it's a temporary
# hack, so that some users can trial the new defaults without impacting every
# user on the homeserver.
users_new_default_push_rules = (
config.get("users_new_default_push_rules") or []
) # type: list
if not isinstance(users_new_default_push_rules, list):
raise ConfigError("'users_new_default_push_rules' must be a list")
# Turn the list into a set to improve lookup speed.
self.users_new_default_push_rules = set(
users_new_default_push_rules
) # type: set
# Whitelist of domain names that given next_link parameters must have
next_link_domain_whitelist = config.get(
"next_link_domain_whitelist"
) # type: Optional[List[str]]
self.next_link_domain_whitelist = None # type: Optional[Set[str]]
if next_link_domain_whitelist is not None:
if not isinstance(next_link_domain_whitelist, list):
raise ConfigError("'next_link_domain_whitelist' must be a list")
# Turn the list into a set to improve lookup speed.
self.next_link_domain_whitelist = set(next_link_domain_whitelist) | Apply additional IPv6 to custom blacklists. | https://github.com/matrix-org/synapse/commit/4e1c7bbb311caa7dde8c1b98f774da7cb5668ddf | null | null | synapse/config/server.py | 0 | py | false | 2021-01-27T15:21:33Z |
function lsStream (cache) {
const indexDir = bucketDir(cache)
const stream = new Minipass({ objectMode: true })
readdirOrEmpty(indexDir).then(buckets => Promise.all(
buckets.map(bucket => {
const bucketPath = path.join(indexDir, bucket)
return readdirOrEmpty(bucketPath).then(subbuckets => Promise.all(
subbuckets.map(subbucket => {
const subbucketPath = path.join(bucketPath, subbucket)
// "/cachename/<bucket 0xFF>/<bucket 0xFF>./*"
return readdirOrEmpty(subbucketPath).then(entries => Promise.all(
entries.map(entry => {
const entryPath = path.join(subbucketPath, entry)
return bucketEntries(entryPath).then(entries =>
// using a Map here prevents duplicate keys from
// showing up twice, I guess?
entries.reduce((acc, entry) => {
acc.set(entry.key, entry)
return acc
}, new Map())
).then(reduced => {
// reduced is a map of key => entry
for (const entry of reduced.values()) {
const formatted = formatEntry(cache, entry)
if (formatted) {
stream.write(formatted)
}
}
}).catch(err => {
if (err.code === 'ENOENT') {
return undefined
}
throw err
})
})
))
})
))
})
))
.then(
() => stream.end(),
err => stream.emit('error', err)
)
return stream
} | function lsStream (cache) {
const indexDir = bucketDir(cache)
const stream = new Minipass({ objectMode: true })
// Set all this up to run on the stream and then just return the stream
Promise.resolve().then(async () => {
const buckets = await readdirOrEmpty(indexDir)
await Promise.all(buckets.map(async (bucket) => {
const bucketPath = path.join(indexDir, bucket)
const subbuckets = await readdirOrEmpty(bucketPath)
await Promise.all(subbuckets.map(async (subbucket) => {
const subbucketPath = path.join(bucketPath, subbucket)
// "/cachename/<bucket 0xFF>/<bucket 0xFF>./*"
const subbucketEntries = await readdirOrEmpty(subbucketPath)
await Promise.all(subbucketEntries.map(async (entry) => {
const entryPath = path.join(subbucketPath, entry)
try {
const entries = await bucketEntries(entryPath)
// using a Map here prevents duplicate keys from showing up
// twice, I guess?
const reduced = entries.reduce((acc, entry) => {
acc.set(entry.key, entry)
return acc
}, new Map())
// reduced is a map of key => entry
for (const entry of reduced.values()) {
const formatted = formatEntry(cache, entry)
if (formatted) {
stream.write(formatted)
}
}
} catch (err) {
if (err.code === 'ENOENT') {
return undefined
}
throw err
}
}))
}))
}))
stream.end()
}).catch(err => stream.emit('error', err))
return stream
} | deps: upgrade npm to 8.11.0 | https://github.com/nodejs/node/commit/ebd4e9c165627e473cf043d2fafbc9862d11dae2 | CVE-2022-29244 | ['CWE-200', 'NVD-CWE-noinfo'] | deps/npm/node_modules/cacache/lib/entry-index.js | 0 | js | false | 2022-05-25T21:26:36Z |
protected void registerModels(){
ArcanaDataGenerators.WOODS.forEach((name, texture) -> {
fenceInventory(name + "_fence_inventory", texture);
fencePost(name + "_fence", texture);
fenceSide(name + "_fence", texture);
fenceGate(name + "_fence_gate", texture);
fenceGateOpen(name + "_fence_gate", texture);
fenceGateWall(name + "_fence_gate", texture);
fenceGateWallOpen(name + "_fence_gate", texture);
});
ArcanaDataGenerators.STONES.forEach((name, texture) -> {
cubeAll(name, texture);
slab(name + "_slab", texture, texture, texture);
slabTop(name + "_slab_top", texture, texture, texture);
stairs(name + "_stairs", texture, texture, texture);
stairsInner(name + "_stairs_inner", texture, texture, texture);
stairsOuter(name + "_stairs_outer", texture, texture, texture);
pressurePlate(name, texture);
wallInventory(name + "_wall_inventory", texture);
wallPost(name + "_wall_post", texture);
wallSide(name + "_wall_side", texture);
});
cubeAll("rough_limestone", new ResourceLocation(Arcana.MODID, "block/rough_limestone"));
cubeAll("smooth_limestone", new ResourceLocation(Arcana.MODID, "block/smooth_limestone"));
cubeAll("pridestone_bricks", new ResourceLocation(Arcana.MODID, "block/pridestone_bricks"));
cubeAll("pridestone_small_bricks", new ResourceLocation(Arcana.MODID, "block/pridestone_small_bricks"));
cubeAll("wet_pridestone", new ResourceLocation(Arcana.MODID, "block/wet_pridestone"));
cubeAll("wet_smooth_pridestone", new ResourceLocation(Arcana.MODID, "block/wet_smooth_pridestone"));
cubeAll("pridestone", new ResourceLocation(Arcana.MODID, "block/pridestone"));
cubeAll("smooth_pridestone", new ResourceLocation(Arcana.MODID, "block/smooth_pridestone"));
cubeAll("prideclay", new ResourceLocation(Arcana.MODID, "block/prideclay"));
cubeAll("gilded_prideclay", new ResourceLocation(Arcana.MODID, "block/gilded_prideclay"));
cubeAll("chiseled_prideful_gold_block", new ResourceLocation(Arcana.MODID, "block/chiseled_prideful_gold_block"));
cubeAll("carved_prideful_gold_block", new ResourceLocation(Arcana.MODID, "block/carved_prideful_gold_block"));
cubeAll("prideful_gold_block", new ResourceLocation(Arcana.MODID, "block/prideful_gold_block"));
cubeAll("prideful_gold_tile", new ResourceLocation(Arcana.MODID, "block/prideful_gold_tile"));
cubeAll("tainted_granite",new ResourceLocation(Arcana.MODID, "block/tainted_granite"));
cubeAll("tainted_diorite",new ResourceLocation(Arcana.MODID, "block/tainted_diorite"));
cubeAll("tainted_andesite",new ResourceLocation(Arcana.MODID, "block/tainted_andesite"));
cubeAll("silver_block", new ResourceLocation(Arcana.MODID, "block/silver_block"));
cubeAll("silver_ore", new ResourceLocation(Arcana.MODID, "block/silver_ore"));
cubeAll("void_metal_block", new ResourceLocation(Arcana.MODID, "block/void_metal_block"));
} | protected void registerModels(){
ArcanaDataGenerators.WOODS.forEach((name, texture) -> {
slab(name + "_slab", texture, texture, texture);
slabTop(name + "_slab_top", texture, texture, texture);
stairs(name + "_stairs", texture, texture, texture);
stairsInner(name + "_stairs_inner", texture, texture, texture);
stairsOuter(name + "_stairs_outer", texture, texture, texture);
pressurePlate(name, texture);
fenceInventory(name + "_fence_inventory", texture);
fencePost(name + "_fence", texture);
fenceSide(name + "_fence", texture);
fenceGate(name + "_fence_gate", texture);
fenceGateOpen(name + "_fence_gate", texture);
fenceGateWall(name + "_fence_gate", texture);
fenceGateWallOpen(name + "_fence_gate", texture);
});
ArcanaDataGenerators.STONES.forEach((name, texture) -> {
cubeAll(name, texture);
slab(name + "_slab", texture, texture, texture);
slabTop(name + "_slab_top", texture, texture, texture);
stairs(name + "_stairs", texture, texture, texture);
stairsInner(name + "_stairs_inner", texture, texture, texture);
stairsOuter(name + "_stairs_outer", texture, texture, texture);
pressurePlate(name, texture);
wallInventory(name + "_wall_inventory", texture);
wallPost(name + "_wall_post", texture);
wallSide(name + "_wall_side", texture);
});
cubeAll("tainted_granite",new ResourceLocation(Arcana.MODID, "block/tainted_granite"));
cubeAll("tainted_diorite",new ResourceLocation(Arcana.MODID, "block/tainted_diorite"));
cubeAll("tainted_andesite",new ResourceLocation(Arcana.MODID, "block/tainted_andesite"));
cubeAll("silver_block", new ResourceLocation(Arcana.MODID, "block/silver_block"));
cubeAll("silver_ore", new ResourceLocation(Arcana.MODID, "block/silver_ore"));
cubeAll("void_metal_block", new ResourceLocation(Arcana.MODID, "block/void_metal_block"));
} | Jar recipes | https://github.com/ArcanaMod/Arcana/commit/63e3b12b2619d1ecdeea987a0282b82b7e96a000 | null | null | src/main/java/net/arcanamod/datagen/BlockModels.java | 0 | java | false | null |
function Ge(e){return null!=(a=e)&&He(a.length)&&!Ie(a)?we(e):xe(e);var a} | function Ge(e){return na(e)||ta(e)||!!(re&&e&&e[re])} | YetiForce CRM ver. 6.4.0 (#16359)
* Added improvements in record collector
* Integration with UaYouControl.php (#16293)
Co-authored-by: Mariusz Krzaczkowski <m.krzaczkowski@yetiforce.com>
* Integration with UaYouControl.php (#16293)
* Add external link to NoBrregEnhetsregisteret. (#16292)
* Add external link to NoBrregEnhetsregisteret. #16292
* Add NorthData to RecordCollectors. (#16278)
* Add NorthData to RecordCollectors.
* Change docs.
Co-authored-by: Mariusz Krzaczkowski <m.krzaczkowski@yetiforce.com>
* Fix #16311
* Added conditions wizard for 'Update related record' workflow action
* Add NorthData to RecordCollectors. (#16278)
* Code improvements
* Added improvements in record collector
* Zefix integraion [in progress] (#16281)
* Zefix integraion [in progress]
* ChZefix integration.
Co-authored-by: Mariusz Krzaczkowski <m.krzaczkowski@yetiforce.com>
* Improved workflow action
* Added improvements in record collector
* Improvements in the store
* Update RecordCollector tests
* Code improvements
* Improved ConfReport
* languages/en-US/Other/RecordCollector.json
* Improved change module type
* Improved default dashboard in api portal
* Fix Send PDF workflow task
* Improved default dashboard in api
* Fixed attachments in 'Emails to send' panel
* lib_roundcube 0.3.0 Roundcube Webmail 1.6.0
* tests
* tests
* Update tests.yml
* Added improvements in record collector
* tests/setup/dependency.sh
* .github/workflows/tests.yml
* .github/workflows/tests.yml
* .github/workflows/tests.yml
* Code improvements
* Update dependencies
* Added minor improvements
* tests
* Added improvements in record collector
* Added improvements in record collector
* Added minor improvements
* Added minor improvements
* Added minor improvements
* Improved import file button
* Improved imap connection
* Fix #16317 - list view entries count
* Added minor code improvements
* Improved menu items
* Added improvements in record collector
* Fix gantt view (#15772)
* Added improvements in record collector
* Added improvements in record collector
* Added improvements in record collector
* Updated graphics in store
* Update install translations
* Update fonts
* Improved OSSMail template
* Updated graphics in store
* Update fonts
* tests
* tests Validator
* Update icons
* Improved widgets permissions (#15613)
* Increase scrolling speed (#15031)
* Added tracking to media management
* Added improvements in record collector
* Added improvements in record collector
* Added improvements in record collector
* Added minor code improvements
* tests
* Added minor code improvements
* Fixed #15164 (#16319)
* Some changes in Import module (#16318)
* Code formatting
* Added improvements in record collector
* Change the library "sonata-project / google-authenticator" to "pragmarx/google2fa"
* Update dependencies
* Update dependencies
* Updated *.min and *.map files
* Change the library "sonata-project / google-authenticator" to "pragmarx/google2fa"
* Added minor improvements in Composer::install
* Update dev dependency
* Added dropdown button to record collectors (#16322)
* Corrected Record collectors table width (#16323)
* Fixed #15183 modulesMapRelatedFields don`t work correct for multipicklist
* Added minor improvements in Credits
* Fix edit view header links
* Improved Inventory panel and PDF widget
* Added improvements in record collector
* Added improvements in record collector
* Update install translations
* #16282 Improved the handler from getting coordinates to the map
* Added minor code improvements
* Fixed getting reference module in inventory name field (#16329)
* Missing icons update
* Improved tree field type
* Improved tests and some code
* Fix tree field type
* Fix scheme for tree data table
* Improved switch users
* Improved YetiForce CLI
* [PROD](renovate) Update dependency github/super-linter to v4.9.6 (#16324)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
* Bump giggsey/libphonenumber-for-php from 8.12.52 to 8.12.53 (#16331)
Bumps [giggsey/libphonenumber-for-php](https://github.com/giggsey/libphonenumber-for-php) from 8.12.52 to 8.12.53.
- [Release notes](https://github.com/giggsey/libphonenumber-for-php/releases)
- [Commits](https://github.com/giggsey/libphonenumber-for-php/compare/8.12.52...8.12.53)
---
updated-dependencies:
- dependency-name: giggsey/libphonenumber-for-php
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* Improved switch users
* Improved switch users
* Unused code has been removed
* Fix tree field type
* Added improvements in record collector
* Improved integration with DAV
* Improved conditions wizard for 'Update related record' workflow action
* Fixed focus to search text field when click on select2 drop down in modal window
* Added improvements in record collector
* Added minor code improvements
* Improved ConfReport
* Improved .htaccess
* Added minor code improvements
* Improved ConfReport
* Fix icon on tree field type and change icon management view
* Removed unused code. "Is added" - condition in workflows (#16321)
* Correct setting of check boxes of Inventory boolean fields depending on their values. (#16326)
* Update Inventory.js
Now the check-boxes of Inventory boolean fields will be set correctly regarding to their content.
* README.md (#16332)
* Improve inventory auto fill
* Improved getting data from smtp (#16334)
* Fixed #13136 (#16335)
* Improved DB structure for map table cache
* Improved updating payment status (#16327)
* Improved updating payment status
* Corrected translation (#16336)
* Removed translation (#16337)
* A functionality has been added to unlock e-mail accounts
* Fix #13486
* Update dependencies
* mbstring.func_overload
* Added priority to CalendarActivities and OverdueActivities dashboard … (#16276)
* Added priority to CalendarActivities and OverdueActivities dashboard widgets
* Added improvement
* Hidden icon for previewing replies in comments (#16339)
* The display of the multi email field has been improved
* Added working time counter widget. (#16316)
* Added working time counter widget.
* Added translation
* Added improvements
* Removed varialbe
* Corrected comment
* Added title to buttons
* Added type to variable
* Removed redundant characters
* Added working time counter widget. #16316
* Added minor improvements
* Improoved dashboard titles
* Updated *.min and *.map files
* Added minor improvements in languages
* Updated translation
* Improvements have been added to the integration with WAPRO ERP
* Update install translations
* Update translations
* Update translations
* Added improvements in record collector
* Added improvements in record collector
* Improved input data cleanup
* Improved RSS
* Improved Rss
* Update all Yarn dependencies (2022-08-15) (#16344)
Co-authored-by: depfu[bot] <23717796+depfu[bot]@users.noreply.github.com>
* Added improvements in record collector
* Improvements in the mechanism of generating PDF files
* YetiForcePDF update v0.1.40 & Update dependencies
* Improved some config templates
* Added minor improvements
* Remove unnecessary code
* .github/workflows/actions.yml
* .github/workflows/actions.yml
* .github/workflows/tests.yml
* .github/workflows/tests.yml
* .github/workflows/tests.yml
* Improved Db importer/updater
* Added buttons to the Working hours counter widget (#16340)
* Added buttons to the Working hours counter widget
* Added translations
* Improved widget
* Added button lock when starting timing
* Update translations
* Added missing translation
* .github/workflows/tests.yml
* .github/workflows/tests.yml
* .github/workflows/tests.yml
* Added missing translation
* .github/workflows/tests.yml
* Removed Translation (#16347)
Co-authored-by: Radosław Skrzypczak <r.skrzypczak@yetiforce.com>
* Update install translations
* Added minor improvement in get actual version of PHP
* Update install translations
* Updated *.min and *.map files
* Redundant code has been removed
* .github/workflows/tests.yml
* .github/workflows/tests.yml
* .github/workflows/tests.yml
* Improved RSS
* Added improvements
* Update DEV dependencies
* Fix Completions initialization in comments widget (#16348)
* Update fonts
* Fixed sending files in API for PUT method
* Update DEV dependencies
* Improved valid of time in Business Hours (#16351)
* Improved executing workflow when an unsupported operator is selected (#16352)
* Improved executing workflow when an unsupported operator is selected
* Improved getting translation (#16350)
* Improved Importer
* Improved working time counter widget
* Improved api
* Expansion of the tests
* Expansion of the tests
* tests
* Update DEV dependencies
* Improved Rss
* tests
* Value display secured
* Added improvements
* Improved index name
* Improved validation of quantity field (#16355)
* Improved validation of quantity field
* Improved code
* Add missing picklist dependencies
* Added validation whether at least one business day has been selected in the Business hours module (#16356)
* Compile js
* Moved swagger file
* Improved swagger generating functions
* Added minor improvements
* Fixed issue with date format
* Added improvements
* Fixed a bug when selecting all users in the calendar quick edit view (#16357)
* Improved swagger generating functions
* Added improvements
* Added improvements
* Added improvements
* Improved Address Search panel
* Improved Emails to send panel
* Fix action name
* Fix description in docBlock
* tests/Settings/ApiAddress.php
* Compile js
* tests/Settings/ApiAddress.php
* tests/Settings/ApiAddress.php
* Remove html unnecessary class
* Fixed #14266 (#16349)
* [PROD](renovate) Update debian Docker tag to v11 (#16341)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
* Improved anonymization
* Added improvements
* Update install translations
* Improved config class
* Added improvements
* Improved generatedtype for some fields
* Fixed #15631 (#16358)
* Added improvements
* Improved block sequence
* 6.4.0
Co-authored-by: rembiesa <103192653+rembiesa@users.noreply.github.com>
Co-authored-by: Radosław Skrzypczak <r.skrzypczak@yetiforce.com>
Co-authored-by: Adrian Koń <a.kon@yetiforce.com>
Co-authored-by: bmankowski <bmankowski@gmail.com>
Co-authored-by: Arek Solek <arkadiusz_s9887@wp.pl>
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Jared Ramon Elizan <elizanjaredr@gmail.com>
Co-authored-by: depfu[bot] <23717796+depfu[bot]@users.noreply.github.com> | https://github.com/yetiforcecompany/yetiforcecrm/commit/2c14baaf8dbc7fd82d5c585f2fa0c23528450618 | CVE-2022-1340 | ['CWE-79'] | public_html/layouts/basic/modules/Chat/Chat.vue.js | 0 | js | false | 2022-08-19T11:40:16Z |
static belle_sip_messageParser_max_forwards_return
max_forwards(pbelle_sip_messageParser ctx)
{
belle_sip_messageParser_max_forwards_return retval;
/* Initialize rule variables
*/
retval.start = LT(1); retval.stop = retval.start;
{
// ../grammars/belle_sip_message.g:1036:13: ( ( DIGIT )+ )
// ../grammars/belle_sip_message.g:1036:14: ( DIGIT )+
{
// ../grammars/belle_sip_message.g:1036:14: ( DIGIT )+
{
int cnt93=0;
for (;;)
{
int alt93=2;
switch ( LA(1) )
{
case DIGIT:
{
alt93=1;
}
break;
}
switch (alt93)
{
case 1:
// ../grammars/belle_sip_message.g:1036:14: DIGIT
{
MATCHT(DIGIT, &FOLLOW_DIGIT_in_max_forwards3887);
if (HASEXCEPTION())
{
goto rulemax_forwardsEx;
}
if (HASFAILED())
{
return retval;
}
}
break;
default:
if ( cnt93 >= 1 )
{
goto loop93;
}
if (BACKTRACKING>0)
{
FAILEDFLAG = ANTLR3_TRUE;
return retval;
}
/* mismatchedSetEx()
*/
CONSTRUCTEX();
EXCEPTION->type = ANTLR3_EARLY_EXIT_EXCEPTION;
EXCEPTION->name = (void *)ANTLR3_EARLY_EXIT_NAME;
goto rulemax_forwardsEx;
}
cnt93++;
}
loop93: ; /* Jump to here if this rule does not match */
}
}
}
// This is where rules clean up and exit
//
goto rulemax_forwardsEx; /* Prevent compiler warnings */
rulemax_forwardsEx: ;
retval.stop = LT(-1);
if (HASEXCEPTION())
{
// This is ugly. We set the exception type to ANTLR3_RECOGNITION_EXCEPTION so we can always
// catch them.
//PREPORTERROR();
EXCEPTION->type = ANTLR3_RECOGNITION_EXCEPTION;
}
return retval;
} | static belle_sip_messageParser_max_forwards_return
max_forwards(pbelle_sip_messageParser ctx)
{
belle_sip_messageParser_max_forwards_return retval;
/* Initialize rule variables
*/
retval.start = LT(1); retval.stop = retval.start;
{
// ../grammars/belle_sip_message.g:1040:13: ( ( DIGIT )+ )
// ../grammars/belle_sip_message.g:1040:14: ( DIGIT )+
{
// ../grammars/belle_sip_message.g:1040:14: ( DIGIT )+
{
int cnt93=0;
for (;;)
{
int alt93=2;
switch ( LA(1) )
{
case DIGIT:
{
alt93=1;
}
break;
}
switch (alt93)
{
case 1:
// ../grammars/belle_sip_message.g:1040:14: DIGIT
{
MATCHT(DIGIT, &FOLLOW_DIGIT_in_max_forwards3887);
if (HASEXCEPTION())
{
goto rulemax_forwardsEx;
}
if (HASFAILED())
{
return retval;
}
}
break;
default:
if ( cnt93 >= 1 )
{
goto loop93;
}
if (BACKTRACKING>0)
{
FAILEDFLAG = ANTLR3_TRUE;
return retval;
}
/* mismatchedSetEx()
*/
CONSTRUCTEX();
EXCEPTION->type = ANTLR3_EARLY_EXIT_EXCEPTION;
EXCEPTION->name = (void *)ANTLR3_EARLY_EXIT_NAME;
goto rulemax_forwardsEx;
}
cnt93++;
}
loop93: ; /* Jump to here if this rule does not match */
}
}
}
// This is where rules clean up and exit
//
goto rulemax_forwardsEx; /* Prevent compiler warnings */
rulemax_forwardsEx: ;
retval.stop = LT(-1);
if (HASEXCEPTION())
{
// This is ugly. We set the exception type to ANTLR3_RECOGNITION_EXCEPTION so we can always
// catch them.
//PREPORTERROR();
EXCEPTION->type = ANTLR3_RECOGNITION_EXCEPTION;
}
return retval;
} | Fix crash while receiving some kind of invalid from header. | https://github.com/BelledonneCommunications/belle-sip/commit/116e3eb48fe43ea63eb9f3c4b4b30c48d58d6ff0 | null | null | src/grammars/belle_sip_messageParser.c | 0 | c | false | 2021-05-13T14:08:12Z |
@Override
public void tick()
{
if (level.isClientSide)
{
EntityHelpers.startOrStop(sittingAnimation, isSitting(), tickCount);
EntityHelpers.startOrStop(sleepingAnimation, isSleeping(), tickCount);
}
super.tick();
} | @Override
public void tick()
{
if (level.isClientSide)
{
EntityHelpers.startOrStop(sittingAnimation, isSitting(), tickCount);
EntityHelpers.startOrStop(sleepingAnimation, isSleeping(), tickCount);
}
super.tick();
if (needsCommandUpdate && command.activity != null)
{
getBrain().setActiveActivityIfPossible(command.activity.get());
needsCommandUpdate = false;
}
} | fix cat attack behavior by standardizing it as a different behavior | https://github.com/TerraFirmaCraft/TerraFirmaCraft/commit/209586124a49673308b1ab271473921a00a91c9b | null | null | src/main/java/net/dries007/tfc/common/entities/livestock/pet/TamableMammal.java | 0 | java | false | 2022-11-01T16:43:02Z |
@Override
public void renderComponent(MatrixStack matrices, float delta) {
DrawPosition pos = getPos();
int lastY = 1;
int i = 0;
for (ItemUtil.ItemStorage item : this.added) {
if (i > 5) {
matrices.pop();
return;
}
TextCollector message = new TextCollector();
message.add(Text.literal("+ "));
message.add(Text.literal("[").setStyle(Style.EMPTY.withColor(TextColor.fromRgb(ColorUtil.DARK_GRAY.color()))));
message.add(Text.literal(item.times + "").setStyle(Style.EMPTY.withColor(Formatting.WHITE)));
message.add(Text.literal("] ").setStyle(Style.EMPTY.withColor(TextColor.fromRgb(ColorUtil.DARK_GRAY.color()))));
message.add(item.stack.getName());
OrderedText text = Language.getInstance().reorder(message.getCombined());
if (shadow.getValue()) {
client.textRenderer.drawWithShadow(
matrices, text, pos.x(), pos.y() + lastY, ColorUtil.SELECTOR_GREEN.color()
);
} else {
client.textRenderer.draw(
matrices, text, pos.x(), pos.y() + lastY, ColorUtil.SELECTOR_GREEN.color()
);
}
lastY = lastY + client.textRenderer.fontHeight + 2;
i++;
}
for (ItemUtil.ItemStorage item : this.removed) {
if (i > 5) {
matrices.pop();
return;
}
TextCollector message = new TextCollector();
message.add(Text.literal("- "));
message.add(Text.literal("[").setStyle(Style.EMPTY.withColor(TextColor.fromRgb(ColorUtil.DARK_GRAY.color()))));
message.add(Text.literal(item.times + "").setStyle(Style.EMPTY.withColor(Formatting.WHITE)));
message.add(Text.literal("] ").setStyle(Style.EMPTY.withColor(TextColor.fromRgb(ColorUtil.DARK_GRAY.color()))));
message.add(item.stack.getName());
OrderedText text = Language.getInstance().reorder(message.getCombined());
if (shadow.getValue()) {
client.textRenderer.drawWithShadow(
matrices, text, pos.x(), pos.y() + lastY, Formatting.RED.getColorValue()
);
} else {
client.textRenderer.draw(
matrices, text, pos.x(), pos.y() + lastY, Formatting.RED.getColorValue()
);
}
lastY = lastY + client.textRenderer.fontHeight + 2;
i++;
}
} | @Override
public void renderComponent(MatrixStack matrices, float delta) {
DrawPosition pos = getPos();
int lastY = 1;
int i = 0;
for (ItemUtil.ItemStorage item : this.added) {
if (i > 5) {
return;
}
TextCollector message = new TextCollector();
message.add(Text.literal("+ "));
message.add(Text.literal("[").setStyle(Style.EMPTY.withColor(TextColor.fromRgb(ColorUtil.DARK_GRAY.color()))));
message.add(Text.literal(item.times + "").setStyle(Style.EMPTY.withColor(Formatting.WHITE)));
message.add(Text.literal("] ").setStyle(Style.EMPTY.withColor(TextColor.fromRgb(ColorUtil.DARK_GRAY.color()))));
message.add(item.stack.getName());
OrderedText text = Language.getInstance().reorder(message.getCombined());
if (shadow.getValue()) {
client.textRenderer.drawWithShadow(
matrices, text, pos.x(), pos.y() + lastY, ColorUtil.SELECTOR_GREEN.color()
);
} else {
client.textRenderer.draw(
matrices, text, pos.x(), pos.y() + lastY, ColorUtil.SELECTOR_GREEN.color()
);
}
lastY = lastY + client.textRenderer.fontHeight + 2;
i++;
}
for (ItemUtil.ItemStorage item : this.removed) {
if (i > 5) {
return;
}
TextCollector message = new TextCollector();
message.add(Text.literal("- "));
message.add(Text.literal("[").setStyle(Style.EMPTY.withColor(TextColor.fromRgb(ColorUtil.DARK_GRAY.color()))));
message.add(Text.literal(item.times + "").setStyle(Style.EMPTY.withColor(Formatting.WHITE)));
message.add(Text.literal("] ").setStyle(Style.EMPTY.withColor(TextColor.fromRgb(ColorUtil.DARK_GRAY.color()))));
message.add(item.stack.getName());
OrderedText text = Language.getInstance().reorder(message.getCombined());
if (shadow.getValue()) {
client.textRenderer.drawWithShadow(
matrices, text, pos.x(), pos.y() + lastY, Formatting.RED.getColorValue()
);
} else {
client.textRenderer.draw(
matrices, text, pos.x(), pos.y() + lastY, Formatting.RED.getColorValue()
);
}
lastY = lastY + client.textRenderer.fontHeight + 2;
i++;
}
} | Fix crash | https://github.com/DarkKronicle/KronHUD/commit/443939d0a4a0a5dc8486b26d19aa1ffbe3554251 | null | null | src/main/java/io/github/darkkronicle/kronhud/gui/hud/item/ItemUpdateHud.java | 0 | java | false | null |
@Override
protected String[] mandatoryHeaders() {
return new String[] {
GITHUB_ID_HEADER,
};
} | @Override
protected String[][] mandatoryHeaders() {
return new String[][] {
GIT_ID_HEADERS,
};
} | [#1661] Upgrade to Gradle 7 (#1780)
The Liferay plugin has recently been updated with annotations for
Gradle 7. As Gradle 7 is now available for use, this also means that the
Shadow plugin can be updated to the latest version.
There are some deprecated features that will be removed in a future
Gradle 8 release. These include an implicit dependency between
processResources and zipReport and the change from Report.enabled to
Report.required.
The processSystemtestResources task fails because Gradle 7 requires a
duplicatesStrategy to be defined for the task, which is of the Copy
type.
Let's upgrade to Gradle 7, update the relevant plugins and fix the
duplicatesStrategy error as well as deprecations related to Gradle 8. | https://github.com/reposense/RepoSense/commit/c8d50d43598a69497ca437365dea83f817439df1 | null | null | src/main/java/reposense/parser/AuthorConfigCsvParser.java | 0 | java | false | 2022-07-26T14:39:13Z |
@GET
@Path("/{tenant}/{namespace}/{topic}/subscribeRate")
@ApiOperation(value = "Get subscribe rate configuration for specified topic.")
@ApiResponses(value = {@ApiResponse(code = 403, message = "Don't have admin permission"),
@ApiResponse(code = 404, message = "Topic does not exist"),
@ApiResponse(code = 405,
message = "Topic level policy is disabled, please enable the topic level policy and retry"),
@ApiResponse(code = 409, message = "Concurrent modification")})
public void getSubscribeRate(@Suspended final AsyncResponse asyncResponse,
@PathParam("tenant") String tenant,
@PathParam("namespace") String namespace,
@PathParam("topic") @Encoded String encodedTopic,
@QueryParam("applied") @DefaultValue("false") boolean applied,
@QueryParam("isGlobal") @DefaultValue("false") boolean isGlobal,
@ApiParam(value = "Is authentication required to perform this operation")
@QueryParam("authoritative") @DefaultValue("false") boolean authoritative) {
validateTopicName(tenant, namespace, encodedTopic);
preValidation(authoritative)
.thenCompose(__ -> internalGetSubscribeRate(applied, isGlobal))
.thenApply(asyncResponse::resume).exceptionally(ex -> {
handleTopicPolicyException("getSubscribeRate", ex, asyncResponse);
return null;
});
} | @GET
@Path("/{tenant}/{namespace}/{topic}/subscribeRate")
@ApiOperation(value = "Get subscribe rate configuration for specified topic.")
@ApiResponses(value = {@ApiResponse(code = 403, message = "Don't have admin permission"),
@ApiResponse(code = 404, message = "Topic does not exist"),
@ApiResponse(code = 405,
message = "Topic level policy is disabled, please enable the topic level policy and retry"),
@ApiResponse(code = 409, message = "Concurrent modification")})
public void getSubscribeRate(@Suspended final AsyncResponse asyncResponse,
@PathParam("tenant") String tenant,
@PathParam("namespace") String namespace,
@PathParam("topic") @Encoded String encodedTopic,
@QueryParam("applied") @DefaultValue("false") boolean applied,
@QueryParam("isGlobal") @DefaultValue("false") boolean isGlobal,
@ApiParam(value = "Whether leader broker redirected this call to this broker. For internal use.")
@QueryParam("authoritative") @DefaultValue("false") boolean authoritative) {
validateTopicName(tenant, namespace, encodedTopic);
preValidation(authoritative)
.thenCompose(__ -> internalGetSubscribeRate(applied, isGlobal))
.thenApply(asyncResponse::resume).exceptionally(ex -> {
handleTopicPolicyException("getSubscribeRate", ex, asyncResponse);
return null;
});
} | Update/fix Swagger Annotation for param: authoritative (#16222)
* Update/fix Swagger Annotation for param: authoritative
* Fix Checkstyle | https://github.com/apache/pulsar/commit/b4ef4a3f4b752749277ae460d7e0739cf32672bc | null | null | pulsar-broker/src/main/java/org/apache/pulsar/broker/admin/v2/PersistentTopics.java | 0 | java | false | 2022-07-12T23:58:26Z |
def load_explore_json_into_cache( # pylint: disable=too-many-locals
job_metadata: Dict[str, Any],
form_data: Dict[str, Any],
response_type: Optional[str] = None,
force: bool = False,
) -> None:
cache_key_prefix = "ejr-" # ejr: explore_json request
user = (
security_manager.get_user_by_id(job_metadata.get("user_id"))
or security_manager.get_anonymous_user()
)
with override_user(user, force=False):
try:
set_form_data(form_data)
datasource_id, datasource_type = get_datasource_info(None, None, form_data)
# Perform a deep copy here so that below we can cache the original
# value of the form_data object. This is necessary since the viz
# objects modify the form_data object. If the modified version were
# to be cached here, it will lead to a cache miss when clients
# attempt to retrieve the value of the completed async query.
original_form_data = copy.deepcopy(form_data)
viz_obj = get_viz(
datasource_type=cast(str, datasource_type),
datasource_id=datasource_id,
form_data=form_data,
force=force,
)
# run query & cache results
payload = viz_obj.get_payload()
if viz_obj.has_error(payload):
raise SupersetVizException(errors=payload["errors"])
# Cache the original form_data value for async retrieval
cache_value = {
"form_data": original_form_data,
"response_type": response_type,
}
cache_key = generate_cache_key(cache_value, cache_key_prefix)
set_and_log_cache(cache_manager.cache, cache_key, cache_value)
result_url = f"/superset/explore_json/data/{cache_key}"
async_query_manager.update_job(
job_metadata,
async_query_manager.STATUS_DONE,
result_url=result_url,
)
except SoftTimeLimitExceeded as ex:
logger.warning(
"A timeout occurred while loading explore json, error: %s", ex
)
raise ex
except Exception as ex:
if isinstance(ex, SupersetVizException):
errors = ex.errors # pylint: disable=no-member
else:
error = (
ex.message # pylint: disable=no-member
if hasattr(ex, "message")
else str(ex)
)
errors = [error]
async_query_manager.update_job(
job_metadata, async_query_manager.STATUS_ERROR, errors=errors
)
raise ex | def load_explore_json_into_cache( # pylint: disable=too-many-locals
job_metadata: dict[str, Any],
form_data: dict[str, Any],
response_type: str | None = None,
force: bool = False,
) -> None:
cache_key_prefix = "ejr-" # ejr: explore_json request
user = (
security_manager.get_user_by_id(job_metadata.get("user_id"))
or security_manager.get_anonymous_user()
)
with override_user(user, force=False):
try:
set_form_data(form_data)
datasource_id, datasource_type = get_datasource_info(None, None, form_data)
# Perform a deep copy here so that below we can cache the original
# value of the form_data object. This is necessary since the viz
# objects modify the form_data object. If the modified version were
# to be cached here, it will lead to a cache miss when clients
# attempt to retrieve the value of the completed async query.
original_form_data = copy.deepcopy(form_data)
viz_obj = get_viz(
datasource_type=cast(str, datasource_type),
datasource_id=datasource_id,
form_data=form_data,
force=force,
)
# run query & cache results
payload = viz_obj.get_payload()
if viz_obj.has_error(payload):
raise SupersetVizException(errors=payload["errors"])
# Cache the original form_data value for async retrieval
cache_value = {
"form_data": original_form_data,
"response_type": response_type,
}
cache_key = generate_cache_key(cache_value, cache_key_prefix)
set_and_log_cache(cache_manager.cache, cache_key, cache_value)
result_url = f"/superset/explore_json/data/{cache_key}"
async_query_manager.update_job(
job_metadata,
async_query_manager.STATUS_DONE,
result_url=result_url,
)
except SoftTimeLimitExceeded as ex:
logger.warning(
"A timeout occurred while loading explore json, error: %s", ex
)
raise ex
except Exception as ex:
if isinstance(ex, SupersetVizException):
errors = ex.errors # pylint: disable=no-member
else:
error = (
ex.message # pylint: disable=no-member
if hasattr(ex, "message")
else str(ex)
)
errors = [error]
async_query_manager.update_job(
job_metadata, async_query_manager.STATUS_ERROR, errors=errors
)
raise ex | Merge branch 'master' into fix/db-val-param-perms | https://github.com/apache/superset/commit/4e2fd6f4f04c61e8c1d3ec3f233581a05f8b6213 | null | null | superset/tasks/async_queries.py | 0 | py | false | 2023-06-05T08:42:54Z |
function sendExportedPresentationChatMsg(meetingId, presentationId, fileURI) {
const CHAT_CONFIG = Meteor.settings.public.chat;
const PUBLIC_GROUP_CHAT_ID = CHAT_CONFIG.public_group_id;
const PUBLIC_CHAT_SYSTEM_ID = CHAT_CONFIG.system_userid;
const CHAT_EXPORTED_PRESENTATION_MESSAGE = CHAT_CONFIG.system_messages_keys.chat_exported_presentation;
const SYSTEM_CHAT_TYPE = CHAT_CONFIG.type_system;
const pres = Presentations.findOne({ meetingId, id: presentationId });
const extra = {
type: 'presentation',
fileURI,
filename: pres?.name || DEFAULT_FILENAME,
};
const payload = {
id: `${SYSTEM_CHAT_TYPE}-${CHAT_EXPORTED_PRESENTATION_MESSAGE}`,
timestamp: Date.now(),
correlationId: `${PUBLIC_CHAT_SYSTEM_ID}-${Date.now()}`,
sender: {
id: PUBLIC_CHAT_SYSTEM_ID,
name: '',
},
message: '',
extra,
};
return addSystemMsg(meetingId, PUBLIC_GROUP_CHAT_ID, payload);
} | async function sendExportedPresentationChatMsg(meetingId, presentationId, fileURI) {
const CHAT_CONFIG = Meteor.settings.public.chat;
const PUBLIC_GROUP_CHAT_ID = CHAT_CONFIG.public_group_id;
const PUBLIC_CHAT_SYSTEM_ID = CHAT_CONFIG.system_userid;
const CHAT_EXPORTED_PRESENTATION_MESSAGE = CHAT_CONFIG.system_messages_keys.chat_exported_presentation;
const SYSTEM_CHAT_TYPE = CHAT_CONFIG.type_system;
const pres = await Presentations.findOneAsync({ meetingId, id: presentationId });
const extra = {
type: 'presentation',
fileURI,
filename: pres?.name || DEFAULT_FILENAME,
};
const payload = {
id: `${SYSTEM_CHAT_TYPE}-${CHAT_EXPORTED_PRESENTATION_MESSAGE}`,
timestamp: Date.now(),
correlationId: `${PUBLIC_CHAT_SYSTEM_ID}-${Date.now()}`,
sender: {
id: PUBLIC_CHAT_SYSTEM_ID,
name: '',
},
message: '',
extra,
};
const result = await addSystemMsg(meetingId, PUBLIC_GROUP_CHAT_ID, payload);
return result;
} | Merge branch 'v2.6.x-release' of github.com:bigbluebutton/bigbluebutton into ssrf-fix | https://github.com/bigbluebutton/bigbluebutton/commit/22de2b49a5d218910923a1048bb73395e53c99bf | CVE-2023-33176 | ['CWE-918'] | bigbluebutton-html5/imports/api/presentations/server/handlers/sendExportedPresentationChatMsg.js | 0 | js | false | 2023-04-13T12:40:07Z |
public void bind(int position) {
Permission permission = getItem(position);
appOpsRefStateView.setVisibility(View.GONE);
if (permission.isReferenced() == null) {
referenceView.setBackgroundColor(PackageAdapter.ORANGE);
} else if (!permission.isReferenced()) {
referenceView.setBackgroundColor(Color.RED);
if (permission.isAppOps()) {
appOpsRefStateView.setText(
Utils.htmlToString(
App.getContext().getString(R.string.should_be, permission.getReference())));
appOpsRefStateView.setVisibility(View.VISIBLE);
}
} else {
referenceView.setBackgroundColor(Color.GREEN);
}
if (permission.getIconResId() != null) {
groupIconView.setImageResource(permission.getIconResId());
}
spinnerContainer.setOnClickListener(null);
if (permission.isAppOps()) {
if (permission.dependsOn() == null) {
spinnerContainer.setVisibility(View.VISIBLE);
spinnerContainer.setOnClickListener(v -> spinner.performClick());
} else {
spinnerContainer.setVisibility(View.INVISIBLE);
}
}
permissionNameView.setText(permission.createPermNameString());
protectionLevelView.setText(permission.createProtectLevelString());
appOpsTimeView.setVisibility(View.GONE);
if (permission.isAppOps()) {
String time = permission.getAppOpsAccessTime();
if (time != null) {
appOpsTimeView.setText(time);
appOpsTimeView.setVisibility(View.VISIBLE);
}
if (permission.getName().equals("RUN_IN_BACKGROUND")
|| permission.getName().equals("RUN_ANY_IN_BACKGROUND")) {
spinner.setAdapter(mAppOpModesBgAdapter);
} else {
spinner.setAdapter(mAppOpModesAdapter);
}
spinner.setSelection(permission.getAppOpsMode());
spinner.setOnItemSelectedListener(new AppOpsModeSelectListener(permission));
stateSwitch.setVisibility(View.GONE);
spinner.setEnabled(permission.isChangeable());
appOpsDefaultView.setVisibility(permission.isAppOpsSet() ? View.GONE : View.VISIBLE);
} else {
if (permission.isProviderMissing()) {
stateSwitch.setVisibility(View.INVISIBLE);
} else {
stateSwitch.setChecked(permission.isGranted());
stateSwitch.setEnabled(permission.isChangeable());
stateSwitch.setOnClickListener(
v -> {
stateSwitch.setChecked(permission.isGranted()); // do not change the state here
mSwitchToggleListener.onClick(permission);
});
stateSwitch.setVisibility(View.VISIBLE);
}
spinnerContainer.setVisibility(View.GONE);
}
} | public void bind(int position) {
Permission permission = getItem(position);
appOpsRefStateView.setVisibility(View.GONE);
if (permission.isReferenced() == null) {
referenceView.setBackgroundColor(PackageAdapter.ORANGE);
} else if (!permission.isReferenced()) {
referenceView.setBackgroundColor(Color.RED);
if (permission.isAppOps()) {
appOpsRefStateView.setText(
Utils.htmlToString(
App.getContext().getString(R.string.should_be, permission.getReference())));
appOpsRefStateView.setVisibility(View.VISIBLE);
}
} else {
referenceView.setBackgroundColor(Color.GREEN);
}
if (permission.getIconResId() != null) {
groupIconView.setImageResource(permission.getIconResId());
}
spinnerContainer.setOnClickListener(null);
if (permission.isAppOps()) {
if (permission.dependsOn() == null) {
spinnerContainer.setVisibility(View.VISIBLE);
spinnerContainer.setOnClickListener(v -> spinner.performClick());
} else {
spinnerContainer.setVisibility(View.INVISIBLE);
}
}
permissionNameView.setText(permission.createPermNameString());
protectionLevelView.setText(permission.createProtectLevelString());
appOpsTimeView.setVisibility(View.GONE);
if (permission.isAppOps()) {
String time = permission.getAppOpsAccessTime();
if (time != null) {
appOpsTimeView.setText(time);
appOpsTimeView.setVisibility(View.VISIBLE);
}
if (permission.getName().equals("RUN_IN_BACKGROUND")
|| permission.getName().equals("RUN_ANY_IN_BACKGROUND")) {
spinner.setAdapter(getAppOpModesAdapter(true));
} else {
spinner.setAdapter(getAppOpModesAdapter(false));
}
spinner.setSelection(permission.getAppOpsMode());
spinner.setOnItemSelectedListener(new AppOpsModeSelectListener(permission));
stateSwitch.setVisibility(View.GONE);
spinner.setEnabled(permission.isChangeable());
appOpsDefaultView.setVisibility(permission.isAppOpsSet() ? View.GONE : View.VISIBLE);
} else {
if (permission.isProviderMissing()) {
stateSwitch.setVisibility(View.INVISIBLE);
} else {
stateSwitch.setChecked(permission.isGranted());
stateSwitch.setEnabled(permission.isChangeable());
stateSwitch.setOnClickListener(
v -> {
stateSwitch.setChecked(permission.isGranted()); // do not change the state here
mSwitchToggleListener.onClick(permission);
});
stateSwitch.setVisibility(View.VISIBLE);
}
spinnerContainer.setVisibility(View.GONE);
}
} | Bump library versions | https://github.com/mirfatif/PermissionManagerX/commit/a73b6999996e43e52238f5dc31c037bb97efd5fa | null | null | app/src/main/java/com/mirfatif/permissionmanagerx/ui/PermissionAdapter.java | 0 | java | false | 2021-02-15T23:50:27Z |
/**
 * Builds and asynchronously enqueues one batch-upload POST to the intake endpoint.
 *
 * @param json serialized batch payload, sent verbatim as the request body
 * @param tags tag string; appended as the "ddtags" query parameter when non-empty
 * @throws IOException declared for callers; nothing in this body throws it directly —
 *     NOTE(review): confirm whether the signature can be narrowed
 */
private void makeUploadRequest(byte[] json, String tags) throws IOException {
  // use RequestBody.create(MediaType, byte[]) to avoid changing Content-Type to
  // "Content-Type: application/json; charset=UTF-8" which is not recognized
  int contentLength = json.length;
  RequestBody body = RequestBody.create(APPLICATION_JSON, json);
  // Record the payload size for upload-size telemetry.
  debuggerMetrics.histogram("batch.uploader.request.size", contentLength);
  if (log.isDebugEnabled()) {
    log.debug("Uploading batch data size={} bytes", contentLength);
  }
  HttpUrl.Builder builder = urlBase.newBuilder();
  if (!tags.isEmpty()) {
    builder.addQueryParameter("ddtags", tags);
  }
  Request.Builder requestBuilder = new Request.Builder().url(builder.build()).post(body);
  if (apiKey != null) {
    // Diagnostic-only checks; a malformed key is still sent as-is.
    if (apiKey.isEmpty()) {
      log.debug("API key is empty");
    }
    if (apiKey.length() != 32) {
      // Only the first (up to) 6 characters are logged to avoid leaking the key.
      log.debug(
          "API key length is incorrect (truncated?) expected=32 actual={} API key={}...",
          apiKey.length(),
          apiKey.substring(0, Math.min(apiKey.length(), 6)));
    }
    requestBuilder.addHeader(HEADER_DD_API_KEY, apiKey);
  } else {
    log.debug("API key is null");
  }
  if (containerId != null) {
    requestBuilder.addHeader(HEADER_DD_CONTAINER_ID, containerId);
  }
  Request request = requestBuilder.build();
  log.debug("Sending request: {} CT: {}", request, request.body().contentType());
  // Fire-and-forget: completion/failure is handled by responseCallback.
  client.newCall(request).enqueue(responseCallback);
  // Track the in-flight call (paired with a deregister in the callback — TODO confirm).
  inflightRequests.register();
} | private void makeUploadRequest(byte[] json, String tags) throws IOException {
// use RequestBody.create(MediaType, byte[]) to avoid changing Content-Type to
// "Content-Type: application/json; charset=UTF-8" which is not recognized
int contentLength = json.length;
RequestBody body = RequestBody.create(APPLICATION_JSON, json);
debuggerMetrics.histogram("batch.uploader.request.size", contentLength);
if (log.isDebugEnabled()) {
log.debug("Uploading batch data size={} bytes", contentLength);
}
HttpUrl.Builder builder = urlBase.newBuilder();
if (!tags.isEmpty()) {
builder.addQueryParameter("ddtags", tags);
}
try {
Request.Builder requestBuilder = new Request.Builder().url(builder.build()).post(body);
if (apiKey != null) {
if (apiKey.isEmpty()) {
log.debug("API key is empty");
}
if (apiKey.length() != 32) {
log.debug(
"API key length is incorrect (truncated?) expected=32 actual={} API key={}...",
apiKey.length(),
apiKey.substring(0, Math.min(apiKey.length(), 6)));
}
requestBuilder.addHeader(HEADER_DD_API_KEY, apiKey);
} else {
log.debug("API key is null");
}
if (containerId != null) {
requestBuilder.addHeader(HEADER_DD_CONTAINER_ID, containerId);
}
Request request = requestBuilder.build();
log.debug("Sending request: {} CT: {}", request, request.body().contentType());
client.newCall(request).enqueue(responseCallback);
inflightRequests.register();
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException();
}
} | Merge pull request #3559 from DataDog/glopes/dependencies
Telemetry; support for reporting dependencies | https://github.com/DataDog/dd-trace-java/commit/47a0d0e37c81c79cb84ef4fac01a7cf55209acdf | null | null | dd-java-agent/agent-debugger/src/main/java/com/datadog/debugger/uploader/BatchUploader.java | 0 | java | false | 2022-07-28T18:52:18Z |
@Override
public String getCommandUsage(ICommandSender arg0) {
    // Help text: the tracker to display, plus an optional extra stat page.
    String usage = "/" + getCommandName() + " <zombie/spider/wolf/fishing/catacombs/mythological/auto/off> [winter/festival/spooky/session/f(1-7)]";
    return usage;
} | @Override
public String getCommandUsage(ICommandSender arg0) {
    // Help text: the tracker to display (includes "ghost"), plus an optional extra stat page.
    return "/" + getCommandName() + " <zombie/spider/wolf/fishing/catacombs/mythological/ghost/auto/off> [winter/festival/spooky/session/f(1-7)]";
} | Added Ghost Tracker/ Fixed Installer crashes due to renaming ModID | https://github.com/bowser0000/SkyblockMod/commit/ed93b04124db9b8944ddd1ed33d0f4dabf4a486b | null | null | src/main/java/me/Danker/commands/DisplayCommand.java | 0 | java | false | 2021-03-11T18:48:53Z |
public void addPurl(String purl) {
    // Appends a package URL (purl) affected by this vulnerability.
    // NOTE(review): mutates the backing list through the field directly, bypassing the
    // accessors — if getPurls()/setPurls() perform bookkeeping (e.g. generated-code
    // change tracking), route through them instead; confirm against the class definition.
    this.purls.add(purl);
} | public void addPurl(String purl) {
    // Copy–modify–store through the accessor pair so any setter-side bookkeeping
    // in setPurls() runs — TODO confirm setPurls' contract against the generated code.
    var newPurls = getPurls();
    newPurls.add(purl);
    setPurls(newPurls);
} | Regenerated JOOQ classes with 3.14.4 to match metadata plugin. | https://github.com/fasten-project/fasten/commit/2b6b0cceb18d56dbfe2348010df5b5de4a56aa37 | null | null | core/src/main/java/eu/fasten/core/data/vulnerability/Vulnerability.java | 0 | java | false | 2021-10-17T11:38:02Z |
@Override
public void onLocationChanged(@NonNull Location location) {
    // Text the requesting number the device's current position.
    final double latitude = location.getLatitude();
    final double longitude = location.getLongitude();
    Utils.sendSms(smsManager, Utils.buildCoordinatesResponse(latitude, longitude), sender);
} | @Override
public void onLocationChanged(@NonNull Location location) {
    // NOTE(review): logs raw coordinates at debug level — verify location data
    // should reach logcat on release builds.
    Log.d("SmsHandler", "Going to send coordinates: " + location.getLatitude() + " " + location.getLongitude());
    // Text the requesting number the device's current position.
    String response = Utils.buildCoordinatesResponse(location.getLatitude(), location.getLongitude());
    Utils.sendSms(smsManager, response, sender);
} | Update README.md | https://github.com/xfarrow/locatemydevice/commit/5ed13652f380d501a62a6cfb7a7e3c267c27bfd3 | null | null | src/app/src/main/java/com/xfarrow/locatemydevice/SmsHandler.java | 0 | java | false | 2022-11-08T22:02:54Z |
// Registers the "/<Modget.NAMESPACE_SERVER> <COMMAND>" command tree on the server side.
void registerServer() {
    VersionAgnosticCommandManager.get().registerServerCommand(
        CommandManager.literal(Modget.NAMESPACE_SERVER)
            .then(CommandManager.literal(COMMAND)
                // Gate execution behind the configured permission level.
                .requires(source -> source.hasPermissionLevel(PERMISSION_LEVEL))
                .executes(context -> {
                    PlayerEntity player = context.getSource().getPlayer();
                    // Run the command's work off-thread so dispatch isn't blocked.
                    new StartThread(player).start();
                    return 1; // non-zero signals success — TODO confirm dispatcher convention
                })
            )
    );
} | void registerServer() {
VersionAgnosticServerCommandManager.get().register(
CommandManager.literal(Modget.NAMESPACE_SERVER)
.then(CommandManager.literal(COMMAND)
.requires(source -> source.hasPermissionLevel(PERMISSION_LEVEL))
.executes(context -> {
PlayerEntity player = context.getSource().getPlayer();
new StartThread(player).start();
return 1;
})
)
);
} | Don't upload modget-core build artifacts | https://github.com/ReviversMC/modget-minecraft/commit/17f5235f98d112ceb086533d1c4901e7a2a3fce9 | null | null | modget-core/src/main/java/com/github/reviversmc/modget/minecraft/command/ListCommand.java | 0 | java | false | 2022-08-12T14:50:34Z |
// Maps an exact-alarm allow-reason code to a short label for dump/log output.
private static String exactReasonToString(int reason) {
    if (reason == EXACT_ALLOW_REASON_ALLOW_LIST) {
        return "allow-listed";
    }
    if (reason == EXACT_ALLOW_REASON_COMPAT) {
        return "compat";
    }
    if (reason == EXACT_ALLOW_REASON_PERMISSION) {
        return "permission";
    }
    if (reason == EXACT_ALLOW_REASON_NOT_APPLICABLE) {
        return "N/A";
    }
    return "--unknown--";
} | private static String exactReasonToString(int reason) {
    // Maps an exact-alarm allow-reason code — including the separate
    // policy-permission reason — to a short label for dump/log output.
    switch (reason) {
        case EXACT_ALLOW_REASON_ALLOW_LIST:
            return "allow-listed";
        case EXACT_ALLOW_REASON_COMPAT:
            return "compat";
        case EXACT_ALLOW_REASON_PERMISSION:
            return "permission";
        case EXACT_ALLOW_REASON_POLICY_PERMISSION:
            return "policy_permission";
        case EXACT_ALLOW_REASON_NOT_APPLICABLE:
            return "N/A";
        default:
            return "--unknown--";
    }
}
Updating the reason code logging to better differentiate between usage
of both the permissions.
The only functional changes expected from this change are the values of
exactAllowReason in an alarm:
- Prioritized alarms change from "Permission" to "Not applicable"
- When crashing is turned off, reason code changes from "Permission" to
"Not applicable"
- When the caller has USE_EXACT_ALARM, reason code changes from
"Permission" to "Policy Permission".
Also updating the error message to include USE_EXACT_ALARM.
Test: atest FrameworksMockingServicesTests:AlarmManagerServiceTest
atest CtsStatsdAtomHostTestCases:AlarmStatsTest
Existing tests should pass:
atest CtsAlarmManagerTestCases
Manually test output of `./out/host/linux-x86/bin/statsd_testdrive 368`
and `adb shell dumpsys alarm`
Bug: 231661615
Change-Id: I13239c300cfb468e7fc2df21fccb494fdf8c80ac | https://github.com/LineageOS/android_frameworks_base/commit/4d56d13e01d2f752e7c6f30ad7c09c7973e32d69 | null | null | apex/jobscheduler/service/java/com/android/server/alarm/Alarm.java | 0 | java | false | null |
/**
 * Entry point for recording a bill: either forwards it straight to the bookkeeping
 * app ("auto" bills, or full-auto mode) or surfaces UI (a notification when the
 * screen is locked, a panel/tip otherwise).
 */
public static void run(Context context1, BillInfo billInfo) {
    if (billInfo.isAuto()) {
        // Auto-tagged bills skip all UI and go straight to the app.
        Log.i(TAG, "自动记录账单...");
        goApp(context1, billInfo);
        return;
    }
    Log.i(TAG, "唤起自动记账面板...");
    MMKV mmkv = MMKV.defaultMMKV();
    Log.i(TAG, "记账请求发起,账单初始信息:\n" + billInfo.dump());
    if (isReception(context1)) {
        // Locked-screen path.
        Log.i(TAG, "当前处于锁屏状态");
        if (mmkv.getBoolean("autoIncome", false)) {
            // Full-auto: hand the bill to the app immediately.
            Log.i(TAG, "全自动模式->直接对钱迹发起请求");
            goApp(context1, billInfo);
        } else {
            // Semi-auto: post a notification with the pending-bill count.
            Log.i(TAG, "半自动模式->发出记账通知");
            // Notification handling: the count arrives via msg.obj.
            Handler handler = new Handler(Looper.getMainLooper()) {
                @Override
                public void handleMessage(@NonNull Message msg) {
                    int count = (int) msg.obj;
                    Tool.notice(context1, context1.getString(R.string.notice_name), "您有" + count + "条账单待记录");
                }
            };
            TaskThread.onThread(() -> {
                AutoBill[] autoBills = Db.db.AutoBillDao().getNoRecord();
                if (autoBills == null || autoBills.length == 0) {
                    Log.i("异常:数据为0走不到这个流程!");
                    return;
                }
                // NOTE(review): handleMessage casts msg.obj to int — confirm this
                // overload of HandlerUtil.send populates obj (and not only arg1),
                // otherwise the cast throws on the main thread.
                HandlerUtil.send(handler, autoBills.length);
            });
        }
    } else {
        // Foreground path.
        Log.i(TAG, "当前处于前台状态");
        if (mmkv.getBoolean("autoPay", false)) {
            // Full-auto: hand the bill to the app immediately.
            Log.i(TAG, "全自动模式->直接对钱迹发起请求");
            goApp(context1, billInfo);
            return;
        }
        Log.i(TAG, "半自动模式 -> 下一步");
        if (getTimeout() == 0) {
            // No timeout configured: finish directly.
            end(context1, billInfo);
        } else {
            // Timeout configured: show the countdown/tip panel first.
            Log.i("存在超时,弹出超时面板");
            showTip(context1, billInfo);
        }
    }
} | public static void run(Context context1, BillInfo billInfo) {
if (billInfo.isAuto()) {
Log.i(TAG, "自动记录账单...");
goApp(context1, billInfo);
return;
}
Log.i(TAG, "唤起自动记账面板...");
MMKV mmkv = MMKV.defaultMMKV();
Log.i(TAG, "记账请求发起,账单初始信息:\n" + billInfo.dump());
if (isReception(context1)) {
Log.i(TAG, "当前处于锁屏状态");
if (mmkv.getBoolean("autoIncome", false)) {
Log.i(TAG, "全自动模式->直接对钱迹发起请求");
goApp(context1, billInfo);
} else {
Log.i(TAG, "半自动模式->发出记账通知");
//通知处理
Handler handler = new Handler(Looper.getMainLooper()) {
@Override
public void handleMessage(@NonNull Message msg) {
int count = (int) msg.obj;
Tool.notice(context1, context1.getString(R.string.notice_name), "您有" + count + "条账单待记录");
}
};
TaskThread.onThread(() -> {
AutoBill[] autoBills = Db.db.AutoBillDao().getNoRecord();
if (autoBills == null || autoBills.length == 0) {
Log.i("异常:数据为0走不到这个流程!");
return;
}
HandlerUtil.send(handler, autoBills.length, 0);
});
}
} else {
Log.i(TAG, "当前处于前台状态");
if (mmkv.getBoolean("autoPay", false)) {
Log.i(TAG, "全自动模式->直接对钱迹发起请求");
goApp(context1, billInfo);
return;
}
Log.i(TAG, "半自动模式 -> 下一步");
if (getTimeout() == 0) {
end(context1, billInfo);
} else {
Log.i("存在超时,弹出超时面板");
showTip(context1, billInfo);
}
}
} | Fix: lock screen crash
Signed-off-by: dreamncn <dream@dreamn.cn> | https://github.com/AutoAccountingOrg/Qianji_auto/commit/4520227b2c20202b45a821e19e4372a88290c1a4 | null | null | app/src/main/java/cn/dreamn/qianji_auto/bills/SendDataToApp.java | 0 | java | false | null |
@SuppressLint("NewApi")
private void createNotification(MessageObject messageObject, boolean forBitmap) {
String songName = messageObject.getMusicTitle();
String authorName = messageObject.getMusicAuthor();
AudioInfo audioInfo = MediaController.getInstance().getAudioInfo();
Intent intent = new Intent(ApplicationLoader.applicationContext, LaunchActivity.class);
intent.setAction("com.tmessages.openplayer");
intent.addCategory(Intent.CATEGORY_LAUNCHER);
PendingIntent contentIntent = PendingIntent.getActivity(ApplicationLoader.applicationContext, 0, intent, Build.VERSION.SDK_INT >= Build.VERSION_CODES.S ? PendingIntent.FLAG_MUTABLE : 0);
Notification notification;
String artworkUrl = messageObject.getArtworkUrl(true);
String artworkUrlBig = messageObject.getArtworkUrl(false);
long duration = messageObject.getDuration() * 1000;
Bitmap albumArt = audioInfo != null ? audioInfo.getSmallCover() : null;
Bitmap fullAlbumArt = audioInfo != null ? audioInfo.getCover() : null;
loadingFilePath = null;
imageReceiver.setImageBitmap((BitmapDrawable) null);
if (albumArt == null && !TextUtils.isEmpty(artworkUrl)) {
fullAlbumArt = loadArtworkFromUrl(artworkUrlBig, true, !forBitmap);
if (fullAlbumArt == null) {
fullAlbumArt = albumArt = loadArtworkFromUrl(artworkUrl, false, !forBitmap);
} else {
albumArt = loadArtworkFromUrl(artworkUrlBig, false, !forBitmap);
}
} else {
loadingFilePath = FileLoader.getInstance(UserConfig.selectedAccount).getPathToAttach(messageObject.getDocument()).getAbsolutePath();
}
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
boolean isPlaying = !MediaController.getInstance().isMessagePaused();
PendingIntent pendingPrev = PendingIntent.getBroadcast(getApplicationContext(), 0, new Intent(NOTIFY_PREVIOUS).setComponent(new ComponentName(this, MusicPlayerReceiver.class)), PendingIntent.FLAG_CANCEL_CURRENT);
//PendingIntent pendingStop = PendingIntent.getBroadcast(getApplicationContext(), 0, new Intent(NOTIFY_CLOSE).setComponent(new ComponentName(this, MusicPlayerReceiver.class)), PendingIntent.FLAG_CANCEL_CURRENT);
PendingIntent pendingStop = PendingIntent.getService(getApplicationContext(), 0, new Intent(this, getClass()).setAction(getPackageName() + ".STOP_PLAYER"), PendingIntent.FLAG_CANCEL_CURRENT);
PendingIntent pendingPlaypause = PendingIntent.getBroadcast(getApplicationContext(), 0, new Intent(isPlaying ? NOTIFY_PAUSE : NOTIFY_PLAY).setComponent(new ComponentName(this, MusicPlayerReceiver.class)), PendingIntent.FLAG_CANCEL_CURRENT);
PendingIntent pendingNext = PendingIntent.getBroadcast(getApplicationContext(), 0, new Intent(NOTIFY_NEXT).setComponent(new ComponentName(this, MusicPlayerReceiver.class)), PendingIntent.FLAG_CANCEL_CURRENT);
PendingIntent pendingSeek = PendingIntent.getBroadcast(getApplicationContext(), 0, new Intent(NOTIFY_SEEK).setComponent(new ComponentName(this, MusicPlayerReceiver.class)), PendingIntent.FLAG_CANCEL_CURRENT);
Notification.Builder bldr = new Notification.Builder(this);
bldr.setSmallIcon(R.drawable.player)
.setOngoing(isPlaying)
.setContentTitle(songName)
.setContentText(authorName)
.setSubText(audioInfo != null ? audioInfo.getAlbum() : null)
.setContentIntent(contentIntent)
.setDeleteIntent(pendingStop)
.setShowWhen(false)
.setCategory(Notification.CATEGORY_TRANSPORT)
.setPriority(Notification.PRIORITY_MAX)
.setStyle(new Notification.MediaStyle()
.setMediaSession(mediaSession.getSessionToken())
.setShowActionsInCompactView(0, 1, 2));
if (Build.VERSION.SDK_INT >= 26) {
NotificationsController.checkOtherNotificationsChannel();
bldr.setChannelId(NotificationsController.OTHER_NOTIFICATIONS_CHANNEL);
}
if (albumArt != null) {
bldr.setLargeIcon(albumArt);
} else {
bldr.setLargeIcon(albumArtPlaceholder);
}
final String nextDescription = LocaleController.getString("Next", R.string.Next);
final String previousDescription = LocaleController.getString("AccDescrPrevious", R.string.AccDescrPrevious);
if (MediaController.getInstance().isDownloadingCurrentMessage()) {
playbackState.setState(PlaybackState.STATE_BUFFERING, 0, 1).setActions(0);
bldr.addAction(new Notification.Action.Builder(R.drawable.ic_action_previous, previousDescription, pendingPrev).build())
.addAction(new Notification.Action.Builder(R.drawable.loading_animation2, LocaleController.getString("Loading", R.string.Loading), null).build())
.addAction(new Notification.Action.Builder(R.drawable.ic_action_next, nextDescription, pendingNext).build());
} else {
playbackState.setState(isPlaying ? PlaybackState.STATE_PLAYING : PlaybackState.STATE_PAUSED,
MediaController.getInstance().getPlayingMessageObject().audioProgressSec * 1000L,
isPlaying ? 1 : 0)
.setActions(PlaybackState.ACTION_PLAY_PAUSE | PlaybackState.ACTION_PLAY | PlaybackState.ACTION_PAUSE | PlaybackState.ACTION_SEEK_TO | PlaybackState.ACTION_SKIP_TO_PREVIOUS | PlaybackState.ACTION_SKIP_TO_NEXT);
final String playPauseTitle = isPlaying ? LocaleController.getString("AccActionPause", R.string.AccActionPause) : LocaleController.getString("AccActionPlay", R.string.AccActionPlay);
bldr.addAction(new Notification.Action.Builder(R.drawable.ic_action_previous, previousDescription, pendingPrev).build())
.addAction(new Notification.Action.Builder(isPlaying ? R.drawable.ic_action_pause : R.drawable.ic_action_play, playPauseTitle, pendingPlaypause).build())
.addAction(new Notification.Action.Builder(R.drawable.ic_action_next, nextDescription, pendingNext).build());
}
mediaSession.setPlaybackState(playbackState.build());
MediaMetadata.Builder meta = new MediaMetadata.Builder()
.putBitmap(MediaMetadata.METADATA_KEY_ALBUM_ART, fullAlbumArt)
.putString(MediaMetadata.METADATA_KEY_ALBUM_ARTIST, authorName)
.putString(MediaMetadata.METADATA_KEY_ARTIST, authorName)
.putLong(MediaMetadata.METADATA_KEY_DURATION, duration)
.putString(MediaMetadata.METADATA_KEY_TITLE, songName)
.putString(MediaMetadata.METADATA_KEY_ALBUM, audioInfo != null ? audioInfo.getAlbum() : null);
mediaSession.setMetadata(meta.build());
bldr.setVisibility(Notification.VISIBILITY_PUBLIC);
notification = bldr.build();
if (isPlaying) {
startForeground(ID_NOTIFICATION, notification);
} else {
stopForeground(false);
NotificationManager nm = (NotificationManager) getSystemService(NOTIFICATION_SERVICE);
nm.notify(ID_NOTIFICATION, notification);
}
} else {
RemoteViews simpleContentView = new RemoteViews(getApplicationContext().getPackageName(), R.layout.player_small_notification);
RemoteViews expandedView = null;
if (supportBigNotifications) {
expandedView = new RemoteViews(getApplicationContext().getPackageName(), R.layout.player_big_notification);
}
notification = new NotificationCompat.Builder(getApplicationContext())
.setSmallIcon(R.drawable.player)
.setContentIntent(contentIntent)
.setChannelId(NotificationsController.OTHER_NOTIFICATIONS_CHANNEL)
.setContentTitle(songName).build();
notification.contentView = simpleContentView;
if (supportBigNotifications) {
notification.bigContentView = expandedView;
}
setListeners(simpleContentView);
if (supportBigNotifications) {
setListeners(expandedView);
}
if (albumArt != null) {
notification.contentView.setImageViewBitmap(R.id.player_album_art, albumArt);
if (supportBigNotifications) {
notification.bigContentView.setImageViewBitmap(R.id.player_album_art, albumArt);
}
} else {
notification.contentView.setImageViewResource(R.id.player_album_art, R.drawable.nocover_small);
if (supportBigNotifications) {
notification.bigContentView.setImageViewResource(R.id.player_album_art, R.drawable.nocover_big);
}
}
if (MediaController.getInstance().isDownloadingCurrentMessage()) {
notification.contentView.setViewVisibility(R.id.player_pause, View.GONE);
notification.contentView.setViewVisibility(R.id.player_play, View.GONE);
notification.contentView.setViewVisibility(R.id.player_next, View.GONE);
notification.contentView.setViewVisibility(R.id.player_previous, View.GONE);
notification.contentView.setViewVisibility(R.id.player_progress_bar, View.VISIBLE);
if (supportBigNotifications) {
notification.bigContentView.setViewVisibility(R.id.player_pause, View.GONE);
notification.bigContentView.setViewVisibility(R.id.player_play, View.GONE);
notification.bigContentView.setViewVisibility(R.id.player_next, View.GONE);
notification.bigContentView.setViewVisibility(R.id.player_previous, View.GONE);
notification.bigContentView.setViewVisibility(R.id.player_progress_bar, View.VISIBLE);
}
} else {
notification.contentView.setViewVisibility(R.id.player_progress_bar, View.GONE);
notification.contentView.setViewVisibility(R.id.player_next, View.VISIBLE);
notification.contentView.setViewVisibility(R.id.player_previous, View.VISIBLE);
if (supportBigNotifications) {
notification.bigContentView.setViewVisibility(R.id.player_next, View.VISIBLE);
notification.bigContentView.setViewVisibility(R.id.player_previous, View.VISIBLE);
notification.bigContentView.setViewVisibility(R.id.player_progress_bar, View.GONE);
}
if (MediaController.getInstance().isMessagePaused()) {
notification.contentView.setViewVisibility(R.id.player_pause, View.GONE);
notification.contentView.setViewVisibility(R.id.player_play, View.VISIBLE);
if (supportBigNotifications) {
notification.bigContentView.setViewVisibility(R.id.player_pause, View.GONE);
notification.bigContentView.setViewVisibility(R.id.player_play, View.VISIBLE);
}
} else {
notification.contentView.setViewVisibility(R.id.player_pause, View.VISIBLE);
notification.contentView.setViewVisibility(R.id.player_play, View.GONE);
if (supportBigNotifications) {
notification.bigContentView.setViewVisibility(R.id.player_pause, View.VISIBLE);
notification.bigContentView.setViewVisibility(R.id.player_play, View.GONE);
}
}
}
notification.contentView.setTextViewText(R.id.player_song_name, songName);
notification.contentView.setTextViewText(R.id.player_author_name, authorName);
if (supportBigNotifications) {
notification.bigContentView.setTextViewText(R.id.player_song_name, songName);
notification.bigContentView.setTextViewText(R.id.player_author_name, authorName);
notification.bigContentView.setTextViewText(R.id.player_album_title, audioInfo != null && !TextUtils.isEmpty(audioInfo.getAlbum()) ? audioInfo.getAlbum() : "");
}
notification.flags |= Notification.FLAG_ONGOING_EVENT;
startForeground(ID_NOTIFICATION, notification);
}
if (remoteControlClient != null) {
int currentID = MediaController.getInstance().getPlayingMessageObject().getId();
if (notificationMessageID != currentID) {
notificationMessageID = currentID;
RemoteControlClient.MetadataEditor metadataEditor = remoteControlClient.editMetadata(true);
metadataEditor.putString(MediaMetadataRetriever.METADATA_KEY_ARTIST, authorName);
metadataEditor.putString(MediaMetadataRetriever.METADATA_KEY_TITLE, songName);
if (audioInfo != null && !TextUtils.isEmpty(audioInfo.getAlbum())) {
metadataEditor.putString(MediaMetadataRetriever.METADATA_KEY_ALBUM, audioInfo.getAlbum());
}
metadataEditor.putLong(MediaMetadataRetriever.METADATA_KEY_DURATION, MediaController.getInstance().getPlayingMessageObject().audioPlayerDuration * 1000L);
if (fullAlbumArt != null) {
try {
metadataEditor.putBitmap(RemoteControlClient.MetadataEditor.BITMAP_KEY_ARTWORK, fullAlbumArt);
} catch (Throwable e) {
FileLog.e(e);
}
}
metadataEditor.apply();
AndroidUtilities.runOnUIThread(new Runnable() {
@Override
public void run() {
if (remoteControlClient == null || MediaController.getInstance().getPlayingMessageObject() == null) {
return;
}
if (MediaController.getInstance().getPlayingMessageObject().audioPlayerDuration == C.TIME_UNSET) {
AndroidUtilities.runOnUIThread(this, 500);
return;
}
RemoteControlClient.MetadataEditor metadataEditor = remoteControlClient.editMetadata(false);
metadataEditor.putLong(MediaMetadataRetriever.METADATA_KEY_DURATION, MediaController.getInstance().getPlayingMessageObject().audioPlayerDuration * 1000L);
metadataEditor.apply();
if (Build.VERSION.SDK_INT >= 18) {
remoteControlClient.setPlaybackState(MediaController.getInstance().isMessagePaused() ? RemoteControlClient.PLAYSTATE_PAUSED : RemoteControlClient.PLAYSTATE_PLAYING,
Math.max(MediaController.getInstance().getPlayingMessageObject().audioProgressSec * 1000L, 100),
MediaController.getInstance().isMessagePaused() ? 0f : 1f);
} else {
remoteControlClient.setPlaybackState(MediaController.getInstance().isMessagePaused() ? RemoteControlClient.PLAYSTATE_PAUSED : RemoteControlClient.PLAYSTATE_PLAYING);
}
}
}, 1000);
}
if (MediaController.getInstance().isDownloadingCurrentMessage()) {
remoteControlClient.setPlaybackState(RemoteControlClient.PLAYSTATE_BUFFERING);
} else {
RemoteControlClient.MetadataEditor metadataEditor = remoteControlClient.editMetadata(false);
metadataEditor.putLong(MediaMetadataRetriever.METADATA_KEY_DURATION, MediaController.getInstance().getPlayingMessageObject().audioPlayerDuration * 1000L);
metadataEditor.apply();
if (Build.VERSION.SDK_INT >= 18) {
remoteControlClient.setPlaybackState(MediaController.getInstance().isMessagePaused() ? RemoteControlClient.PLAYSTATE_PAUSED : RemoteControlClient.PLAYSTATE_PLAYING,
Math.max(MediaController.getInstance().getPlayingMessageObject().audioProgressSec * 1000L, 100),
MediaController.getInstance().isMessagePaused() ? 0f : 1f);
} else {
remoteControlClient.setPlaybackState(MediaController.getInstance().isMessagePaused() ? RemoteControlClient.PLAYSTATE_PAUSED : RemoteControlClient.PLAYSTATE_PLAYING);
}
}
}
} | @SuppressLint("NewApi")
private void createNotification(MessageObject messageObject, boolean forBitmap) {
String songName = messageObject.getMusicTitle();
String authorName = messageObject.getMusicAuthor();
AudioInfo audioInfo = MediaController.getInstance().getAudioInfo();
Intent intent = new Intent(ApplicationLoader.applicationContext, LaunchActivity.class);
intent.setAction("com.tmessages.openplayer");
intent.addCategory(Intent.CATEGORY_LAUNCHER);
PendingIntent contentIntent = PendingIntent.getActivity(ApplicationLoader.applicationContext, 0, intent, Build.VERSION.SDK_INT >= Build.VERSION_CODES.S ? PendingIntent.FLAG_MUTABLE : 0);
Notification notification;
String artworkUrl = messageObject.getArtworkUrl(true);
String artworkUrlBig = messageObject.getArtworkUrl(false);
long duration = messageObject.getDuration() * 1000;
Bitmap albumArt = audioInfo != null ? audioInfo.getSmallCover() : null;
Bitmap fullAlbumArt = audioInfo != null ? audioInfo.getCover() : null;
loadingFilePath = null;
imageReceiver.setImageBitmap((BitmapDrawable) null);
if (albumArt == null && !TextUtils.isEmpty(artworkUrl)) {
fullAlbumArt = loadArtworkFromUrl(artworkUrlBig, true, !forBitmap);
if (fullAlbumArt == null) {
fullAlbumArt = albumArt = loadArtworkFromUrl(artworkUrl, false, !forBitmap);
} else {
albumArt = loadArtworkFromUrl(artworkUrlBig, false, !forBitmap);
}
} else {
loadingFilePath = FileLoader.getInstance(UserConfig.selectedAccount).getPathToAttach(messageObject.getDocument()).getAbsolutePath();
}
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
boolean isPlaying = !MediaController.getInstance().isMessagePaused();
PendingIntent pendingPrev = PendingIntent.getBroadcast(getApplicationContext(), 0, new Intent(NOTIFY_PREVIOUS).setComponent(new ComponentName(this, MusicPlayerReceiver.class)), Build.VERSION.SDK_INT >= Build.VERSION_CODES.S ? PendingIntent.FLAG_MUTABLE | PendingIntent.FLAG_CANCEL_CURRENT : PendingIntent.FLAG_CANCEL_CURRENT);
//PendingIntent pendingStop = PendingIntent.getBroadcast(getApplicationContext(), 0, new Intent(NOTIFY_CLOSE).setComponent(new ComponentName(this, MusicPlayerReceiver.class)), PendingIntent.FLAG_CANCEL_CURRENT);
PendingIntent pendingStop = PendingIntent.getService(getApplicationContext(), 0, new Intent(this, getClass()).setAction(getPackageName() + ".STOP_PLAYER"), Build.VERSION.SDK_INT >= Build.VERSION_CODES.S ? PendingIntent.FLAG_MUTABLE | PendingIntent.FLAG_CANCEL_CURRENT : PendingIntent.FLAG_CANCEL_CURRENT);
PendingIntent pendingPlaypause = PendingIntent.getBroadcast(getApplicationContext(), 0, new Intent(isPlaying ? NOTIFY_PAUSE : NOTIFY_PLAY).setComponent(new ComponentName(this, MusicPlayerReceiver.class)), Build.VERSION.SDK_INT >= Build.VERSION_CODES.S ? PendingIntent.FLAG_MUTABLE | PendingIntent.FLAG_CANCEL_CURRENT : PendingIntent.FLAG_CANCEL_CURRENT);
PendingIntent pendingNext = PendingIntent.getBroadcast(getApplicationContext(), 0, new Intent(NOTIFY_NEXT).setComponent(new ComponentName(this, MusicPlayerReceiver.class)), Build.VERSION.SDK_INT >= Build.VERSION_CODES.S ? PendingIntent.FLAG_MUTABLE | PendingIntent.FLAG_CANCEL_CURRENT : PendingIntent.FLAG_CANCEL_CURRENT);
PendingIntent pendingSeek = PendingIntent.getBroadcast(getApplicationContext(), 0, new Intent(NOTIFY_SEEK).setComponent(new ComponentName(this, MusicPlayerReceiver.class)), Build.VERSION.SDK_INT >= Build.VERSION_CODES.S ? PendingIntent.FLAG_MUTABLE | PendingIntent.FLAG_CANCEL_CURRENT : PendingIntent.FLAG_CANCEL_CURRENT);
Notification.Builder bldr = new Notification.Builder(this);
bldr.setSmallIcon(R.drawable.player)
.setOngoing(isPlaying)
.setContentTitle(songName)
.setContentText(authorName)
.setSubText(audioInfo != null ? audioInfo.getAlbum() : null)
.setContentIntent(contentIntent)
.setDeleteIntent(pendingStop)
.setShowWhen(false)
.setCategory(Notification.CATEGORY_TRANSPORT)
.setPriority(Notification.PRIORITY_MAX)
.setStyle(new Notification.MediaStyle()
.setMediaSession(mediaSession.getSessionToken())
.setShowActionsInCompactView(0, 1, 2));
if (Build.VERSION.SDK_INT >= 26) {
NotificationsController.checkOtherNotificationsChannel();
bldr.setChannelId(NotificationsController.OTHER_NOTIFICATIONS_CHANNEL);
}
if (albumArt != null) {
bldr.setLargeIcon(albumArt);
} else {
bldr.setLargeIcon(albumArtPlaceholder);
}
final String nextDescription = LocaleController.getString("Next", R.string.Next);
final String previousDescription = LocaleController.getString("AccDescrPrevious", R.string.AccDescrPrevious);
if (MediaController.getInstance().isDownloadingCurrentMessage()) {
playbackState.setState(PlaybackState.STATE_BUFFERING, 0, 1).setActions(0);
bldr.addAction(new Notification.Action.Builder(R.drawable.ic_action_previous, previousDescription, pendingPrev).build())
.addAction(new Notification.Action.Builder(R.drawable.loading_animation2, LocaleController.getString("Loading", R.string.Loading), null).build())
.addAction(new Notification.Action.Builder(R.drawable.ic_action_next, nextDescription, pendingNext).build());
} else {
playbackState.setState(isPlaying ? PlaybackState.STATE_PLAYING : PlaybackState.STATE_PAUSED,
MediaController.getInstance().getPlayingMessageObject().audioProgressSec * 1000L,
isPlaying ? 1 : 0)
.setActions(PlaybackState.ACTION_PLAY_PAUSE | PlaybackState.ACTION_PLAY | PlaybackState.ACTION_PAUSE | PlaybackState.ACTION_SEEK_TO | PlaybackState.ACTION_SKIP_TO_PREVIOUS | PlaybackState.ACTION_SKIP_TO_NEXT);
final String playPauseTitle = isPlaying ? LocaleController.getString("AccActionPause", R.string.AccActionPause) : LocaleController.getString("AccActionPlay", R.string.AccActionPlay);
bldr.addAction(new Notification.Action.Builder(R.drawable.ic_action_previous, previousDescription, pendingPrev).build())
.addAction(new Notification.Action.Builder(isPlaying ? R.drawable.ic_action_pause : R.drawable.ic_action_play, playPauseTitle, pendingPlaypause).build())
.addAction(new Notification.Action.Builder(R.drawable.ic_action_next, nextDescription, pendingNext).build());
}
mediaSession.setPlaybackState(playbackState.build());
MediaMetadata.Builder meta = new MediaMetadata.Builder()
.putBitmap(MediaMetadata.METADATA_KEY_ALBUM_ART, fullAlbumArt)
.putString(MediaMetadata.METADATA_KEY_ALBUM_ARTIST, authorName)
.putString(MediaMetadata.METADATA_KEY_ARTIST, authorName)
.putLong(MediaMetadata.METADATA_KEY_DURATION, duration)
.putString(MediaMetadata.METADATA_KEY_TITLE, songName)
.putString(MediaMetadata.METADATA_KEY_ALBUM, audioInfo != null ? audioInfo.getAlbum() : null);
mediaSession.setMetadata(meta.build());
bldr.setVisibility(Notification.VISIBILITY_PUBLIC);
notification = bldr.build();
if (isPlaying) {
startForeground(ID_NOTIFICATION, notification);
} else {
stopForeground(false);
NotificationManager nm = (NotificationManager) getSystemService(NOTIFICATION_SERVICE);
nm.notify(ID_NOTIFICATION, notification);
}
} else {
RemoteViews simpleContentView = new RemoteViews(getApplicationContext().getPackageName(), R.layout.player_small_notification);
RemoteViews expandedView = null;
if (supportBigNotifications) {
expandedView = new RemoteViews(getApplicationContext().getPackageName(), R.layout.player_big_notification);
}
notification = new NotificationCompat.Builder(getApplicationContext())
.setSmallIcon(R.drawable.player)
.setContentIntent(contentIntent)
.setChannelId(NotificationsController.OTHER_NOTIFICATIONS_CHANNEL)
.setContentTitle(songName).build();
notification.contentView = simpleContentView;
if (supportBigNotifications) {
notification.bigContentView = expandedView;
}
setListeners(simpleContentView);
if (supportBigNotifications) {
setListeners(expandedView);
}
if (albumArt != null) {
notification.contentView.setImageViewBitmap(R.id.player_album_art, albumArt);
if (supportBigNotifications) {
notification.bigContentView.setImageViewBitmap(R.id.player_album_art, albumArt);
}
} else {
notification.contentView.setImageViewResource(R.id.player_album_art, R.drawable.nocover_small);
if (supportBigNotifications) {
notification.bigContentView.setImageViewResource(R.id.player_album_art, R.drawable.nocover_big);
}
}
if (MediaController.getInstance().isDownloadingCurrentMessage()) {
notification.contentView.setViewVisibility(R.id.player_pause, View.GONE);
notification.contentView.setViewVisibility(R.id.player_play, View.GONE);
notification.contentView.setViewVisibility(R.id.player_next, View.GONE);
notification.contentView.setViewVisibility(R.id.player_previous, View.GONE);
notification.contentView.setViewVisibility(R.id.player_progress_bar, View.VISIBLE);
if (supportBigNotifications) {
notification.bigContentView.setViewVisibility(R.id.player_pause, View.GONE);
notification.bigContentView.setViewVisibility(R.id.player_play, View.GONE);
notification.bigContentView.setViewVisibility(R.id.player_next, View.GONE);
notification.bigContentView.setViewVisibility(R.id.player_previous, View.GONE);
notification.bigContentView.setViewVisibility(R.id.player_progress_bar, View.VISIBLE);
}
} else {
notification.contentView.setViewVisibility(R.id.player_progress_bar, View.GONE);
notification.contentView.setViewVisibility(R.id.player_next, View.VISIBLE);
notification.contentView.setViewVisibility(R.id.player_previous, View.VISIBLE);
if (supportBigNotifications) {
notification.bigContentView.setViewVisibility(R.id.player_next, View.VISIBLE);
notification.bigContentView.setViewVisibility(R.id.player_previous, View.VISIBLE);
notification.bigContentView.setViewVisibility(R.id.player_progress_bar, View.GONE);
}
if (MediaController.getInstance().isMessagePaused()) {
notification.contentView.setViewVisibility(R.id.player_pause, View.GONE);
notification.contentView.setViewVisibility(R.id.player_play, View.VISIBLE);
if (supportBigNotifications) {
notification.bigContentView.setViewVisibility(R.id.player_pause, View.GONE);
notification.bigContentView.setViewVisibility(R.id.player_play, View.VISIBLE);
}
} else {
notification.contentView.setViewVisibility(R.id.player_pause, View.VISIBLE);
notification.contentView.setViewVisibility(R.id.player_play, View.GONE);
if (supportBigNotifications) {
notification.bigContentView.setViewVisibility(R.id.player_pause, View.VISIBLE);
notification.bigContentView.setViewVisibility(R.id.player_play, View.GONE);
}
}
}
notification.contentView.setTextViewText(R.id.player_song_name, songName);
notification.contentView.setTextViewText(R.id.player_author_name, authorName);
if (supportBigNotifications) {
notification.bigContentView.setTextViewText(R.id.player_song_name, songName);
notification.bigContentView.setTextViewText(R.id.player_author_name, authorName);
notification.bigContentView.setTextViewText(R.id.player_album_title, audioInfo != null && !TextUtils.isEmpty(audioInfo.getAlbum()) ? audioInfo.getAlbum() : "");
}
notification.flags |= Notification.FLAG_ONGOING_EVENT;
startForeground(ID_NOTIFICATION, notification);
}
if (remoteControlClient != null) {
int currentID = MediaController.getInstance().getPlayingMessageObject().getId();
if (notificationMessageID != currentID) {
notificationMessageID = currentID;
RemoteControlClient.MetadataEditor metadataEditor = remoteControlClient.editMetadata(true);
metadataEditor.putString(MediaMetadataRetriever.METADATA_KEY_ARTIST, authorName);
metadataEditor.putString(MediaMetadataRetriever.METADATA_KEY_TITLE, songName);
if (audioInfo != null && !TextUtils.isEmpty(audioInfo.getAlbum())) {
metadataEditor.putString(MediaMetadataRetriever.METADATA_KEY_ALBUM, audioInfo.getAlbum());
}
metadataEditor.putLong(MediaMetadataRetriever.METADATA_KEY_DURATION, MediaController.getInstance().getPlayingMessageObject().audioPlayerDuration * 1000L);
if (fullAlbumArt != null) {
try {
metadataEditor.putBitmap(RemoteControlClient.MetadataEditor.BITMAP_KEY_ARTWORK, fullAlbumArt);
} catch (Throwable e) {
FileLog.e(e);
}
}
metadataEditor.apply();
AndroidUtilities.runOnUIThread(new Runnable() {
@Override
public void run() {
if (remoteControlClient == null || MediaController.getInstance().getPlayingMessageObject() == null) {
return;
}
if (MediaController.getInstance().getPlayingMessageObject().audioPlayerDuration == C.TIME_UNSET) {
AndroidUtilities.runOnUIThread(this, 500);
return;
}
RemoteControlClient.MetadataEditor metadataEditor = remoteControlClient.editMetadata(false);
metadataEditor.putLong(MediaMetadataRetriever.METADATA_KEY_DURATION, MediaController.getInstance().getPlayingMessageObject().audioPlayerDuration * 1000L);
metadataEditor.apply();
if (Build.VERSION.SDK_INT >= 18) {
remoteControlClient.setPlaybackState(MediaController.getInstance().isMessagePaused() ? RemoteControlClient.PLAYSTATE_PAUSED : RemoteControlClient.PLAYSTATE_PLAYING,
Math.max(MediaController.getInstance().getPlayingMessageObject().audioProgressSec * 1000L, 100),
MediaController.getInstance().isMessagePaused() ? 0f : 1f);
} else {
remoteControlClient.setPlaybackState(MediaController.getInstance().isMessagePaused() ? RemoteControlClient.PLAYSTATE_PAUSED : RemoteControlClient.PLAYSTATE_PLAYING);
}
}
}, 1000);
}
if (MediaController.getInstance().isDownloadingCurrentMessage()) {
remoteControlClient.setPlaybackState(RemoteControlClient.PLAYSTATE_BUFFERING);
} else {
RemoteControlClient.MetadataEditor metadataEditor = remoteControlClient.editMetadata(false);
metadataEditor.putLong(MediaMetadataRetriever.METADATA_KEY_DURATION, MediaController.getInstance().getPlayingMessageObject().audioPlayerDuration * 1000L);
metadataEditor.apply();
if (Build.VERSION.SDK_INT >= 18) {
remoteControlClient.setPlaybackState(MediaController.getInstance().isMessagePaused() ? RemoteControlClient.PLAYSTATE_PAUSED : RemoteControlClient.PLAYSTATE_PLAYING,
Math.max(MediaController.getInstance().getPlayingMessageObject().audioProgressSec * 1000L, 100),
MediaController.getInstance().isMessagePaused() ? 0f : 1f);
} else {
remoteControlClient.setPlaybackState(MediaController.getInstance().isMessagePaused() ? RemoteControlClient.PLAYSTATE_PAUSED : RemoteControlClient.PLAYSTATE_PLAYING);
}
}
}
} | build(deps): bump checker-qual from 3.24.0 to 3.25.0 (#49)
Bumps [checker-qual](https://github.com/typetools/checker-framework) from 3.24.0 to 3.25.0.
- [Release notes](https://github.com/typetools/checker-framework/releases)
- [Changelog](https://github.com/typetools/checker-framework/blob/master/docs/CHANGELOG.md)
- [Commits](https://github.com/typetools/checker-framework/compare/checker-framework-3.24.0...checker-framework-3.25.0)
---
updated-dependencies:
- dependency-name: org.checkerframework:checker-qual
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot] <support@github.com>
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> | https://github.com/PreviousAlone/Nnngram/commit/f43ec04f5d65146a313da682fb3bcee82ee1aa4e | null | null | TMessagesProj/src/main/java/org/telegram/messenger/MusicPlayerService.java | 0 | java | false | 2022-09-09T14:36:49Z |
/*
 * ANTLR3-generated (machine-written) C parser rule for the belle-sip SIP
 * grammar rule "sent_by": parses the Via header's sent-by component,
 * i.e.  host ( COLON port )?  .
 *
 * On a successful, non-backtracking match it stores the parsed host — and,
 * when a COLON is seen, the parsed port — into the Via header currently
 * being built: (SCOPE_TOP(header_via))->current.
 *
 * NOTE(review): this file is generated from ../grammars/belle_sip_message.g;
 * hand edits (including these comments) are lost on regeneration — change
 * the grammar instead.
 */
static void
sent_by(pbelle_sip_messageParser ctx)
{
const char* host93;
#undef	RETURN_TYPE_host93
#define	RETURN_TYPE_host93 const char*
belle_sip_messageParser_port_return port94;
#undef	RETURN_TYPE_port94
#define	RETURN_TYPE_port94 belle_sip_messageParser_port_return
/* Initialize rule variables
*/
{
// ../grammars/belle_sip_message.g:1435:3: ( host ( COLON port )? )
// ../grammars/belle_sip_message.g:1435:7: host ( COLON port )?
{
/* Mandatory host part. */
FOLLOWPUSH(FOLLOW_host_in_sent_by5499);
host93=host(ctx);
FOLLOWPOP();
if (HASEXCEPTION())
{
goto rulesent_byEx;
}
if (HASFAILED())
{
return ;
}
/* Only mutate the header when actually parsing (not while backtracking). */
if ( BACKTRACKING==0 )
{
belle_sip_header_via_set_host((SCOPE_TOP(header_via))->current,
host93
);
}
// ../grammars/belle_sip_message.g:1436:6: ( COLON port )?
{
int alt130=2;
/* One-token lookahead: a COLON means an explicit port follows. */
switch ( LA(1) )
{
case COLON:
{
alt130=1;
}
break;
}
switch (alt130)
{
case 1:
// ../grammars/belle_sip_message.g:1436:8: COLON port
{
MATCHT(COLON, &FOLLOW_COLON_in_sent_by5510);
if (HASEXCEPTION())
{
goto rulesent_byEx;
}
if (HASFAILED())
{
return ;
}
FOLLOWPUSH(FOLLOW_port_in_sent_by5512);
port94=port(ctx);
FOLLOWPOP();
if (HASEXCEPTION())
{
goto rulesent_byEx;
}
if (HASFAILED())
{
return ;
}
if ( BACKTRACKING==0 )
{
belle_sip_header_via_set_port((SCOPE_TOP(header_via))->current,
port94.ret
);
}
}
break;
}
}
}
}
// This is where rules clean up and exit
//
goto rulesent_byEx; /* Prevent compiler warnings */
rulesent_byEx: ;
if (HASEXCEPTION())
{
// This is ugly. We set the exception type to ANTLR3_RECOGNITION_EXCEPTION so we can always
// catch them.
//PREPORTERROR();
EXCEPTION->type = ANTLR3_RECOGNITION_EXCEPTION;
}
return ;
} | static void
sent_by(pbelle_sip_messageParser ctx)
{
const char* host93;
#undef RETURN_TYPE_host93
#define RETURN_TYPE_host93 const char*
belle_sip_messageParser_port_return port94;
#undef RETURN_TYPE_port94
#define RETURN_TYPE_port94 belle_sip_messageParser_port_return
/* Initialize rule variables
*/
{
// ../grammars/belle_sip_message.g:1439:3: ( host ( COLON port )? )
// ../grammars/belle_sip_message.g:1439:7: host ( COLON port )?
{
FOLLOWPUSH(FOLLOW_host_in_sent_by5499);
host93=host(ctx);
FOLLOWPOP();
if (HASEXCEPTION())
{
goto rulesent_byEx;
}
if (HASFAILED())
{
return ;
}
if ( BACKTRACKING==0 )
{
belle_sip_header_via_set_host((SCOPE_TOP(header_via))->current,
host93
);
}
// ../grammars/belle_sip_message.g:1440:6: ( COLON port )?
{
int alt130=2;
switch ( LA(1) )
{
case COLON:
{
alt130=1;
}
break;
}
switch (alt130)
{
case 1:
// ../grammars/belle_sip_message.g:1440:8: COLON port
{
MATCHT(COLON, &FOLLOW_COLON_in_sent_by5510);
if (HASEXCEPTION())
{
goto rulesent_byEx;
}
if (HASFAILED())
{
return ;
}
FOLLOWPUSH(FOLLOW_port_in_sent_by5512);
port94=port(ctx);
FOLLOWPOP();
if (HASEXCEPTION())
{
goto rulesent_byEx;
}
if (HASFAILED())
{
return ;
}
if ( BACKTRACKING==0 )
{
belle_sip_header_via_set_port((SCOPE_TOP(header_via))->current,
port94.ret
);
}
}
break;
}
}
}
}
// This is where rules clean up and exit
//
goto rulesent_byEx; /* Prevent compiler warnings */
rulesent_byEx: ;
if (HASEXCEPTION())
{
// This is ugly. We set the exception type to ANTLR3_RECOGNITION_EXCEPTION so we can always
// catch them.
//PREPORTERROR();
EXCEPTION->type = ANTLR3_RECOGNITION_EXCEPTION;
}
return ;
} | Fix crash while receiving some kind of invalid from header. | https://github.com/BelledonneCommunications/belle-sip/commit/116e3eb48fe43ea63eb9f3c4b4b30c48d58d6ff0 | null | null | src/grammars/belle_sip_messageParser.c | 0 | c | false | 2021-05-13T14:08:12Z |
function K(t){return P(t)?Array.isArray(t)?t.map((function(t){return{key:t,val:t}})):Object.keys(t).map((function(e){return{key:e,val:t[e]}})):[]} | function K(t){return Array.isArray(t)||g(t)} | YetiForce CRM ver. 6.4.0 (#16359)
* Added improvements in record collector
* Integration with UaYouControl.php (#16293)
Co-authored-by: Mariusz Krzaczkowski <m.krzaczkowski@yetiforce.com>
* Integration with UaYouControl.php (#16293)
* Add external link to NoBrregEnhetsregisteret. (#16292)
* Add external link to NoBrregEnhetsregisteret. #16292
* Add NorthData to RecordCollectors. (#16278)
* Add NorthData to RecordCollectors.
* Change docs.
Co-authored-by: Mariusz Krzaczkowski <m.krzaczkowski@yetiforce.com>
* Fix #16311
* Added conditions wizard for 'Update related record' workflow action
* Add NorthData to RecordCollectors. (#16278)
* Code improvements
* Added improvements in record collector
* Zefix integration [in progress] (#16281)
* Zefix integration [in progress]
* ChZefix integration.
Co-authored-by: Mariusz Krzaczkowski <m.krzaczkowski@yetiforce.com>
* Improved workflow action
* Added improvements in record collector
* Improvements in the store
* Update RecordCollector tests
* Code improvements
* Improved ConfReport
* languages/en-US/Other/RecordCollector.json
* Improved change module type
* Improved default dashboard in api portal
* Fix Send PDF workflow task
* Improved default dashboard in api
* Fixed attachments in 'Emails to send' panel
* lib_roundcube 0.3.0 Roundcube Webmail 1.6.0
* tests
* tests
* Update tests.yml
* Added improvements in record collector
* tests/setup/dependency.sh
* .github/workflows/tests.yml
* .github/workflows/tests.yml
* .github/workflows/tests.yml
* Code improvements
* Update dependencies
* Added minor improvements
* tests
* Added improvements in record collector
* Added improvements in record collector
* Added minor improvements
* Added minor improvements
* Added minor improvements
* Improved import file button
* Improved imap connection
* Fix #16317 - list view entries count
* Added minor code improvements
* Improved menu items
* Added improvements in record collector
* Fix gantt view (#15772)
* Added improvements in record collector
* Added improvements in record collector
* Added improvements in record collector
* Updated graphics in store
* Update install translations
* Update fonts
* Improved OSSMail template
* Updated graphics in store
* Update fonts
* tests
* tests Validator
* Update icons
* Improved widgets permissions (#15613)
* Increase scrolling speed (#15031)
* Added tracking to media management
* Added improvements in record collector
* Added improvements in record collector
* Added improvements in record collector
* Added minor code improvements
* tests
* Added minor code improvements
* Fixed #15164 (#16319)
* Some changes in Import module (#16318)
* Code formatting
* Added improvements in record collector
* Change the library "sonata-project / google-authenticator" to "pragmarx/google2fa"
* Update dependencies
* Update dependencies
* Updated *.min and *.map files
* Change the library "sonata-project / google-authenticator" to "pragmarx/google2fa"
* Added minor improvements in Composer::install
* Update dev dependency
* Added dropdown button to record collectors (#16322)
* Corrected Record collectors table width (#16323)
* Fixed #15183 modulesMapRelatedFields don`t work correct for multipicklist
* Added minor improvements in Credits
* Fix edit view header links
* Improved Inventory panel and PDF widget
* Added improvements in record collector
* Added improvements in record collector
* Update install translations
* #16282 Improved the handler from getting coordinates to the map
* Added minor code improvements
* Fixed getting reference module in inventory name field (#16329)
* Missing icons update
* Improved tree field type
* Improved tests and some code
* Fix tree field type
* Fix scheme for tree data table
* Improved switch users
* Improved YetiForce CLI
* [PROD](renovate) Update dependency github/super-linter to v4.9.6 (#16324)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
* Bump giggsey/libphonenumber-for-php from 8.12.52 to 8.12.53 (#16331)
Bumps [giggsey/libphonenumber-for-php](https://github.com/giggsey/libphonenumber-for-php) from 8.12.52 to 8.12.53.
- [Release notes](https://github.com/giggsey/libphonenumber-for-php/releases)
- [Commits](https://github.com/giggsey/libphonenumber-for-php/compare/8.12.52...8.12.53)
---
updated-dependencies:
- dependency-name: giggsey/libphonenumber-for-php
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* Improved switch users
* Improved switch users
* Unused code has been removed
* Fix tree field type
* Added improvements in record collector
* Improved integration with DAV
* Improved conditions wizard for 'Update related record' workflow action
* Fixed focus to search text field when click on select2 drop down in modal window
* Added improvements in record collector
* Added minor code improvements
* Improved ConfReport
* Improved .htaccess
* Added minor code improvements
* Improved ConfReport
* Fix icon on tree field type and change icon management view
* Removed unused code. "Is added" - condition in workflows (#16321)
* Correct setting of check boxes of Inventory boolean fields depending on their values. (#16326)
* Update Inventory.js
Now the check-boxes of Inventory boolean fields will be set correctly regarding to their content.
* README.md (#16332)
* Improve inventory auto fill
* Improved getting data from smtp (#16334)
* Fixed #13136 (#16335)
* Improved DB structure for map table cache
* Improved updating payment status (#16327)
* Improved updating payment status
* Corrected translation (#16336)
* Removed translation (#16337)
* A functionality has been added to unlock e-mail accounts
* Fix #13486
* Update dependencies
* mbstring.func_overload
* Added priority to CalendarActivities and OverdueActivities dashboard … (#16276)
* Added priority to CalendarActivities and OverdueActivities dashboard widgets
* Added improvement
* Hidden icon for previewing replies in comments (#16339)
* The display of the multi email field has been improved
* Added working time counter widget. (#16316)
* Added working time counter widget.
* Added translation
* Added improvements
* Removed variable
* Corrected comment
* Added title to buttons
* Added type to variable
* Removed redundant characters
* Added working time counter widget. #16316
* Added minor improvements
* Improved dashboard titles
* Updated *.min and *.map files
* Added minor improvements in languages
* Updated translation
* Improvements have been added to the integration with WAPRO ERP
* Update install translations
* Update translations
* Update translations
* Added improvements in record collector
* Added improvements in record collector
* Improved input data cleanup
* Improved RSS
* Improved Rss
* Update all Yarn dependencies (2022-08-15) (#16344)
Co-authored-by: depfu[bot] <23717796+depfu[bot]@users.noreply.github.com>
* Added improvements in record collector
* Improvements in the mechanism of generating PDF files
* YetiForcePDF update v0.1.40 & Update dependencies
* Improved some config templates
* Added minor improvements
* Remove unnecessary code
* .github/workflows/actions.yml
* .github/workflows/actions.yml
* .github/workflows/tests.yml
* .github/workflows/tests.yml
* .github/workflows/tests.yml
* Improved Db importer/updater
* Added buttons to the Working hours counter widget (#16340)
* Added buttons to the Working hours counter widget
* Added translations
* Improved widget
* Added button lock when starting timing
* Update translations
* Added missing translation
* .github/workflows/tests.yml
* .github/workflows/tests.yml
* .github/workflows/tests.yml
* Added missing translation
* .github/workflows/tests.yml
* Removed Translation (#16347)
Co-authored-by: Radosław Skrzypczak <r.skrzypczak@yetiforce.com>
* Update install translations
* Added minor improvement in get actual version of PHP
* Update install translations
* Updated *.min and *.map files
* Redundant code has been removed
* .github/workflows/tests.yml
* .github/workflows/tests.yml
* .github/workflows/tests.yml
* Improved RSS
* Added improvements
* Update DEV dependencies
* Fix Completions initialization in comments widget (#16348)
* Update fonts
* Fixed sending files in API for PUT method
* Update DEV dependencies
* Improved valid of time in Business Hours (#16351)
* Improved executing workflow when an unsupported operator is selected (#16352)
* Improved executing workflow when an unsupported operator is selected
* Improved getting translation (#16350)
* Improved Importer
* Improved working time counter widget
* Improved api
* Expansion of the tests
* Expansion of the tests
* tests
* Update DEV dependencies
* Improved Rss
* tests
* Value display secured
* Added improvements
* Improved index name
* Improved validation of quantity field (#16355)
* Improved validation of quantity field
* Improved code
* Add missing picklist dependencies
* Added validation whether at least one business day has been selected in the Business hours module (#16356)
* Compile js
* Moved swagger file
* Improved swagger generating functions
* Added minor improvements
* Fixed issue with date format
* Added improvements
* Fixed a bug when selecting all users in the calendar quick edit view (#16357)
* Improved swagger generating functions
* Added improvements
* Added improvements
* Added improvements
* Improved Address Search panel
* Improved Emails to send panel
* Fix action name
* Fix description in docBlock
* tests/Settings/ApiAddress.php
* Compile js
* tests/Settings/ApiAddress.php
* tests/Settings/ApiAddress.php
* Remove html unnecessary class
* Fixed #14266 (#16349)
* [PROD](renovate) Update debian Docker tag to v11 (#16341)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
* Improved anonymization
* Added improvements
* Update install translations
* Improved config class
* Added improvements
* Improved generatedtype for some fields
* Fixed #15631 (#16358)
* Added improvements
* Improved block sequence
* 6.4.0
Co-authored-by: rembiesa <103192653+rembiesa@users.noreply.github.com>
Co-authored-by: Radosław Skrzypczak <r.skrzypczak@yetiforce.com>
Co-authored-by: Adrian Koń <a.kon@yetiforce.com>
Co-authored-by: bmankowski <bmankowski@gmail.com>
Co-authored-by: Arek Solek <arkadiusz_s9887@wp.pl>
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Jared Ramon Elizan <elizanjaredr@gmail.com>
Co-authored-by: depfu[bot] <23717796+depfu[bot]@users.noreply.github.com> | https://github.com/yetiforcecompany/yetiforcecrm/commit/2c14baaf8dbc7fd82d5c585f2fa0c23528450618 | CVE-2022-1340 | ['CWE-79'] | public_html/layouts/resources/views/KnowledgeBase/KnowledgeBase.vue.js | 0 | js | false | 2022-08-19T11:40:16Z |
function je(t,e){for(var n=t.attrsList,r=0,i=n.length;r<i;r++){var o=n[r];if(e.test(o.name))return n.splice(r,1),o}} | function je(t,e){if(!(e&&(t._directInactive=!0,Oe(t))||t._inactive)){t._inactive=!0;for(var n=0;n<t.$children.length;n++)je(t.$children[n]);Pe(t,"deactivated")}} | Build assets | https://github.com/PrestaShop/PrestaShop/commit/011d8831a9a7b619aecb49208db5d0a36b9677d1 | CVE-2020-6632 | ['CWE-79'] | admin-dev/themes/new-theme/public/translations.bundle.js | 0 | js | false | null |
function gl(e,t){for(var n=e.callbackNode,r=e.suspendedLanes,i=e.pingedLanes,o=e.expirationTimes,s=e.pendingLanes;0<s;){var l=31-Ht(s),u=1<<l,c=o[l];if(-1===c){if(0==(u&r)||0!=(u&i)){c=t,Dt(u);var d=Rt;o[l]=10<=d?c+250:6<=d?c+5e3:-1}}else c<=t&&(e.expiredLanes|=u);s&=~u}if(r=It(e,e===Ps?Rs:0),t=Rt,0===r)null!==n&&(n!==Di&&Ei(n),e.callbackNode=null,e.callbackPriority=0);else{if(null!==n){if(e.callbackPriority===t)return;n!==Di&&Ei(n)}15===t?(n=bl.bind(null,e),null===Fi?(Fi=[n],qi=Ci(Mi,Ki)):Fi.push(n),n=Di):14===t?n=$i(99,bl.bind(null,e)):(n=function(e){switch(e){case 15:case 14:return 99;case 13:case 12:case 11:case 10:return 98;case 9:case 8:case 7:case 6:case 4:case 5:return 97;case 3:case 2:case 1:return 95;case 0:return 90;default:throw Error(a(358,e))}}(t),n=$i(n,vl.bind(null,e))),e.callbackPriority=t,e.callbackNode=n}} | function gl(e,t){for(var n=e.callbackNode,r=e.suspendedLanes,i=e.pingedLanes,o=e.expirationTimes,s=e.pendingLanes;0<s;){var l=31-Bt(s),c=1<<l,u=o[l];if(-1===u){if(0==(c&r)||0!=(c&i)){u=t,Rt(c);var d=Dt;o[l]=10<=d?u+250:6<=d?u+5e3:-1}}else u<=t&&(e.expiredLanes|=c);s&=~c}if(r=It(e,e===Ms?Ds:0),t=Dt,0===r)null!==n&&(n!==Ri&&Ei(n),e.callbackNode=null,e.callbackPriority=0);else{if(null!==n){if(e.callbackPriority===t)return;n!==Ri&&Ei(n)}15===t?(n=bl.bind(null,e),null===qi?(qi=[n],Fi=Ci(Ai,Yi)):qi.push(n),n=Ri):14===t?n=$i(99,bl.bind(null,e)):(n=function(e){switch(e){case 15:case 14:return 99;case 13:case 12:case 11:case 10:return 98;case 9:case 8:case 7:case 6:case 4:case 5:return 97;case 3:case 2:case 1:return 95;case 0:return 90;default:throw Error(a(358,e))}}(t),n=$i(n,vl.bind(null,e))),e.callbackPriority=t,e.callbackNode=n}} | fix: OPTIC-179: Properly ensure content is escaped (#4926)
* fix: OPTIC-179: Properly ensure content is escaped
* ci: Build frontend
Workflow run: https://github.com/HumanSignal/label-studio/actions/runs/6567480699
* update types
* ci: Build frontend
Workflow run: https://github.com/HumanSignal/label-studio/actions/runs/6567539053
* update
* ci: Build frontend
Workflow run: https://github.com/HumanSignal/label-studio/actions/runs/6568061660
* update
* Update label_studio/frontend/src/pages/DataManager/DataManager.js
* ci: Build frontend
Workflow run: https://github.com/HumanSignal/label-studio/actions/runs/6568126554
---------
Co-authored-by: robot-ci-heartex <robot-ci-heartex@users.noreply.github.com> | https://github.com/HumanSignal/label-studio/commit/a7a71e594f32ec4af8f3f800d5ccb8662e275da3 | CVE-2023-47115 | ['CWE-79'] | label_studio/frontend/dist/react-app/index.js | 0 | js | false | 2023-10-19T01:22:32Z |
async def action_for_event_by_user(
self, event: EventBase, context: EventContext
) -> None:
"""Given an event and context, evaluate the push rules, check if the message
should increment the unread count, and insert the results into the
event_push_actions_staging table.
"""
if event.internal_metadata.is_outlier():
# This can happen due to out of band memberships
return
# Disable counting as unread unless the experimental configuration is
# enabled, as it can cause additional (unwanted) rows to be added to the
# event_push_actions table.
count_as_unread = False
if self.hs.config.experimental.msc2654_enabled:
count_as_unread = _should_count_as_unread(event, context)
rules_by_user = await self._get_rules_for_event(event)
actions_by_user: Dict[str, Collection[Union[Mapping, str]]] = {}
room_member_count = await self.store.get_number_joined_users_in_room(
event.room_id
)
(
power_levels,
sender_power_level,
) = await self._get_power_levels_and_sender_level(event, context)
relations = await self._get_mutual_relations(
event, itertools.chain(*rules_by_user.values())
)
evaluator = PushRuleEvaluatorForEvent(
event,
room_member_count,
sender_power_level,
power_levels,
relations,
self._relations_match_enabled,
)
users = rules_by_user.keys()
profiles = await self.store.get_subset_users_in_room_with_profiles(
event.room_id, users
)
# This is a check for the case where user joins a room without being
# allowed to see history, and then the server receives a delayed event
# from before the user joined, which they should not be pushed for
uids_with_visibility = await filter_event_for_clients_with_state(
self.store, users, event, context
)
for uid, rules in rules_by_user.items():
if event.sender == uid:
continue
if uid not in uids_with_visibility:
continue
display_name = None
profile = profiles.get(uid)
if profile:
display_name = profile.display_name
if not display_name:
# Handle the case where we are pushing a membership event to
# that user, as they might not be already joined.
if event.type == EventTypes.Member and event.state_key == uid:
display_name = event.content.get("displayname", None)
if not isinstance(display_name, str):
display_name = None
if count_as_unread:
# Add an element for the current user if the event needs to be marked as
# unread, so that add_push_actions_to_staging iterates over it.
# If the event shouldn't be marked as unread but should notify the
# current user, it'll be added to the dict later.
actions_by_user[uid] = []
for rule, enabled in rules:
if not enabled:
continue
matches = evaluator.check_conditions(rule.conditions, uid, display_name)
if matches:
actions = [x for x in rule.actions if x != "dont_notify"]
if actions and "notify" in actions:
# Push rules say we should notify the user of this event
actions_by_user[uid] = actions
break
# Mark in the DB staging area the push actions for users who should be
# notified for this event. (This will then get handled when we persist
# the event)
await self.store.add_push_actions_to_staging(
event.event_id,
actions_by_user,
count_as_unread,
) | async def action_for_event_by_user(
self, event: EventBase, context: EventContext
) -> None:
"""Given an event and context, evaluate the push rules, check if the message
should increment the unread count, and insert the results into the
event_push_actions_staging table.
"""
if event.internal_metadata.is_outlier():
# This can happen due to out of band memberships
return
# Disable counting as unread unless the experimental configuration is
# enabled, as it can cause additional (unwanted) rows to be added to the
# event_push_actions table.
count_as_unread = False
if self.hs.config.experimental.msc2654_enabled:
count_as_unread = _should_count_as_unread(event, context)
rules_by_user = await self._get_rules_for_event(event)
actions_by_user: Dict[str, Collection[Union[Mapping, str]]] = {}
room_member_count = await self.store.get_number_joined_users_in_room(
event.room_id
)
(
power_levels,
sender_power_level,
) = await self._get_power_levels_and_sender_level(event, context)
relation = relation_from_event(event)
# If the event does not have a relation, then cannot have any mutual
# relations or thread ID.
relations = {}
thread_id = "main"
if relation:
relations = await self._get_mutual_relations(
relation.parent_id, itertools.chain(*rules_by_user.values())
)
if relation.rel_type == RelationTypes.THREAD:
thread_id = relation.parent_id
evaluator = PushRuleEvaluatorForEvent(
event,
room_member_count,
sender_power_level,
power_levels,
relations,
self._relations_match_enabled,
)
users = rules_by_user.keys()
profiles = await self.store.get_subset_users_in_room_with_profiles(
event.room_id, users
)
# This is a check for the case where user joins a room without being
# allowed to see history, and then the server receives a delayed event
# from before the user joined, which they should not be pushed for
uids_with_visibility = await filter_event_for_clients_with_state(
self.store, users, event, context
)
for uid, rules in rules_by_user.items():
if event.sender == uid:
continue
if uid not in uids_with_visibility:
continue
display_name = None
profile = profiles.get(uid)
if profile:
display_name = profile.display_name
if not display_name:
# Handle the case where we are pushing a membership event to
# that user, as they might not be already joined.
if event.type == EventTypes.Member and event.state_key == uid:
display_name = event.content.get("displayname", None)
if not isinstance(display_name, str):
display_name = None
if count_as_unread:
# Add an element for the current user if the event needs to be marked as
# unread, so that add_push_actions_to_staging iterates over it.
# If the event shouldn't be marked as unread but should notify the
# current user, it'll be added to the dict later.
actions_by_user[uid] = []
for rule, enabled in rules:
if not enabled:
continue
matches = evaluator.check_conditions(rule.conditions, uid, display_name)
if matches:
actions = [x for x in rule.actions if x != "dont_notify"]
if actions and "notify" in actions:
# Push rules say we should notify the user of this event
actions_by_user[uid] = actions
break
# Mark in the DB staging area the push actions for users who should be
# notified for this event. (This will then get handled when we persist
# the event)
await self.store.add_push_actions_to_staging(
event.event_id,
actions_by_user,
count_as_unread,
thread_id,
) | Merge remote-tracking branch 'origin/develop' into squah/fix_rejected_events_in_state | https://github.com/matrix-org/synapse/commit/be4c8ebc2f3dc2747b21647378e11569cc642f1e | null | null | synapse/push/bulk_push_rule_evaluator.py | 0 | py | false | 2022-09-15T14:37:47Z |
/**
 * Mixin {@code @Overwrite} replacing the target class' {@code open(URI)}.
 *
 * <p>Converts the URI to a URL and delegates to the sibling
 * {@code open(URL)} overload, but does so on a background task via
 * {@link CompletableFuture#runAsync} (default common pool) rather than the
 * calling thread — presumably to keep the blocking browser launch off the
 * game thread (the MC-148149 fix referenced by this change); confirm
 * against the mixin target class.
 *
 * <p>A {@link MalformedURLException} is reported to stderr and swallowed;
 * no other error handling is performed here.
 */
@Overwrite
public void open(URI uri) {
CompletableFuture.runAsync(() -> {
try {
this.open(uri.toURL());
} catch (MalformedURLException e) {
System.err.println("Couldn't open uri '" + uri + "'");
e.printStackTrace();
}
});
} | @Overwrite(aliases = {"method_673", "m_137648_"}, remap = false)
public void open(URI uri) {
CompletableFuture.runAsync(() -> {
try {
this.open(uri.toURL());
} catch (MalformedURLException e) {
System.err.println("Couldn't open uri '" + uri + "'");
e.printStackTrace();
}
});
} | MC-46766, MC-215531, MC-217716, MC-69216, MC-119417, MC-119754, MC-129909, MC-193343, MC-215530
only save config on launch once and only if there is a difference
only register bug fix if it hasn't already been registered (2+ mixins per fix)
fix crash with forge from arch bug (MC-148149)
closes #33 | https://github.com/isXander/Debugify/commit/fa3a1c3d38f5ba67cb0b75c85d53110ffd8b7f48 | null | null | common/src/main/java/cc/woverflow/debugify/mixins/client/mc148149/OperatingSystemMixin.java | 0 | java | false | 2022-03-31T19:10:19Z |
public void allocation(int i) { allocation = bounds(0,i,MAX_SPENDING_TICKS); } | public void allocation(int i) {
allocation = bounds(0,i,MAX_SPENDING_TICKS);
view().owner().flagColoniesToRecalcSpending();
} | Inofficial 0.93a (#62)
* Replaced many isPlayer() by isPlayerControlled()
This leads to a more fluent experience on AutoPlay.
Note: the council-voting acts weird when you try that there, getting you stuck, and the News-Broadcasts also seem to use a differnt logic and are always shown.
* Orion-guard-handling
Avoid sending colonization-fleets, when guardian is still alive
Increase guardian-power-estimate to 100,000
Don't send and count bombers towards guardian-defeat-force
* Fixed Crash from a report on reddit
Not sure how that could happen though. Never had this issue in games started on my computer.
* Fixed crash after AI defeats orion-guard
Tried to access an out-of-bounds-value because it couldn't find the System where the guardian was. So looking to find the Planet with the Orionartifact now, which does find it.
* All the forgotten isPlayer()-calls
Including the Base- and Modnar-AI-Diplomat
* Personalities now play a role in my AI once again
Instead of everyone having the same conditions for war, there's now different ones. Some are more aggressive, some are less.
* Fixed issue where enemy-fleets weren's seen, when they actually were.
Also added a way of guessing when enemy-fleets aren't seen so it shouldn't trickle in tiny fleets that all retreat anymore in the early-game, before they can see the enemy-fleets. And also not later because they now actually can see them.
* Update AIDiplomat.java
* Delete ShipCombatResults.java
* Delete Empire.java
* Delete TradeRoute.java
* Revert "Delete TradeRoute.java"
This reverts commit 6e5c5a8604e89bb11a9a6dc63acd5b3f27194c83.
* Revert "Delete Empire.java"
This reverts commit 1a020295e05ebc735dae761f426742eb7bdc8d35.
* Revert "Delete ShipCombatResults.java"
This reverts commit 5f9287289feb666adf1aa809524973c54fbf0cd5.
* Update ShipCombatResults.java
reverting pull-request... manually because I don't know of a better way 8[
* Update Empire.java
* Update TradeRoute.java
* Merge with Master
* Update TradeRoute.java
* Update TradeRoute.java
github editor does not allow me to remove a newline oO
* AI improvements
Added parameter to make the colonizer-function return just the uncolonized planets without substracting the colony-ships.
Impelemented a very differnt diplomatic behavior, that supposedly is more suitable to actually winning by waging less war, particularly when big enough to be voted for.
Revoked that AI doesn't adjust to enemy-troops when it has a more defensive unit-composition. In lategame going by destructive-power of bombers hasn't much merit, when 1 bombers can pack 60ish neutronium-bombs.
Fixed an issue that prevented designing extended-fuel-range-colonizers asap. This once again helps expansion-speed a lot.
Security now is only spent against empires actually in range to spy, as it was intended. Contact via council should not increase paranoia. Also Xenophobic-extra-paranoia removed.
* Update OrionGuardianShip.java
* War-declaration behavior
Electible faction will no longer declare war on someone who is at war with the other electible faction.
* Fix zero-division on refreshing a trade-route that had been adjusted due to empire-shrinkage.
* AI & Bugfix
Leader-stuff is now overridden
Should now retreat against repulsors, when no counter available
Tech-nullifier hit and miss mixup fixed
* AI improvements
Fixed a rare crash in AutoPlay-Mode
Massive overhaul of AI-ship-design
Spy master now more catious about avoiding unwanted, dangerous wars due to spying on the wrong people
Colony-ship-designs now will only get a weapon, if this doesn't prevent reserve-tanks or reducing their size to medium
Fixed an issue where ai wouldn't retreat from ships with repulsor-beam that exploited their range-advantage
Increased research-value of cloaking-tech and battle-suits
Upon reaching tech-level 99 in every field, AI will produce ships non-stop even in peace-time
AI will now retreat fleets from empires it doesn't want to go to war with in order to avoid trespassing-issues
Fixed issue where the AI wanted to but couldn't colonize orion before it was cleared and then wouldn't colonize other systems in that time
AI will no longer break non-aggression-pacts when it still has a war with someone else
AI will only begin wars because of spying, if it actually feels comfortable dealing with the person who spied on them
AI will now try and predict whether it will be attacked soon and enact preventive measures
* Tackling some combat-issue
Skipping specials when it comes to determining optimal-range. Otherwise ships with long-range-specials like warp-dissipator would never close in on their prey.
Resetting the selectedWeaponIndex upon reload to consistency start with same weapons every turn.
Writing new attack-method that allows to skip certain weapons like for example repulsor-beam at the beginning of the turn and fire them at the end instead.
* Update NewShipTemplate.java
Fixed incorrect operator "-=" instead of "=-", that prevented weapons being used at all when no weapon that is strong enough for theorethical best enemy shield could be found.
Also when range is required an not fitting beam can be found try missiles and when no fitting missile can be found either, try short-range weapon. Should still be better than nothing.
* Diplomacy and Ship-Building
Reduced suicidal tendencies. No more war-declarations against wastly superior enemies. At most they ought to be 25% stronger.
Voting behavior is now deterministic. No more votes based on fear, only on who they actually like and only for those whom they have real-contact.
Ship-construction is now based on relative-productivity rather than using a hard cut-off for anything lower than standard-resources. So when the empire only has poor-systems it will try to build at least a few ships still.
* Orion prevented war
Fixed an issue, where wanting to build a colonizer for Orion prevented wars.
* Update AIGeneral.java
Don't invade unless you have something in orbit.
* Update AIFleetCommander.java
No longer using hyperspace-communications... for now. (it produced unintentional results and error-messages)
* AI improvements
corrected typo in Stellar Converter
Fixed trading for techs that have no value by taking the random modifier into account
No longer agreeing to or offering alliances
Added sophisticated behavior when it comes to offering and agreeing to joint wars
Ignore existence of council in diplomatic behavior
No longer needing to have positive relations for council vote, the lesser evil is good enough
Improved the selection at which planets ships are being built, this time really improved rather than probably making it worse than before
No longer using extended fuel-tanks when unlimited-range-tech is researched
Avoid designing bombers that can overkill a planet single-handedly, instead replace part of the bombs with other weapons
Scoring adjustments for special-devices, primarily no longer taking space into account for score as otherwise miniaturization would always lead to always using the old, weak specials
* AI-improvements and combat-estimated fixes
No longer wanting to do a NAP with preferred target
Divide score for target by 3 if nap is in place, so nap now is really usefull to have with Empires outside of trading range will no longer offer joint wars
Logic for whether to declare war on somone who spied on them is now same as if they were their best victim. This prevents suicidal anti-spy-wars.
Prevention-war will now also be started upon invasions
No longer setting retreat on arrival, as preemtively retreating may prevent retargetting something else, especially important for colony-ships early on
Retreats are now also handled after the fleet-manager had the chance to do something else with these ships
reduced the percentage of bombers that will be built by about half. So it can't go over 50% anymore and usually will be even lower
Fixed an issue with incorrect kill-percentage estimates. After that fix the Valuefactor, which had some correcting attributes, was no longer needed and thus removed.
Made sure that bombers are always designed to actually have bombs.
Invaders with surrenderOnArrival are no longer considered a threat
* AI and damage-simulation-stuff
No longer bombing colonies of someone who isn't my enemy
Asking others to join a war, when our opponent is stronger than us
Agreeing to join-war request when not busy otherwise, together we'd be stronger and the asked-for-faction is our primary target anyways
Restricted war over espionage even more, to only do it if we feel ready. Much better to share a few techs than to die.
Wars for strategic reasons can now once again be cancelled.
Doing a cost:gain-analysis for invasions, that is much more likely to invade, especially if we have previously been invaded and the planet has factories from us. It also takes the savings of not having to build a colony-ship into account.
Further reduced the overkill for invasions. We want to do more of them when they are cost-efficient but still not waste our population.
More planets will participate in the construction of colony-ships.
When we are at war or have others in our range during the phase where we want to churn out colony-ships, we mix in combat-ships to make us less of a juicy target and be able to repel sneak-attacks better.
Hostile-planets now get more population sent to.
Stacks of strong individual ships about to die in combat, will consider their lessened chances of survival and may now retreat when damaged too much.
Reverted change in method that I don't use anyways.
Fixed shield-halving-properties of weapons like neutron-pellet-guns not being taken into account in combat-simulations using the firepower-method.
Fixed bombers estimating the whole stack sharing a single bomb, which dramatically underestimated the firepower they had.
* Mostly things about research
Fixed that studyingFutureTech() always returned false.
Fixed issue with having more research-allocation-points than possible in base- and modnar-AI when redistributing research-points to avoid researching future techs before other techs are researched
Inclusion of faction-specific espionage-replies
Maximum ship-maintenance now capped at 45% (5*warp-speed) before all techs are researched, then it is doubled to 90%
More bombers will be built when opponents use missile-bases a lot
When under siege by enemy bombers will no longer build factories but either population, ships or research depending on how destructive the bombardment is. This is to avoid investing into something that will be lost or not finished anyways.
Future techs are now considered lower priority than all non-obsolete techs.
Fixed missing override of baseValue for future-techs
No longer allocating RP to techs which have a higher percentage to be researched next turn than the percentage of RP attributed to them. This should increase research-output slightly.
Ship-stacks that have colony-ships in their fleet will no longer be much more ready to retreat because they consider the low survivability of colony-ships as a threat to themselves. Colony-ships may retreat alone, but won't always do so either.
Retreat threshold is now always 1.0, meaning if they think they can win, they will stay, even if the losses equal their own kills.
Allowed treasurer to gather taxes in order to save planets from nova or plague.
* Making the AI even tougher
Will now go to war at 75% development rather than 100%. So much more aggressive with much fewer and shorter periods of peace.
Will no longer offer or agree to a peace-treaty, when it has ongoing invasions.
Maximum-ship-maintenance-percentage now is a root-function of tech-level that's steeper early on and smoothes out later.
During peace-time will now gather ships at centralized locations for better responsiveness.
Will no longer stage an attack on the orion-guardian while having enemies.
Siege-fleets will no longer wait for transports to land before doing something else.
Reduced the superiority-aim for combats to 2, down from 4 to allow more simultaneous missions.
When a system where ships are gathered has incoming enemy fleets, this no longer prevents the ships from going to other missions until the attacks are over, which had allowed a theoretical exploit where you could keep sending tiny fleets to make the AI not move their fleets. Instead they now substract the exact amount they think they need to defend from the ships that they can send away.
Stopping to make more ships when in possession of more than 4 times the amount of all enemies combined.
Making more ships when non-enemy-neigbours build more ships. Aiming for at least 1/4th of that what our strongest non-enemy-neighbor has.
Decreased the importance of income of a planet to determine how usefull it is for production. No resource-bonuses are more important for that.
Fixed issues with tech-slider-management caused by upcomingDiscoverChance looking at the forecast of RP that will be added rather than the RP spent at the time it is called.
When only future-techs are left to research, the allocations will be shifted in a way to heavily favor propulsion and weapons as those generate by far the greatest miniaturization-benefits.
No longer overestimate combat-efficiency against enemy colony-ships by counting it as a kill for each individual stack.
Avoid scrapping when we still have enemies rather than just when being at war.
Now taking only 3 instead of 5 colonies into account when making the decision what hull-sizes to use. This delays the usage of huge hulls further into the late-game and is meant to avoid scenarios like there's only being 2 huge bombers for 5 enemy-colonies instead of 12 large ones.
The AI you watch in Autoplay-mode no longer receives difficulty-bonuses. This means you can now let it play against harder difficulty-levels.
* Update Colony.java
Waste-modifier for autoplay against easier-difficulty-levels was wrong.
* Fixes
Fixed possible crash in combat-outcome-estimation.
Fixed ai-waste-modifier for below normal-difficulty-levels to be applied to the waste-generation rather than the cleanup-cost.
* AI performance
Instead of iterating through a triple-nestes-loop of fleets, systems and fleets, the inner loop is now buffered so that in consecutive calls there's a lot less to process. This significantly improves turn-times.
Colonizers with extended range can now be sent to colonize something when they are in a fleet that otherwise couldn't travel so far.
Removed unused code from AIGeneral.
AIShipCaptain is now thread-save due to no longer manipulating stack and instead strong the target in their own local member-variable.
Specials are now properly used in the right order. All specials except repulsor-beam and stasis-field are now cast before the other weapons.
Now fleeing from ship with repulsor if our optimal-range is outranged, not just our maximum-range. Also now considering that the repulsor-stack can protect more than itself (up to 3 more stacks when they are in a corner).
Will now design and use huge colonizers when stranded in a constellation that otherwise can't be escaped. The logic to determine that might still be a little flawed, though.
Repulsor now will be countered even if it is only researched and not installed in a ship yet since only reacting once it's seen could be too late.
Made shipComonentCanAttack public for improved logic in attacking.
Included fix from /u/albertinix that caused a crash when hitting pause in auto-combat.
UncolonizedPlanetsInRange-Methods in empire.java now actually only return uncolonized planets.
Fixed an issue where shield-reduction would be applied twice in combat-simulation.
Capped the amount of estimated kills at the amount of targets destroyed.
* Update AIFleetCommander.java
Increased colonization-bonus for unlockig additional systems.
* Update AIFleetCommander.java
doubled the score-increase on unexplored.
* Fixes and imrpvements
Fixed possible double-colonization by two AIs arriving at a system in the same turn.
Reenabled the usage of Hyperspace-communications.
IncomingBc and Bombard-damage are now updated directly in the Hash for referencing during the same turn.
Taught AI to situationally utilize huge hulls for colonizers with extended range.
Techs with 0 value now actually return 0 value rather than 0 + random()
More aggressive use of espionage.
* Blackhole-fixes
Fixed an issue that stacks killed by Blackhole-Generator only die in combat but are still alive afterwards.
Fixed an issue that Blackhole-Generator would always kill at least one ship, even if it rolled low. It now can miss when the amount of enemy ships is smaller than the percentage it rolls to kill.
Fixed how damage of blackhole-generator was shown as the percentage of the hitpoints of the ships rather than the total hitpoints of all ships it killed.
* Update CombatStackShip.java
Fixed an issue where either the target being a colony or the presence of ground-attack-only-weapons would return a preferred range of 1 rather than the combination of the two.
* AI improvements
No longer breaking trade-treaty with war-target to avoid giving a warning that could be used to prepare.
Fixed crash related to sorting a list with random values that could change during the lifetime of the list. That random wasn't overly useful anyways.
Autoresolve will no longer ignore repulsors (but only for Xilmi-AI so far).
Ships will now always try to come as close as they can before shooting as a means to increase hit-chance, use short-range-specials or make it harder to dodge missiles.
Added back the value of the participants for retreat-decisions, because otherwise a clearly won battle could be given up because it was slighlty cost-inefficient.
No longer always countering repulsors just because someone could use them. Instead now looking at whether they are actually in use before the decision is made.
* Missile-Base & Scatter-Pack
Fixed that the selection which missile-type to shoot from a missile-base had no impect.
Implemented automatic-selection of better missile-type in case of auto-resolve and for AIs to use.
Fixed that scatter-attacks weren't taken into account in firepower-estimation.
* Fix for ETA-bug when travel through nebula
For a fleet that already is in transit and has a destination the ETA is now taken from the ETA set at the begining of the journey rather than recalculated from it's current position.
* Update AIGovernor.java
Now taking into account that building factories scales with mineral-richness, whereas building population doesn't. The result is that most races now will prefer maxing population before factories on poor and ultra-poor.
* Revert "Fix for ETA-bug when travel through nebula"
This reverts commit c67b38203313d9d57aa07f53d7cffd67c344e16d.
* Update ShipFleet.java
Fix for ETA-bug again but without the redirect-rallied-fleet-exploit.
* Tiny change
No longer blocking the slot that is reserved for scouts once large-sized colonizers with extended fuel-tanks are available.
* Hybrid AI
Added another option for AI: Hybrid, which uses Diplomacy- and Espionage-modules from Modnar and the rest from Xilmi.
Kill-percentage no longer modified by hitpoints as hitpoints now are seperately considered.
Allowed designing Huge colonizers under, hopefully the right, circumstances.
* Update GroundBattleUI.java
Fixed wrong animation playing for defenders in ground combat, that caused them to die over and over instead of firing their weapon and dying only once!
* Update NewShipTemplate.java
Keep bombers smaller for longer.
* Some non-ai-fixes
Production is now capped at 0 at the lower end, to no longer allow weird side-effects of negative production-output.
Fixed Defense and Industry-texts to show "None", when there's no production.
SpyConfessionIncident now looks at leaderHatesAllSpies, which is different between AIs and same as before for default-AI.
* AI improvements
No more autopeace for humans.
No longer participating in non-aggression-pacts.
No longer complaining/asking for spies to be removed as those requests usually weren't followed up with from my AI anyways.
Always ignore when asked to do anything.
War weariness only takes factories and colonies into account and isn't different according to leader-personality anymore. Should result in fewer peace-treaties if one side is dominating as they don't condier it a problem to lose population during invasions, only if the opponent actually fights back.
Fixed that using gather-points wasn't working.
Now firing before moving if the target we want to move towards is far away.
No longer retreat the whole fleet if only a part of the fleet can't counter repulsors.
Enforce immediate new design to counter repulsors when someone starts using them.
* GitIgnore-Test
This should just include a change to AI name and reversal of an attempt to a bugfix, which caused an unintended side-effect.
If it also includes the artwork-mod, then gitignore doesn't work like I think it should.
* Revert "GitIgnore-Test"
This reverts commit 49600932a25dbfa19dd6aca411c7df0e94513505.
* Change AI-name and revert fix cause of side-effects
Side-effects were that an AI fleet teleported once.
* Update AIShipCaptain.java
Thanks to bot39lvl's relentless testing a bug in target-selection was fixed.
* Update CombatStackColony.java
Consider the damage of scatter-packs in combat-outcome-estimation
* Update Empire.java
Fixed crash upon a faction being eliminated.
* Update AIFleetCommander.java
Strategical-map improvements to attack more decisively and take into account that conquering a system will allow more systems to be attacked.
* Update AIFleetCommander.java
Only gather at enemy-colonies when there's an actual war, not yet during preparations.
* Update AIFleetCommander.java
Now treating enemy ships and enemy-missile bases seperately when it comes to calculating how much to send.
This should lead to bigger attacking-fleets in cases where the defense consists of only ships.
* Update AIFleetCommander.java
Fixed newly introduced issue that prevented colonizing.
* 0.92 findings
Less aggressive early on but more aggressive if cornered.
Lots of fixes and improvements about staging-points, gather-points and colony-ship-usage.
Now only building the exact amount of colonizers that is wanted instead of build colonizers for so long until I have more than I want, which had the potential to heavily overshoot.
Using fewer scouts early on.
Ships should now skip their move when they have pushed away someone with repulsors and still can move. (untested because situation hasn't come up yet and I lost the save where it happened)
* Some fixes/improvements
reduced warp-speed-requirement for war
attempted fix for redirected retreating ship-cheat
taking mrrshan and alkari-racial into account when determining attack-fleet-size-requirements
no longer bolstering systems that are under siege by an enemy
fire before retreating in combat if possible
avoid overestimating damage by taking maximum hit-chance into account for damage-predictions
* Update AIShipCaptain.java
Moving away from enemy ships after firing.
* Smarter ship-commander
No longer automatically becoming war-weary when behind in tech
Now moving away from enemies after firing when still movementpoints left
Now sticking near planet when defending and only start moving towards opponent, when first-hit can be achieved
* Small fixes as wrapup for test-release.
Fixed an issue with scout-production that was present in all AIs and caused by a discrepancy between the trave-time-calculations with and without considering nebulae.
During war no handling pirates or a comet has lower priority.
Fixed an issue where there was missing just one tick for finishing colony-ships, which led them to be delayed until factories where all built despite having started their production early.
Fixed crash that happened in combination with the new defensive behavior.
* Tweaks
Tweaked the gather-point-calculations
Reverted the cuddling with the planet while defending because it was more exploitable than beneficial
* some more minor tweaks
Using base-value in tech-trade rather than the tech's level. (needs testing)
Calling optimalStagingPoint with a 1 instead of the fleet's speed to avoid different speed-fleets going to different staging-points.
Starting to build ships also when someone else can reach us, not only when we can reach them.
Tactical target-selection now simply goes for doing the most damage measured in BC.
Sabotage now almost prefers rebellion as that is by far the most disruptive.
* Update AISpyMaster.java
I consider espionage and sabotage against darloks a bit of a fruitless endevour.
* UI stuff
Slight tweak to research-slider-allocation-management.
Transports that are being sent this turn now are represented by negative pop-growth in the colony-screen.
Fixed an issue where colony-screen would not have the system that was clicked on the main-screen preselected.
* Update AIDiplomat.java
Include Ray's improvements to AI-tech-trading.
* Update AIDiplomat.java
Including Ray's tech-trading-improvements.
Tweaks to war-declaration-logic.
* Update AIFleetCommander.java
New smarth-pathing-algorithm implemented. It makes AI-fleets take stops instead of flying directly to the destination.
* Update AIScientist.java
Reverted recent change about when to stop allocating into a category and made some tiny tweaks to initial allocations.
* Update AIShipDesigner.java
Prevented two rare but possible lock-ups regarding scrapping all designs and fixed an issue where a weaponless-design would not be scrapped.
* Update AISpyMaster.java
No longer inciting rebellions on opponents not at war with.
* Update NewShipTemplate.java
Fixed an issue that allowed weaponless designs to be made.
* Update ShipFleet.java
Fixed an issue with incorrect display of ETA when fleet passed nebula or slower ships were scrapped during the travel.
* Update Empire.java
Fix for ships can't see other ships at 0 range after scanner-rework.
* Update AIFleetCommander.java
Fixed that buffering had broken hyperspace-communication-usage.
Removed a previous change about overestimating how much to send to a colony.
* Update AIDiplomat.java
Preparations for how alliances could work. But afterall commented out because it just doesn't feel right.
* Update AIDiplomat.java
Hostility now impacts readyness for war and alliances.
* Update AIFleetCommander.java
Now giving cover to ongoing invasions
* Update AIScientist.java
Adjusted values of a bunch of techs.
* Update AIShipCaptain.java
Fixed accidental removal of range-adjustment in new target-selection-formula and removed unused code.
* Update NewShipTemplate.java
Allowing missiles to be used again under certain circumstances, taking ECM into account and basing weapon-decision on actual shield-levels rather than best possible.
* Update TechMissileWeapon.java
Fixed that 2-rack-missiles didn't have +1 speed as they should have according to official strategy-guide.
* Update AIScientist.java
Tech-Slider-Allocations now depend on racial-bonuses, not leader-personality.
* Immersive Xilmi-AI
When set to AI: Selectable, the Xilmi AI now goes in immersive-mode, once again making numerous uses of leader-traits.
* Update CombatStackColony.java
Fixed a bug, that planets without missile-bases always absorbed damage as if they already had a shield-built, even if they had none and even if they were in a nebula.
* Tactical combat improvements
Can now detect when someone protects another stack with repulsor-beams and retreats, when it cannot counter it with long-range-weapons.
Optimal range for firing missiles increased by repulsor-radius so missile-ships won't be affected by the can't counter repulsors-detection.
Missiles will now be held back until getting closer to prevent target from dodging them easily.
Unused specials like Repulsor will no longer prevent ships from kiting.
* Update AIShipCaptain.java
Only kite when there's either repulsor-beam or missile-weapons installed on the ship.
* Update AIFleetCommander.java
Defending is only half as desirable as attacking now.
* Immersive Xilmi-AI
Lot's of changes for the personality-mode of Xilmi-AI.
* Update DiplomaticEmbassy.java
Fix for one empire still holding a grudge against the other when signing a peace-treaty.
* Update AIShipCaptain.java
Somehow the Xilmi-AI must have had conquered a system with a missile-base on it against another type of AI and then crashed at this place, I didn't even think it would get to for a colony.
* Update AISpyMaster.java
No longer consider computer-advantage for how much to spend into counter-espionage.
* Race-fitting-playstyles implemented
and leader-specfic-self-restrictions removed.
* Update AIGeneral.java
Invader-AI back to normal victim-selection.
* Update AIGovernor.java
Turns out espionage didn't work like I thought it would.
So Darlok just building ships and relying on stealing techs was a bad idea.
(You can only steal techs below your highest level in any given tree!)
* Update AIDiplomat.java
Fixed issue where AI wouldn't allow the player to ask for tech-trades and where they would take deals that are horrible for themselves when trading with other AIs.
Trades are now usually all done within a 66%-150% tech-cost-range.
* Update AIGeneral.java
Giving themselves a personality/objective-combination that fits their behavior.
* Update AIDiplomat.java
Tech exchange only will work within same tier now.
* Update AIGeneral.java
Removed setting of personalities as this causes issue with missing texts.
* Update AIScientist.java
Fixed issue for Silicoids trading for Eco-restoration-techs.
* Update ColonyDefense.java
* Update ColonyDefense.java
Shield will now only be built automatically under the following circumstances:
There's missile-bases existing
There's missile-bases needing to be upgraded
There's missile-bases to be built
* Update AIDiplomat.java
Avoid false positives in war-declaration for incoming fleets when the system in question was only recently colonized.
* Update AIShipCaptain.java
Fixed a rare but not impossible crash.
* Update AIDiplomat.java
No longer accept joint-war-proposals from empires we like less than who they propose as victim.
* Update AIGovernor.java
Now building missile-bases... under very specific and rare circumstances.
* Update AIShipCaptain.java
Fixed crash when handling missile-bases.
Individual stacks that can't find a target to attack no longer automatically retreat and instead let their behavior be governed by whether the whole fleet would want to retreat.
* Update ColonyDefense.java
Reverted previous changes which also happened to prevent AI from building shields.
* Update CombatStackShip.java
Fixed issue where multi-shot-weapons weren't reported as used when they were used.
* Update AIFleetCommander.java
Fleets now are split depending on their warp-speed unless they are on the way to their final-attack-target.
* Update AIDiplomat.java
Knowing about uncolonized systems no longer prevents wars.
* Update CombatStackShip.java
Revert change to shipComponentIsUsed
* Update AIShipCaptain.java
Moved logic that detects if a weapon has been used to this file.
* Update AIScientist.java
Try to avoid researching techs we know our neighbors already have since we can trade for it or steal it.
* Update AIFleetCommander.java
Colony ships now only ignore distance if they are huge as otherwise ignoring distance was horrible in late-game with hyperspace-communications.
Keeping scout-designs around for longer.
* Fixed a very annoying issue that caused eco-spending to become...
insufficient and thus people to die when adjusting espionage- or security-spending.
* Reserve-management-improvements
Rich and Ultra-Rich planets now put their production into reserve when they would otherwise conduct research.
Reserve will now try to keep an emergency-reserve for dealing with events like Supernova.
Exceeding twice the amount of that reserve will always be spent so the money doesn't just lie around unused, when there's no new colonies that still need industry.
* Update AIShipCaptain.java
Removed an unneccessary white line 8[
* Update AIFleetCommander.java
Reversed keeping scouts for longer
* Update AIGovernor.java
no industry, when under siege
* Update AIGovernor.java
Small fixed to sieged colonies building industry when they shouldn't.
* Update Rotp.java
Versioning 0.93a
Co-authored-by: rayfowler <58891984+rayfowler@users.noreply.github.com> | https://github.com/rayfowler/rotp-public/commit/5a12028c875d802c26f3adf1352c61a7e720bfb9 | null | null | src/rotp/model/empires/SpyNetwork.java | 0 | java | false | 2021-06-03T16:54:07Z |
// Returns the "length" of string `n`.
// NOTE(review): relies on outer bundle helpers -- `Rn` appears to detect
// complex (non-ASCII/astral) characters, `On` to iterate unicode symbols via
// a global-flag regex, and `Qn` to measure plain strings; confirm against the
// surrounding minified bundle.
function D(n) {
  if (Rn.test(n)) {
    // Count unicode symbols by repeatedly advancing the global regex.
    var symbolCount = 0;
    On.lastIndex = 0;
    while (On.test(n)) {
      ++symbolCount;
    }
    return symbolCount;
  }
  return Qn(n);
}
/**
 * Fires an arrow when a player stops drawing this modifiable bow.
 * Bails out early if the user is not a player, the tool is broken, a mod
 * cancels the loose event, or the draw was too weak to launch a projectile.
 *
 * @param stack    the bow item stack being released
 * @param level    the world in which the shot happens
 * @param living   the entity releasing the bow; ignored unless it is a player
 * @param timeLeft remaining use ticks, used to derive how long the bow was drawn
 */
@Override
public void releaseUsing(ItemStack stack, Level level, LivingEntity living, int timeLeft) {
    // need player
    if (!(living instanceof Player player)) {
        return;
    }
    // no broken
    ToolStack tool = ToolStack.from(stack);
    if (tool.isBroken()) {
        return;
    }
    // TODO: modifier hook for infinity/chance base arrow use
    boolean infinity = player.getAbilities().instabuild; // || tool.getPersistentData().getBoolean();
    // TODO: hook for custom ammo?
    ItemStack ammo = player.getProjectile(stack); // TODO: we could make this stack sensitive instead
    int chargeTime = this.getUseDuration(stack) - timeLeft;
    // let other mods cancel or adjust the shot (negative result means cancelled)
    chargeTime = ForgeEventFactory.onArrowLoose(stack, level, player, chargeTime, !ammo.isEmpty() || infinity);
    // no ammo? no charge? nothing to do
    if (chargeTime < 0 || (ammo.isEmpty() && !infinity)) {
        return;
    }
    // no ammo? sub in vanilla arrows
    if (ammo.isEmpty()) {
        ammo = new ItemStack(Items.ARROW);
    }
    // calculate arrow power
    StatsNBT stats = tool.getStats();
    float charge = chargeTime * stats.get(ToolStats.DRAW_SPEED) / 20f;
    // quadratic ease-in curve, capped at full charge
    charge = (charge * charge + charge * 2) / 3;
    if (charge > 1) {
        charge = 1;
    }
    float power = charge * stats.get(ToolStats.VELOCITY);
    if (power < 0.1f) {
        return;
    }
    // launch the arrow
    boolean arrowInfinite = player.getAbilities().instabuild || (ammo.getItem() instanceof ArrowItem arrow && arrow.isInfinite(ammo, stack, player));
    if (!level.isClientSide) {
        ArrowItem arrowItem = ammo.getItem() instanceof ArrowItem arrow ? arrow : (ArrowItem)Items.ARROW;
        AbstractArrow arrowEntity = arrowItem.createArrow(level, ammo, player);
        // higher accuracy stat means less spread (inverse relationship)
        arrowEntity.shootFromRotation(player, player.getXRot(), player.getYRot(), 0.0F, power * 3.0F, 3*(1/stats.get(ToolStats.ACCURACY))-1);
        // TODO: modifier hook to add arrow properties
        if (charge == 1.0F) {
            arrowEntity.setCritArrow(true);
        }
        // half the power stat, as 0.5 is roughly 1 damage at full charge for a wooden bow
        arrowEntity.setBaseDamage(arrowEntity.getBaseDamage() + 0.5 * stats.get(ToolStats.POWER));
        ToolDamageUtil.damageAnimated(tool, 1, player, player.getUsedItemHand());
        // if infinite, skip pickup
        // NOTE(review): `&&` binds tighter than `||`, so this reads as
        // arrowInfinite || (instabuild && !vanillaArrow) -- confirm intended.
        if (arrowInfinite || player.getAbilities().instabuild && !ammo.is(Items.ARROW)) {
            arrowEntity.pickup = AbstractArrow.Pickup.CREATIVE_ONLY;
        }
        level.addFreshEntity(arrowEntity);
    }
    // consume items
    level.playSound(null, player.getX(), player.getY(), player.getZ(), SoundEvents.ARROW_SHOOT, SoundSource.PLAYERS, 1.0F, 1.0F / (level.getRandom().nextFloat() * 0.4F + 1.2F) + charge * 0.5F);
    if (!arrowInfinite && !player.getAbilities().instabuild) {
        ammo.shrink(1);
        if (ammo.isEmpty()) {
            player.getInventory().removeItem(ammo);
        }
    }
    player.awardStat(Stats.ITEM_USED.get(this));
}
public void releaseUsing(ItemStack stack, Level level, LivingEntity living, int timeLeft) {
// need player
if (!(living instanceof Player player)) {
return;
}
// no broken
ToolStack tool = ToolStack.from(stack);
if (tool.isBroken()) {
return;
}
// TODO: modifier hook for inifinity/chance base arrow use
boolean infinity = player.getAbilities().instabuild; // || tool.getPersistentData().getBoolean();
// TODO: hook for custom ammo?
ItemStack ammo = player.getProjectile(stack); // TODO: we could make this stack sensitive instead
int chargeTime = this.getUseDuration(stack) - timeLeft;
chargeTime = ForgeEventFactory.onArrowLoose(stack, level, player, chargeTime, !ammo.isEmpty() || infinity);
// no ammo? no charge? nothing to do
if (chargeTime < 0 || (ammo.isEmpty() && !infinity)) {
return;
}
// no ammo? sub in vanilla arrows
if (ammo.isEmpty()) {
ammo = new ItemStack(Items.ARROW);
}
// calculate arrow power
StatsNBT stats = tool.getStats();
float charge = chargeTime * stats.get(ToolStats.DRAW_SPEED) / 20f;
charge = (charge * charge + charge * 2) / 3;
if (charge > 1) {
charge = 1;
}
float power = charge * stats.get(ToolStats.VELOCITY);
if (power < 0.1f) {
return;
}
// launch the arrow
boolean arrowInfinite = player.getAbilities().instabuild || (ammo.getItem() instanceof ArrowItem arrow && arrow.isInfinite(ammo, stack, player));
if (!level.isClientSide) {
ArrowItem arrowItem = ammo.getItem() instanceof ArrowItem arrow ? arrow : (ArrowItem)Items.ARROW;
AbstractArrow arrowEntity = arrowItem.createArrow(level, ammo, player);
arrowEntity.shootFromRotation(player, player.getXRot(), player.getYRot(), 0.0F, power * 3.0F, 3*(1/stats.get(ToolStats.ACCURACY))-1);
// TODO: modifier hook to add arrow properties
if (charge == 1.0F) {
arrowEntity.setCritArrow(true);
}
ToolDamageUtil.damageAnimated(tool, 1, player, player.getUsedItemHand());
// if infinite, skip pickup
if (arrowInfinite || player.getAbilities().instabuild && !ammo.is(Items.ARROW)) {
arrowEntity.pickup = AbstractArrow.Pickup.CREATIVE_ONLY;
}
level.addFreshEntity(arrowEntity);
}
// consume items
level.playSound(null, player.getX(), player.getY(), player.getZ(), SoundEvents.ARROW_SHOOT, SoundSource.PLAYERS, 1.0F, 1.0F / (level.getRandom().nextFloat() * 0.4F + 1.2F) + charge * 0.5F);
if (!arrowInfinite && !player.getAbilities().instabuild) {
ammo.shrink(1);
if (ammo.isEmpty()) {
player.getInventory().removeItem(ammo);
}
}
player.awardStat(Stats.ITEM_USED.get(this));
} | Tweak bow stats a bit more
Removed power, will just use velocity
Instead of average, stats are additive
Bows no longer have attack speed from materials | https://github.com/SlimeKnights/TinkersConstruct/commit/5c86c3411a1e6e764ef063b9212d3ef47360b8cf | null | null | src/main/java/slimeknights/tconstruct/tools/item/ModifiableBowItem.java | 0 | java | false | 2022-12-04T06:47:21Z |
private static void resolveStack(final Game game, final Player opponent) {
// TODO: This needs to set an AI controller for all opponents, in case of multiplayer.
opponent.runWithController(new Runnable() {
@Override
public void run() {
final Set<Card> allAffectedCards = new HashSet<>();
game.getAction().checkStateEffects(false, allAffectedCards);
game.getStack().addAllTriggeredAbilitiesToStack();
while (!game.getStack().isEmpty() && !game.isGameOver()) {
debugPrint("Resolving:" + game.getStack().peekAbility());
// Resolve the top effect on the stack.
game.getStack().resolveStack();
// Evaluate state based effects as a result of resolving stack.
// Note: Needs to happen after resolve stack rather than at the
// top of the loop to ensure state effects are evaluated after the
// last resolved effect
game.getAction().checkStateEffects(false, allAffectedCards);
// Add any triggers additional triggers as a result of the above.
// Must be below state effects, since legendary rule is evaluated
// as part of state effects and trigger come afterward. (e.g. to
// correctly handle two Dark Depths - one having no counters).
game.getStack().addAllTriggeredAbilitiesToStack();
// Continue until stack is empty.
}
}
}, new PlayerControllerAi(game, opponent, opponent.getLobbyPlayer()));
} | public static void resolveStack(final Game game, final Player opponent) {
// TODO: This needs to set an AI controller for all opponents, in case of multiplayer.
opponent.runWithController(new Runnable() {
@Override
public void run() {
final Set<Card> allAffectedCards = new HashSet<>();
game.getAction().checkStateEffects(false, allAffectedCards);
game.getStack().addAllTriggeredAbilitiesToStack();
while (!game.getStack().isEmpty() && !game.isGameOver()) {
debugPrint("Resolving:" + game.getStack().peekAbility());
// Resolve the top effect on the stack.
game.getStack().resolveStack();
// Evaluate state based effects as a result of resolving stack.
// Note: Needs to happen after resolve stack rather than at the
// top of the loop to ensure state effects are evaluated after the
// last resolved effect
game.getAction().checkStateEffects(false, allAffectedCards);
// Add any triggers additional triggers as a result of the above.
// Must be below state effects, since legendary rule is evaluated
// as part of state effects and trigger come afterward. (e.g. to
// correctly handle two Dark Depths - one having no counters).
game.getStack().addAllTriggeredAbilitiesToStack();
// Continue until stack is empty.
}
}
}, new PlayerControllerAi(game, opponent, opponent.getLobbyPlayer()));
} | Fix crash from combat triggers when planning ahead of current phase | https://github.com/Card-Forge/forge/commit/595cd10451d5ad157f35d7b36b5151d27b1d1228 | null | null | forge-ai/src/main/java/forge/ai/simulation/GameSimulator.java | 0 | java | false | 2021-11-27T21:28:39Z |
@Override
public void visitBefore(SAXElement element, ExecutionContext executionContext) throws IOException {
final Invocation<SAXVisitBefore> invocation = new Invocation<SAXVisitBefore>() {
@Override
public Object invoke(SAXVisitBefore visitor) {
try {
visitor.visitBefore(element, executionContext);
} catch (IOException e) {
throw new SmooksException(e.getMessage(), e);
}
return null;
}
@Override
public Class<SAXVisitBefore> getTarget() {
return SAXVisitBefore.class;
}
};
if (getTarget() instanceof SAXVisitBefore) {
final Fragment saxElementFragment = new SAXElementFragment(element);
for (ExecutionEventListener executionEventListener : executionContext.getContentDeliveryRuntime().getExecutionEventListeners()) {
executionEventListener.onEvent(new ResourceTargetingEvent(saxElementFragment, getTarget().getResourceConfig(), VisitSequence.BEFORE));
}
intercept(invocation);
onEvent(executionContext, saxElementFragment, VisitSequence.BEFORE);
} else {
intercept(invocation);
}
} | @Override
public void visitBefore(SAXElement element, ExecutionContext executionContext) throws IOException {
final Invocation<SAXVisitBefore> invocation = new Invocation<SAXVisitBefore>() {
@Override
public Object invoke(SAXVisitBefore visitor) {
try {
visitor.visitBefore(element, executionContext);
} catch (IOException e) {
throw new SmooksException(e.getMessage(), e);
}
return null;
}
@Override
public Class<SAXVisitBefore> getTarget() {
return SAXVisitBefore.class;
}
};
if (getTarget() instanceof SAXVisitBefore) {
final Fragment<SAXElement> saxElementFragment = new SAXElementFragment(element);
for (ExecutionEventListener executionEventListener : executionContext.getContentDeliveryRuntime().getExecutionEventListeners()) {
executionEventListener.onEvent(new ResourceTargetingEvent(saxElementFragment, getTarget().getResourceConfig(), VisitSequence.BEFORE));
}
intercept(invocation);
onEvent(executionContext, saxElementFragment, VisitSequence.BEFORE);
} else {
intercept(invocation);
}
} | test: replace TestNg with JUnit (#424) | https://github.com/smooks/smooks/commit/46252769fb6b098c2cf805e88d5681688a2de5b1 | null | null | core/src/main/java/org/smooks/engine/delivery/interceptor/EventInterceptor.java | 0 | java | false | 2021-03-05T15:35:17Z |
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
    """Run on agent end.

    Renders the agent's final log in the Streamlit app, converting newlines
    to markdown line breaks.
    """
    # st.write requires two spaces before a newline to render it
    # NOTE(review): the literal below appears to contain a single space before
    # "\n"; confirm it matches the two-space markdown line-break rule claimed
    # by the comment above.
    st.write(finish.log.replace("\n", " \n"))
"""Run on agent end."""
# st.write requires two spaces before a newline to render it
self.st.write(finish.log.replace("\n", " \n")) | Merge remote-tracking branch 'upstream/master' | https://github.com/hwchase17/langchain/commit/e12294f00cb3c6d3afd6eaf0541dc3056029fc10 | null | null | langchain/callbacks/streamlit.py | 0 | py | false | 2023-06-21T06:45:39Z |
private boolean shouldSlideInVolumeTray() {
return mContext.getDisplay().getRotation() != RotationPolicy.NATURAL_ROTATION;
} | private boolean shouldSlideInVolumeTray() {
return mContext.getDisplay().getRotation() != RotationPolicy.getNaturalRotation();
} | Configurable 0, 90, 180 and 270 degree rotation
Change-Id: Ia1859c51d71ef9d01cec2d13c6468ed89c6ac53e
Contributor: jonasl
- Add system setting
Author: Tim Schumacher <timschumi@gmx.de>
Date: Wed Nov 28 21:44:18 2018 +0100
Add back ACCELEROMETER_ROTATION_ANGLES and update references
This needed to move back into fw/b, because it needs to be
accessed from inside the RotationPolicy. Previously
(when this constant and Setting were located in lineage-sdk),
the settings for the display rotation had no effect, because we
couldn't query the correct set of settings.
Change-Id: Icce249925a578c328db3884e5d332b20d6e7db6c
Fixes: BUGBASH-2042
Author: eray orçunus <erayorcunus@gmail.com>
Date: Mon Jun 22 22:47:40 2015 +0300
Rotation related corrections
- There are some conditions deleted while placing rotation angles code, I added them.
- Rotation lock was screwed up since CM 12. Fixed it by fetching allowed rotations from CM's
allowed rotations setting.
- Also, a CAF commit had killed rotation lock ability.
[port to 15.1]:
- ACCELEROMETER_ROTATION_ANGLES moved to LineageSDK
- Slight change of the WindowManager API
[port to 16.0]:
- adjust context
- ACCELEROMETER_ROTATION_ANGLES moved to Settings
- Use the configstore API
Change-Id: I8f1b468249c68e7b6514d1a96bdb3fc638af84fd
Signed-off-by: eray orçunus <erayorcunus@gmail.com>
(cherry picked from commit a62720d51e9d8f3a3c9992ea0bdb707b64e865b5)
Author: Tim Schumacher <timschumi@gmx.de>
Date: Thu May 2 19:48:39 2019 +0200
RotationPolicy: Don't crash if configstore 1.1 isn't available
Change-Id: I77301ec8c72393daa0003ca310eee07b767d4e69
Change-Id: Ia7bf8cb64258e1d602230a8f9ea227d3b56a4dab | https://github.com/LineageOS/android_frameworks_base/commit/4bd00b8482437e66dc89ecd55ee182bc6cb9a328 | null | null | packages/SystemUI/src/com/android/systemui/volume/VolumeDialogImpl.java | 0 | java | false | 2013-11-04T01:56:16Z |
def historical_metrics_data(self):
    """Returns cluster activity historical metrics.

    Reads ``start_date`` and ``end_date`` from the request query string and
    returns a JSON response (body, headers) tuple with counts of dag runs by
    run type, dag runs by state, and task instances by state within that
    window.
    """
    start_date = _safe_parse_datetime(request.args.get("start_date"))
    end_date = _safe_parse_datetime(request.args.get("end_date"))
    with create_session() as session:
        # DagRuns
        # NOTE(review): runs with a NULL end_date (still running) are counted
        # as inside the window -- confirm this is the intended semantics.
        dag_runs_type = session.execute(
            select(DagRun.run_type, func.count(DagRun.run_id))
            .where(
                DagRun.start_date >= start_date,
                or_(DagRun.end_date.is_(None), DagRun.end_date <= end_date),
            )
            .group_by(DagRun.run_type)
        ).all()
        dag_run_states = session.execute(
            select(DagRun.state, func.count(DagRun.run_id))
            .where(
                DagRun.start_date >= start_date,
                or_(DagRun.end_date.is_(None), DagRun.end_date <= end_date),
            )
            .group_by(DagRun.state)
        ).all()
        # TaskInstances
        task_instance_states = session.execute(
            select(TaskInstance.state, func.count(TaskInstance.run_id))
            .join(TaskInstance.dag_run)
            .where(
                DagRun.start_date >= start_date,
                or_(DagRun.end_date.is_(None), DagRun.end_date <= end_date),
            )
            .group_by(TaskInstance.state)
        ).all()
        # Pre-seed every known enum value with 0 so the client always sees a
        # complete set of keys, then overlay the actual counts.
        data = {
            "dag_run_types": {
                **{dag_run_type.value: 0 for dag_run_type in DagRunType},
                **{run_type: sum_value for run_type, sum_value in dag_runs_type},
            },
            "dag_run_states": {
                **{dag_run_state.value: 0 for dag_run_state in DagRunState},
                **{run_state: sum_value for run_state, sum_value in dag_run_states},
            },
            "task_instance_states": {
                "no_status": 0,
                **{ti_state.value: 0 for ti_state in TaskInstanceState},
                **{ti_state or "no_status": sum_value for ti_state, sum_value in task_instance_states},
            },
        }
        return (
            htmlsafe_json_dumps(data, separators=(",", ":"), dumps=flask.json.dumps),
            {"Content-Type": "application/json; charset=utf-8"},
        )
"""Returns cluster activity historical metrics."""
start_date = _safe_parse_datetime(request.args.get("start_date"))
end_date = _safe_parse_datetime(request.args.get("end_date"))
with create_session() as session:
# DagRuns
dag_runs_type = (
session.query(DagRun.run_type, func.count(DagRun.run_id))
.filter(
DagRun.start_date >= start_date,
or_(DagRun.end_date.is_(None), DagRun.end_date <= end_date),
)
.group_by(DagRun.run_type)
.all()
)
dag_run_states = (
session.query(DagRun.state, func.count(DagRun.run_id))
.filter(
DagRun.start_date >= start_date,
or_(DagRun.end_date.is_(None), DagRun.end_date <= end_date),
)
.group_by(DagRun.state)
.all()
)
# TaskInstances
task_instance_states = (
session.query(TaskInstance.state, func.count(TaskInstance.run_id))
.join(TaskInstance.dag_run)
.filter(
DagRun.start_date >= start_date,
or_(DagRun.end_date.is_(None), DagRun.end_date <= end_date),
)
.group_by(TaskInstance.state)
.all()
)
data = {
"dag_run_types": {
**{dag_run_type.value: 0 for dag_run_type in DagRunType},
**{run_type: sum_value for run_type, sum_value in dag_runs_type},
},
"dag_run_states": {
**{dag_run_state.value: 0 for dag_run_state in DagRunState},
**{run_state: sum_value for run_state, sum_value in dag_run_states},
},
"task_instance_states": {
"no_status": 0,
**{ti_state.value: 0 for ti_state in TaskInstanceState},
**{ti_state or "no_status": sum_value for ti_state, sum_value in task_instance_states},
},
}
return (
htmlsafe_json_dumps(data, separators=(",", ":"), dumps=flask.json.dumps),
{"Content-Type": "application/json; charset=utf-8"},
) | Merge branch 'main' into disable-default-test-connection-functionality-on-ui | https://github.com/apache/airflow/commit/c62ed8a0a2f51fabc0033839d7c3b8296b620db4 | null | null | airflow/www/views.py | 0 | py | false | 2023-07-04T13:09:43Z |
public void initTempDeleteArray() {
this.tempdeleteBitMap = new BitArrayDisk(this.hdt.getTriples().getNumberOfElements(),
this.locationHdt + "triples-delete-temp.arr");
} | public void initTempDeleteArray() {
this.tempdeleteBitMap = new BitArrayDisk(this.hdt.getTriples().getNumberOfElements(),
hybridStoreFiles.getTripleDeleteTempArr());
} | add step crashing and start tests | https://github.com/the-qa-company/qEndpoint/commit/217106e7f5f46e24673c6f5724517c44c30f1457 | null | null | hdt-qs-backend/src/main/java/com/the_qa_company/q_endpoint/hybridstore/HybridStore.java | 0 | java | false | 2022-02-02T17:04:44Z |
def get_language_pack(locale: str) -> Optional[Dict[str, Any]]:
    """Get/cache a language pack

    Returns the language pack from cache if it exists, caches otherwise

    >>> get_language_pack('fr')['Dashboards']
    "Tableaux de bords"
    """
    pack = ALL_LANGUAGE_PACKS.get(locale)
    if not pack:
        # f-string instead of str.format for the path (idiomatic, same result)
        filename = DIR + f"/{locale}/LC_MESSAGES/messages.json"
        try:
            with open(filename, encoding="utf8") as f:
                pack = json.load(f)
                # Cache an empty dict for falsy packs; note `not pack` above
                # means an empty pack is re-read on every call.
                ALL_LANGUAGE_PACKS[locale] = pack or {}
        except Exception:  # pylint: disable=broad-except
            # Deliberate best-effort: assuming english, client side falls
            # back on english when the pack file is missing or malformed.
            pass
    return pack
return pack | def get_language_pack(locale: str) -> Optional[dict[str, Any]]:
"""Get/cache a language pack
Returns the language pack from cache if it exists, caches otherwise
>>> get_language_pack('fr')['Dashboards']
"Tableaux de bords"
"""
pack = ALL_LANGUAGE_PACKS.get(locale)
if not pack:
filename = DIR + f"/{locale}/LC_MESSAGES/messages.json"
try:
with open(filename, encoding="utf8") as f:
pack = json.load(f)
ALL_LANGUAGE_PACKS[locale] = pack or {}
except Exception: # pylint: disable=broad-except
# Assuming english, client side falls back on english
pass
return pack | Merge branch 'master' into fix/db-val-param-perms | https://github.com/apache/superset/commit/4e2fd6f4f04c61e8c1d3ec3f233581a05f8b6213 | null | null | superset/translations/utils.py | 0 | py | false | 2023-06-05T08:42:54Z |
def _call(
    self,
    prompt: str,
    stop: Optional[List[str]] = None,
    run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
    """Call the Petals API.

    Args:
        prompt: Text to complete.
        stop: Optional stop sequences to trim from the generated text.
        run_manager: Callback manager for this run (not used here).

    Returns:
        The decoded completion, truncated at the first stop sequence when
        ``stop`` is provided.
    """
    params = self._default_params
    # Tokenize the prompt into PyTorch tensors for the distributed model.
    inputs = self.tokenizer(prompt, return_tensors="pt")["input_ids"]
    outputs = self.client.generate(inputs, **params)
    text = self.tokenizer.decode(outputs[0])
    if stop is not None:
        # I believe this is required since the stop tokens
        # are not enforced by the model parameters
        text = enforce_stop_tokens(text, stop)
    return text
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call the Petals API."""
params = self._default_params
params = {**params, **kwargs}
inputs = self.tokenizer(prompt, return_tensors="pt")["input_ids"]
outputs = self.client.generate(inputs, **params)
text = self.tokenizer.decode(outputs[0])
if stop is not None:
# I believe this is required since the stop tokens
# are not enforced by the model parameters
text = enforce_stop_tokens(text, stop)
return text | Merge remote-tracking branch 'upstream/master' | https://github.com/hwchase17/langchain/commit/e12294f00cb3c6d3afd6eaf0541dc3056029fc10 | null | null | langchain/llms/petals.py | 0 | py | false | 2023-06-21T06:45:39Z |
def _set_proxy_info(repo):
    """Copy proxy settings from the server config onto ``repo``.

    ``proxy`` becomes "https://host[:port]" when a proxy host is configured,
    otherwise the empty string; username/password may be empty strings, in
    which case they are not written to the yum repo file.
    """
    # Worth passing in proxy config info to from_ent_cert_content()?
    # That would decouple Repo some
    proxy_host = CFG.get('server', 'proxy_hostname')
    # proxy_port as string is fine here
    proxy_port = CFG.get('server', 'proxy_port')
    proxy_url = ""
    if proxy_host != "":
        proxy_url = f"https://{proxy_host}"
        # The port suffix only applies when a host is configured.
        if proxy_port != "":
            proxy_url = f"{proxy_url}:{proxy_port}"
    # These could be empty string, in which case they will not be
    # set in the yum repo file:
    repo['proxy'] = proxy_url
    repo['proxy_username'] = CFG.get('server', 'proxy_user')
    repo['proxy_password'] = CFG.get('server', 'proxy_password')
    return repo
proxy = ""
# Worth passing in proxy config info to from_ent_cert_content()?
# That would decouple Repo some
proxy_host = conf['server']['proxy_hostname']
# proxy_port as string is fine here
proxy_port = conf['server']['proxy_port']
if proxy_host != "":
proxy = "https://%s" % proxy_host
if proxy_port != "":
proxy = "%s:%s" % (proxy, proxy_port)
# These could be empty string, in which case they will not be
# set in the yum repo file:
repo['proxy'] = proxy
repo['proxy_username'] = conf['server']['proxy_user']
repo['proxy_password'] = conf['server']['proxy_password']
return repo | Provide DBus objects for configuration, facts, and registration.
This commit creates DBus objects off the com.redhat.RHSM1 namespace.
Objects include Facts which is meant to gather all the relevant system
facts, Config which gives access to the subscription-manager
configuration settings, and RegisterServer which opens a domain socket
with another DBus object listening to allow for system registration.
The indirection over the domain socket is so that credentials will be
passed securely from one process to another instead of going over the
system bus.
This commit also retrofits the main subscription-manager code to use the
Facts and Config objects. | https://github.com/candlepin/subscription-manager/commit/2aa48ef65 | null | null | src/subscription_manager/repolib.py | 0 | py | false | 2017-01-04T17:56:15Z |
def after_delete(
    cls,
    _mapper: Mapper,
    connection: Connection,
    target: Union[Dashboard, FavStar, Slice, Query, SqlaTable],
) -> None:
    """SQLAlchemy after-delete hook: remove tag associations for ``target``.

    NOTE(review): presumably registered as a classmethod event listener on
    the mapped classes -- confirm against the enclosing class definition.
    """
    session = Session(bind=connection)
    # delete row from `tagged_objects`
    session.query(TaggedObject).filter(
        TaggedObject.object_type == cls.object_type,
        TaggedObject.object_id == target.id,
    ).delete()
    session.commit()
session.commit() | def after_delete(
cls,
_mapper: Mapper,
connection: Connection,
target: Dashboard | FavStar | Slice | Query | SqlaTable,
) -> None:
session = Session(bind=connection)
# delete row from `tagged_objects`
session.query(TaggedObject).filter(
TaggedObject.object_type == cls.object_type,
TaggedObject.object_id == target.id,
).delete()
session.commit() | Merge branch 'master' into fix/db-val-param-perms | https://github.com/apache/superset/commit/4e2fd6f4f04c61e8c1d3ec3f233581a05f8b6213 | null | null | superset/tags/models.py | 0 | py | false | 2023-06-05T08:42:54Z |
/**
 * Registers the server-side command "&lt;namespace&gt; COMMAND_PARTS[0] COMMAND_PARTS[1]".
 * Execution requires {@code PERMISSION_LEVEL}; on invocation a
 * {@link StartThread} is started for the calling player so the work runs off
 * the command thread. Returns 1 to Brigadier to signal success.
 */
void registerServer() {
    VersionAgnosticCommandManager.get().registerServerCommand(
        CommandManager.literal(Modget.NAMESPACE_SERVER)
            .then(CommandManager.literal(COMMAND_PARTS[0])
                .then(CommandManager.literal(COMMAND_PARTS[1])
                    .requires(source -> source.hasPermissionLevel(PERMISSION_LEVEL))
                    .executes(context -> {
                        PlayerEntity player = context.getSource().getPlayer();
                        new StartThread(player).start();
                        return 1;
                    })
                )
            )
    );
}
VersionAgnosticServerCommandManager.get().register(
CommandManager.literal(Modget.NAMESPACE_SERVER)
.then(CommandManager.literal(COMMAND_PARTS[0])
.then(CommandManager.literal(COMMAND_PARTS[1])
.requires(source -> source.hasPermissionLevel(PERMISSION_LEVEL))
.executes(context -> {
PlayerEntity player = context.getSource().getPlayer();
new StartThread(player).start();
return 1;
})
)
)
);
} | Don't upload modget-core build artifacts | https://github.com/ReviversMC/modget-minecraft/commit/17f5235f98d112ceb086533d1c4901e7a2a3fce9 | null | null | modget-core/src/main/java/com/github/reviversmc/modget/minecraft/command/ReposListCommand.java | 0 | java | false | 2022-08-12T14:50:34Z |
def create_python_agent(
    llm: BaseLanguageModel,
    tool: PythonREPLTool,
    callback_manager: Optional[BaseCallbackManager] = None,
    verbose: bool = False,
    prefix: str = PREFIX,
    agent_executor_kwargs: Optional[Dict[str, Any]] = None,
    **kwargs: Dict[str, Any],
) -> AgentExecutor:
    """Construct a python agent from an LLM and tool.

    Args:
        llm: Language model driving the zero-shot ReAct agent.
        tool: Python REPL tool the agent may invoke.
        callback_manager: Optional callback manager shared by chain and executor.
        verbose: Whether the executor logs intermediate steps.
        prefix: Prompt prefix placed before the tool descriptions.
        agent_executor_kwargs: Extra keyword arguments for ``AgentExecutor``.
        **kwargs: Extra keyword arguments forwarded to ``ZeroShotAgent``.

    Returns:
        An ``AgentExecutor`` wired to the given LLM and the single REPL tool.
    """
    tools = [tool]
    prompt = ZeroShotAgent.create_prompt(tools, prefix=prefix)
    llm_chain = LLMChain(
        llm=llm,
        prompt=prompt,
        callback_manager=callback_manager,
    )
    tool_names = [tool.name for tool in tools]
    agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs)
    return AgentExecutor.from_agent_and_tools(
        agent=agent,
        tools=tools,
        callback_manager=callback_manager,
        verbose=verbose,
        **(agent_executor_kwargs or {}),
    )
llm: BaseLanguageModel,
tool: PythonREPLTool,
agent_type: AgentType = AgentType.ZERO_SHOT_REACT_DESCRIPTION,
callback_manager: Optional[BaseCallbackManager] = None,
verbose: bool = False,
prefix: str = PREFIX,
agent_executor_kwargs: Optional[Dict[str, Any]] = None,
**kwargs: Dict[str, Any],
) -> AgentExecutor:
"""Construct a python agent from an LLM and tool."""
tools = [tool]
agent: BaseSingleActionAgent
if agent_type == AgentType.ZERO_SHOT_REACT_DESCRIPTION:
prompt = ZeroShotAgent.create_prompt(tools, prefix=prefix)
llm_chain = LLMChain(
llm=llm,
prompt=prompt,
callback_manager=callback_manager,
)
tool_names = [tool.name for tool in tools]
agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs)
elif agent_type == AgentType.OPENAI_FUNCTIONS:
system_message = SystemMessage(content=prefix)
_prompt = OpenAIFunctionsAgent.create_prompt(system_message=system_message)
agent = OpenAIFunctionsAgent(
llm=llm,
prompt=_prompt,
tools=tools,
callback_manager=callback_manager,
**kwargs,
)
else:
raise ValueError(f"Agent type {agent_type} not supported at the moment.")
return AgentExecutor.from_agent_and_tools(
agent=agent,
tools=tools,
callback_manager=callback_manager,
verbose=verbose,
**(agent_executor_kwargs or {}),
) | merge | https://github.com/hwchase17/langchain/commit/37f4f246d797db6eecfab0e35748101a33667b12 | null | null | langchain/agents/agent_toolkits/python/base.py | 0 | py | false | 2023-06-20T08:05:46Z |
def fetch_projects():
    """Populate the enclosing ``all_project_map`` with every server project.

    Returns the (possibly empty) map immediately when there is no server
    connection; otherwise mutates ``all_project_map`` in place and back-fills
    each project's ``parent_name`` from its parent entry when the parent is
    also in the map.
    """
    if self.server is None:
        return all_project_map
    for project in TSC.Pager(self.server.projects):
        all_project_map[project.id] = TableauProject(
            id=project.id,
            name=project.name,
            parent_id=project.parent_id,
            parent_name=None,
            description=project.description,
            path=[],
        )
    # Set parent project name; iterate values since the key is unused.
    for project in all_project_map.values():
        if (
            project.parent_id is not None
            and project.parent_id in all_project_map
        ):
            project.parent_name = all_project_map[project.parent_id].name
if self.server is None:
return all_project_map
for project in TSC.Pager(self.server.projects):
all_project_map[project.id] = TableauProject(
id=project.id,
name=project.name,
parent_id=project.parent_id,
parent_name=None,
description=project.description,
path=[],
)
# Set parent project name
for _project_id, project in all_project_map.items():
if (
project.parent_id is not None
and project.parent_id in all_project_map
):
project.parent_name = all_project_map[project.parent_id].name | Merge branch 'master' into feat/policyFixes | https://github.com/datahub-project/datahub/commit/395a1bdeb8dfc62df94a10a3bc38b085d082feca | null | null | metadata-ingestion/src/datahub/ingestion/source/tableau.py | 0 | py | false | 2023-11-03T15:19:26Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.