query stringlengths 9 9.05k | document stringlengths 10 222k | metadata dict | negatives listlengths 30 30 | negative_scores listlengths 30 30 | document_score stringlengths 4 10 | document_rank stringclasses 2
values |
|---|---|---|---|---|---|---|
Tests gotoField if there is a mismatch between MCP and guider. | def test_goto_field_cartridge_mismatch(self):
sopTester.updateModel('guider', TestHelper.guiderState['bossLoaded'])
mcpState = TestHelper.mcpState['boss_science']
mcpState.update({'instrumentNum': [15]})
sopTester.updateModel('mcp', mcpState)
cmdState = self.actorState.gotoField
cmdState.reinitialize(self.cmd)
masterThread.goto_field(self.cmd, cmdState, myGlobals.actorState)
self._check_cmd(0, 14, 0, 0, finish=True, didFail=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_goto_field_apogee_no_guider(self):\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n cmdState.doGuider = False\n self._goto_feld_apogee(3, 11, 0, 0, cmdState)",
"def test_goto_field_apogee(self):\n cmdState = self.actorState.gotoField\n ... | [
"0.67638075",
"0.6552739",
"0.6286361",
"0.6170494",
"0.6125492",
"0.5995649",
"0.59449685",
"0.5715474",
"0.5527284",
"0.5455693",
"0.5430143",
"0.54134613",
"0.530795",
"0.530067",
"0.5265416",
"0.5247163",
"0.52226955",
"0.5134902",
"0.5128405",
"0.51222634",
"0.5118506",
... | 0.7532309 | 0 |
Unwraps the private key into an asn1crypto.keys.RSAPrivateKey, asn1crypto.keys.DSAPrivateKey or asn1crypto.keys.ECPrivateKey object | def unwrap(self):
if self.algorithm == 'rsa':
return self.asn1['private_key'].parsed
if self.algorithm == 'dsa':
params = self.asn1['private_key_algorithm']['parameters']
return DSAPrivateKey({
'version': 0,
'p': params['p'],
'q': params['q'],
'g': params['g'],
'public_key': self.public_key.unwrap(),
'private_key': self.asn1['private_key'].parsed,
})
if self.algorithm == 'ec':
output = self.asn1['private_key'].parsed
output['parameters'] = self.asn1['private_key_algorithm']['parameters']
output['public_key'] = self.public_key.unwrap()
return output | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _unwrap_private_key_info(key_info):\n\n key_alg = key_info.algorithm\n\n if key_alg == 'rsa' or key_alg == 'rsassa_pss':\n return key_info['private_key'].parsed\n\n if key_alg == 'dsa':\n params = key_info['private_key_algorithm']['parameters']\n parsed = key_info['private_key'].p... | [
"0.7709716",
"0.7091911",
"0.6661885",
"0.6650223",
"0.6593615",
"0.6553231",
"0.64998823",
"0.6498649",
"0.6429571",
"0.64050627",
"0.6381211",
"0.6249152",
"0.6246417",
"0.6217567",
"0.62138116",
"0.6204138",
"0.6198371",
"0.6193991",
"0.6141875",
"0.6141632",
"0.6117683",
... | 0.7861159 | 0 |
Unwraps a public key into an asn1crypto.keys.RSAPublicKey, asn1crypto.core.Integer (for DSA) or asn1crypto.keys.ECPointBitString object | def unwrap(self):
if self.algorithm == 'ec':
return self.asn1['public_key']
return self.asn1['public_key'].parsed | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def decode_credential_public_key(\n key: bytes,\n) -> Union[DecodedOKPPublicKey, DecodedEC2PublicKey, DecodedRSAPublicKey]:\n # Occassionally we might be given a public key in an \"uncompressed\" format,\n # typically from older U2F security keys. As per the FIDO spec this is indicated by\n # a leading... | [
"0.70528996",
"0.682243",
"0.6711425",
"0.67085093",
"0.6610189",
"0.65048105",
"0.6489869",
"0.6488379",
"0.64881927",
"0.64246345",
"0.6423231",
"0.64138883",
"0.6409382",
"0.6395284",
"0.63761365",
"0.6347127",
"0.6329381",
"0.6321539",
"0.62870216",
"0.6245045",
"0.619662... | 0.71075326 | 0 |
Unwraps an asn1crypto.keys.PrivateKeyInfo object into an asn1crypto.keys.RSAPrivateKey, asn1crypto.keys.DSAPrivateKey or asn1crypto.keys.ECPrivateKey. | def _unwrap_private_key_info(key_info):
key_alg = key_info.algorithm
if key_alg == 'rsa' or key_alg == 'rsassa_pss':
return key_info['private_key'].parsed
if key_alg == 'dsa':
params = key_info['private_key_algorithm']['parameters']
parsed = key_info['private_key'].parsed
return DSAPrivateKey({
'version': 0,
'p': params['p'],
'q': params['q'],
'g': params['g'],
'public_key': Integer(pow(
params['g'].native,
parsed.native,
params['p'].native
)),
'private_key': parsed,
})
if key_alg == 'ec':
parsed = key_info['private_key'].parsed
parsed['parameters'] = key_info['private_key_algorithm']['parameters']
return parsed
raise ValueError('Unsupported key_info.algorithm "%s"' % key_info.algorithm) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def unwrap(self):\n\n if self.algorithm == 'rsa':\n return self.asn1['private_key'].parsed\n\n if self.algorithm == 'dsa':\n params = self.asn1['private_key_algorithm']['parameters']\n return DSAPrivateKey({\n 'version': 0,\n 'p': params[... | [
"0.72001606",
"0.61105424",
"0.59807235",
"0.59552276",
"0.5781235",
"0.5733041",
"0.5708165",
"0.56828797",
"0.5617318",
"0.56119716",
"0.55409443",
"0.55051327",
"0.54653615",
"0.54501706",
"0.5382498",
"0.53593355",
"0.53350115",
"0.5326151",
"0.5310217",
"0.5288465",
"0.5... | 0.82866263 | 0 |
Removes PEMencoding from a public key, private key or certificate. If the private key is encrypted, the password will be used to decrypt it. | def _unarmor_pem(data, password=None):
object_type, headers, der_bytes = unarmor(data)
type_regex = '^((DSA|EC|RSA) PRIVATE KEY|ENCRYPTED PRIVATE KEY|PRIVATE KEY|PUBLIC KEY|RSA PUBLIC KEY|CERTIFICATE)'
armor_type = re.match(type_regex, object_type)
if not armor_type:
raise ValueError(pretty_message(
'''
data does not seem to contain a PEM-encoded certificate, private
key or public key
'''
))
pem_header = armor_type.group(1)
data = data.strip()
# RSA private keys are encrypted after being DER-encoded, but before base64
# encoding, so they need to be handled specially
if pem_header in set(['RSA PRIVATE KEY', 'DSA PRIVATE KEY', 'EC PRIVATE KEY']):
algo = armor_type.group(2).lower()
return ('private key', algo, _unarmor_pem_openssl_private(headers, der_bytes, password))
key_type = pem_header.lower()
algo = None
if key_type == 'encrypted private key':
key_type = 'private key'
elif key_type == 'rsa public key':
key_type = 'public key'
algo = 'rsa'
return (key_type, algo, der_bytes) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def key_to_pem(key, password=None):\n if password:\n enc = BestAvailableEncryption(as_bytes(password))\n else:\n enc = NoEncryption()\n return key.private_bytes(Encoding.PEM, PrivateFormat.PKCS8, enc)",
"def strip_begin_end_public_key(key):\n return key.replace(\"\\n\", \"\")\\\n ... | [
"0.5717775",
"0.5633551",
"0.55747586",
"0.55693495",
"0.55062234",
"0.5487441",
"0.544285",
"0.5425336",
"0.5414394",
"0.5376748",
"0.53717756",
"0.53678894",
"0.53630817",
"0.53443956",
"0.5333764",
"0.53271896",
"0.532627",
"0.5300595",
"0.5287096",
"0.5276774",
"0.5239591... | 0.5872964 | 0 |
Parses a PKCS12 ANS.1 DERencoded structure and extracts certs and keys | def _parse_pkcs12(data, password, load_private_key):
if not isinstance(data, byte_cls):
raise TypeError(pretty_message(
'''
data must be a byte string, not %s
''',
type_name(data)
))
if password is not None:
if not isinstance(password, byte_cls):
raise TypeError(pretty_message(
'''
password must be a byte string, not %s
''',
type_name(password)
))
else:
password = b''
certs = {}
private_keys = {}
pfx = Pfx.load(data)
auth_safe = pfx['auth_safe']
if auth_safe['content_type'].native != 'data':
raise ValueError(pretty_message(
'''
Only password-protected PKCS12 files are currently supported
'''
))
authenticated_safe = pfx.authenticated_safe
mac_data = pfx['mac_data']
if mac_data:
mac_algo = mac_data['mac']['digest_algorithm']['algorithm'].native
key_length = {
'sha1': 20,
'sha224': 28,
'sha256': 32,
'sha384': 48,
'sha512': 64,
'sha512_224': 28,
'sha512_256': 32,
}[mac_algo]
mac_key = pkcs12_kdf(
mac_algo,
password,
mac_data['mac_salt'].native,
mac_data['iterations'].native,
key_length,
3 # ID 3 is for generating an HMAC key
)
hash_mod = getattr(hashlib, mac_algo)
computed_hmac = hmac.new(mac_key, auth_safe['content'].contents, hash_mod).digest()
stored_hmac = mac_data['mac']['digest'].native
if not constant_compare(computed_hmac, stored_hmac):
raise ValueError('Password provided is invalid')
for content_info in authenticated_safe:
content = content_info['content']
if isinstance(content, OctetString):
_parse_safe_contents(content.native, certs, private_keys, password, load_private_key)
elif isinstance(content, EncryptedData):
encrypted_content_info = content['encrypted_content_info']
encryption_algorithm_info = encrypted_content_info['content_encryption_algorithm']
encrypted_content = encrypted_content_info['encrypted_content'].native
decrypted_content = _decrypt_encrypted_data(encryption_algorithm_info, encrypted_content, password)
_parse_safe_contents(decrypted_content, certs, private_keys, password, load_private_key)
else:
raise ValueError(pretty_message(
'''
Public-key-based PKCS12 files are not currently supported
'''
))
key_fingerprints = set(private_keys.keys())
cert_fingerprints = set(certs.keys())
common_fingerprints = sorted(list(key_fingerprints & cert_fingerprints))
key = None
cert = None
other_certs = []
if len(common_fingerprints) >= 1:
fingerprint = common_fingerprints[0]
key = private_keys[fingerprint]
cert = certs[fingerprint]
other_certs = [certs[f] for f in certs if f != fingerprint]
return (key, cert, other_certs)
if len(private_keys) > 0:
first_key = sorted(list(private_keys.keys()))[0]
key = private_keys[first_key]
if len(certs) > 0:
first_key = sorted(list(certs.keys()))[0]
cert = certs[first_key]
del certs[first_key]
if len(certs) > 0:
other_certs = sorted(list(certs.values()), key=lambda c: c.subject.human_friendly)
return (key, cert, other_certs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parseBinary(self, bytes):\r\n\r\n self.bytes = bytearray(bytes)\r\n p = ASN1Parser(bytes)\r\n\r\n #Get the tbsCertificate\r\n tbsCertificateP = p.getChild(0)\r\n\r\n #Is the optional version field present?\r\n #This determines which index the key is at.\r\n if t... | [
"0.60229003",
"0.5695678",
"0.5526086",
"0.54503",
"0.5424802",
"0.51653993",
"0.51222205",
"0.5089913",
"0.5076887",
"0.50647706",
"0.5058496",
"0.4928613",
"0.4890625",
"0.48888293",
"0.48255894",
"0.47993195",
"0.47745132",
"0.47668105",
"0.47628716",
"0.47628716",
"0.4759... | 0.67407256 | 0 |
Parses a SafeContents PKCS12 ANS.1 structure and extracts certs and keys | def _parse_safe_contents(safe_contents, certs, private_keys, password, load_private_key):
if isinstance(safe_contents, byte_cls):
safe_contents = SafeContents.load(safe_contents)
for safe_bag in safe_contents:
bag_value = safe_bag['bag_value']
if isinstance(bag_value, CertBag):
if bag_value['cert_id'].native == 'x509':
cert = bag_value['cert_value'].parsed
public_key_info = cert['tbs_certificate']['subject_public_key_info']
certs[_fingerprint(public_key_info, None)] = bag_value['cert_value'].parsed
elif isinstance(bag_value, PrivateKeyInfo):
private_keys[_fingerprint(bag_value, load_private_key)] = bag_value
elif isinstance(bag_value, EncryptedPrivateKeyInfo):
encryption_algorithm_info = bag_value['encryption_algorithm']
encrypted_key_bytes = bag_value['encrypted_data'].native
decrypted_key_bytes = _decrypt_encrypted_data(encryption_algorithm_info, encrypted_key_bytes, password)
private_key = PrivateKeyInfo.load(decrypted_key_bytes)
private_keys[_fingerprint(private_key, load_private_key)] = private_key
elif isinstance(bag_value, SafeContents):
_parse_safe_contents(bag_value, certs, private_keys, password, load_private_key)
else:
# We don't care about CRL bags or secret bags
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _parse_pkcs12(data, password, load_private_key):\n\n if not isinstance(data, byte_cls):\n raise TypeError(pretty_message(\n '''\n data must be a byte string, not %s\n ''',\n type_name(data)\n ))\n\n if password is not None:\n if not isinsta... | [
"0.6905935",
"0.519228",
"0.5144184",
"0.5090635",
"0.4969031",
"0.49095032",
"0.48951414",
"0.48807597",
"0.4877804",
"0.48451734",
"0.4826676",
"0.48249158",
"0.48138803",
"0.4775471",
"0.47672594",
"0.4758738",
"0.47264582",
"0.47241712",
"0.46494445",
"0.4630516",
"0.4615... | 0.6486718 | 1 |
Process and return selected confounds from the confounds file | def _select_confounds(confounds_file, selected_confounds):
import pandas as pd
import numpy as np
confounds_df = pd.read_csv(confounds_file, sep='\t', na_values='n/a')
# fill the first value of FramewiseDisplacement with the mean.
if 'FramewiseDisplacement' in selected_confounds:
confounds_df['FramewiseDisplacement'] = confounds_df['FramewiseDisplacement'].fillna(
np.mean(confounds_df['FramewiseDisplacement']))
desired_confounds = confounds_df[selected_confounds]
return desired_confounds | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _select_confounds(confounds_file, selected_confounds):\n import pandas as pd\n import numpy as np\n import re\n\n confounds_df = pd.read_csv(confounds_file, sep='\\t', na_values='n/a')\n # regular expression to capture confounds specified at the command line\n confound_expr = re.compile(r\"|\... | [
"0.65061325",
"0.5781993",
"0.5781708",
"0.56171685",
"0.55821574",
"0.55591077",
"0.55106205",
"0.5485491",
"0.5484373",
"0.5472426",
"0.54602283",
"0.54551107",
"0.5445359",
"0.5434916",
"0.5391505",
"0.5335677",
"0.53314865",
"0.53281003",
"0.5295112",
"0.52804834",
"0.527... | 0.6170222 | 1 |
Return a cached copy of TestShib's metadata with a cacheDuration attribute | def cache_duration_metadata_callback(_request, _uri, headers):
return (200, headers, self.read_data_file('testshib_metadata_with_cache_duration.xml')) # lint-amnesty, pylint: disable=no-member | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_metadata(self) -> Metadata:\n manifest = self._get_manifest()\n\n return Metadata(**manifest[\"metadata\"])",
"def get_metadata(self):\n return copy.copy(self.metadata)",
"def cache_data(self):\n # Initialize key variables\n result = self.data['cache_data']\n ... | [
"0.5946828",
"0.5924196",
"0.5874222",
"0.58641565",
"0.58568573",
"0.5794843",
"0.57618964",
"0.5748162",
"0.5736788",
"0.5663821",
"0.5662813",
"0.5655036",
"0.5647174",
"0.56243145",
"0.55787057",
"0.5567786",
"0.55402327",
"0.5527116",
"0.55239946",
"0.55070686",
"0.54851... | 0.6988418 | 0 |
Enable and configure the TestShib SAML IdP as a third_party_auth provider | def _configure_testshib_provider(self, **kwargs):
fetch_metadata = kwargs.pop('fetch_metadata', True)
assert_metadata_updates = kwargs.pop('assert_metadata_updates', True)
kwargs.setdefault('name', self.PROVIDER_NAME)
kwargs.setdefault('enabled', True)
kwargs.setdefault('visible', True)
kwargs.setdefault("backend_name", "tpa-saml")
kwargs.setdefault('slug', self.PROVIDER_IDP_SLUG)
kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID)
kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL)
kwargs.setdefault('icon_class', 'fa-university')
kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName
kwargs.setdefault('max_session_length', None)
kwargs.setdefault('send_to_registration_first', False)
kwargs.setdefault('skip_email_verification', False)
saml_provider = self.configure_saml_provider(**kwargs) # pylint: disable=no-member
if fetch_metadata:
assert httpretty.is_enabled() # lint-amnesty, pylint: disable=no-member
num_total, num_skipped, num_attempted, num_updated, num_failed, failure_messages = fetch_saml_metadata()
if assert_metadata_updates:
assert num_total == 1 # lint-amnesty, pylint: disable=no-member
assert num_skipped == 0 # lint-amnesty, pylint: disable=no-member
assert num_attempted == 1 # lint-amnesty, pylint: disable=no-member
assert num_updated == 1 # lint-amnesty, pylint: disable=no-member
assert num_failed == 0 # lint-amnesty, pylint: disable=no-member
assert len(failure_messages) == 0 # lint-amnesty, pylint: disable=no-member
return saml_provider | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def enable_third_party_auth():\r\n\r\n from third_party_auth import settings as auth_settings\r\n auth_settings.apply_settings(settings.THIRD_PARTY_AUTH, settings)",
"def init_saml_auth(saml_prepared_flask_request):\n return OneLogin_Saml2_Auth(saml_prepared_flask_request, custom_base_path=app.config.ge... | [
"0.67878383",
"0.63547695",
"0.63465583",
"0.61199355",
"0.59225214",
"0.58825934",
"0.56425726",
"0.563755",
"0.562901",
"0.5535802",
"0.5476775",
"0.53583604",
"0.531819",
"0.5295527",
"0.52504843",
"0.51937705",
"0.5125788",
"0.50825155",
"0.5081821",
"0.5010863",
"0.49874... | 0.7070957 | 0 |
Configure TestShib before running the login test | def test_login(self):
self._configure_testshib_provider()
self._test_login() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_register(self):\n self._configure_testshib_provider()\n self._test_register()",
"def set_up_login():\n\n bitool.app.testing = True\n bitool.app.config['TESTING'] = True\n bitool.app.login_manager.init_app(bitool.app)\n app = bitool.app.test_client()\n\n return app",
"def c... | [
"0.68922627",
"0.6773958",
"0.6753465",
"0.6616434",
"0.65026134",
"0.6428037",
"0.6423407",
"0.6388931",
"0.63668215",
"0.6346543",
"0.63392085",
"0.6336799",
"0.62982786",
"0.62645006",
"0.62591666",
"0.62502235",
"0.62502235",
"0.62462556",
"0.62462556",
"0.62462556",
"0.6... | 0.81601787 | 0 |
Configure TestShib before running the register test | def test_register(self):
self._configure_testshib_provider()
self._test_register() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def configure_test(self, test, config_json):\n pass",
"def test_register():\n plug.manager.register(junit4)",
"def setUpConfig(self):\n pass",
"def test_login(self):\n self._configure_testshib_provider()\n self._test_login()",
"def setup_method(self, test_method):\n se... | [
"0.7068733",
"0.6525172",
"0.6467879",
"0.63710594",
"0.6313307",
"0.6234248",
"0.6212751",
"0.60972005",
"0.6043006",
"0.6039801",
"0.5995316",
"0.5995316",
"0.5995316",
"0.5995316",
"0.5992337",
"0.5989009",
"0.59598655",
"0.5953964",
"0.5928903",
"0.59063",
"0.5904783",
... | 0.83610606 | 0 |
Test that attributes sent by a SAML provider are stored in the UserSocialAuth table. | def test_login_records_attributes(self):
self.test_login()
record = UserSocialAuth.objects.get(
user=self.user, provider=self.PROVIDER_BACKEND, uid__startswith=self.PROVIDER_IDP_SLUG
)
attributes = record.extra_data
assert attributes.get('urn:oid:1.3.6.1.4.1.5923.1.1.1.9') == ['Member@testshib.org', 'Staff@testshib.org']
assert attributes.get('urn:oid:2.5.4.3') == ['Me Myself And I']
assert attributes.get('urn:oid:0.9.2342.19200300.100.1.1') == ['myself']
assert attributes.get('urn:oid:2.5.4.20') == ['555-5555']
# Phone number | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_register_insufficient_sapsf_metadata(self):\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings='{\"key_i_dont_need\":\"value_i_also_dont_need\"}',\n )\n # Becau... | [
"0.6245322",
"0.59301543",
"0.5911321",
"0.58538973",
"0.58124703",
"0.57879347",
"0.56024635",
"0.5550127",
"0.55485487",
"0.54875433",
"0.54671925",
"0.54621214",
"0.54423463",
"0.5395432",
"0.5389309",
"0.5383121",
"0.5311547",
"0.5295665",
"0.5293737",
"0.52753067",
"0.52... | 0.7325418 | 0 |
Test SAML login logs with debug mode enabled or not | def test_debug_mode_login(self, debug_mode_enabled):
self._configure_testshib_provider(debug_mode=debug_mode_enabled)
with patch.object(saml_log, 'info') as mock_log:
self._test_login()
if debug_mode_enabled:
# We expect that test_login() does two full logins, and each attempt generates two
# logs - one for the request and one for the response
assert mock_log.call_count == 4
expected_next_url = "/dashboard"
(msg, action_type, idp_name, request_data, next_url, xml), _kwargs = mock_log.call_args_list[0]
assert msg.startswith('SAML login %s')
assert action_type == 'request'
assert idp_name == self.PROVIDER_IDP_SLUG
self.assertDictContainsSubset(
{"idp": idp_name, "auth_entry": "login", "next": expected_next_url},
request_data
)
assert next_url == expected_next_url
assert '<samlp:AuthnRequest' in xml
(msg, action_type, idp_name, response_data, next_url, xml), _kwargs = mock_log.call_args_list[1]
assert msg.startswith('SAML login %s')
assert action_type == 'response'
assert idp_name == self.PROVIDER_IDP_SLUG
self.assertDictContainsSubset({"RelayState": idp_name}, response_data)
assert 'SAMLResponse' in response_data
assert next_url == expected_next_url
assert '<saml2p:Response' in xml
else:
assert not mock_log.called | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_logging_running(self):\n tester = app.test_client(self)\n response = tester.get('/login', content_type='html/text')\n self.assertTrue(b'PLEASE LOGIN' in response.data)",
"def test_logging(self):\n self._verify_logging()",
"def test_successful_login(self):\n pass",
"def... | [
"0.6547012",
"0.61618876",
"0.6142526",
"0.6016165",
"0.5957221",
"0.59324735",
"0.5907222",
"0.58748484",
"0.58701736",
"0.5860925",
"0.582712",
"0.5819717",
"0.57623357",
"0.5748084",
"0.57179964",
"0.56889266",
"0.56684595",
"0.56650877",
"0.5635419",
"0.5612174",
"0.55851... | 0.8257416 | 0 |
Test that when we have a TPA provider which as an explicit maximum session length set, waiting for longer than that between requests results in us being logged out. | def test_login_with_testshib_provider_short_session_length(self):
# Configure the provider with a 10-second timeout
self._configure_testshib_provider(max_session_length=10)
now = datetime.datetime.utcnow()
with freeze_time(now):
# Test the login flow, adding the user in the process
self._test_login()
# Wait 30 seconds; longer than the manually-set 10-second timeout
later = now + datetime.timedelta(seconds=30)
with freeze_time(later):
# Test returning as a logged in user; this method verifies that we're logged out first.
self._test_return_login(previous_session_timed_out=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_inactive_session_timeout(self):\r\n email, password = self.STUDENT_INFO[0]\r\n self.login(email, password)\r\n\r\n # make sure we can access courseware immediately\r\n resp = self.client.get(reverse('dashboard'))\r\n self.assertEquals(resp.status_code, 200)\r\n\r\n ... | [
"0.67007613",
"0.6563989",
"0.6529781",
"0.63517386",
"0.6243885",
"0.6232972",
"0.6106514",
"0.6026753",
"0.5952208",
"0.59390306",
"0.5926259",
"0.58408374",
"0.58186436",
"0.5785342",
"0.57843",
"0.57781994",
"0.57300466",
"0.572285",
"0.57226133",
"0.5715555",
"0.57084894... | 0.77491003 | 0 |
Mock an error response when calling the OData API for user details. | def _mock_odata_api_for_error(self, odata_api_root_url, username):
def callback(request, uri, headers): # lint-amnesty, pylint: disable=unused-argument
"""
Return a 500 error when someone tries to call the URL.
"""
headers['CorrelationId'] = 'aefd38b7-c92c-445a-8c7a-487a3f0c7a9d'
headers['RequestNo'] = '[787177]' # This is the format SAPSF returns for the transaction request number
return 500, headers, 'Failure!'
fields = ','.join(SapSuccessFactorsIdentityProvider.default_field_mapping.copy())
url = '{root_url}User(userId=\'{user_id}\')?$select={fields}'.format(
root_url=odata_api_root_url,
user_id=username,
fields=fields,
)
httpretty.register_uri(httpretty.GET, url, body=callback, content_type='application/json')
return url | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_response_error(self):\n r = mock.Mock(spec=requests.Response)\n r.content = \"{'normal': 'resource'}\"\n\n f = Fitbit(**self.client_kwargs)\n f.client._request = lambda *args, **kwargs: r\n\n r.status_code = 404\n self.assertRaises(exceptions.HTTPNotFound, f.user_... | [
"0.68724155",
"0.6683057",
"0.6637301",
"0.6578398",
"0.6559852",
"0.6474468",
"0.6464813",
"0.6425861",
"0.638332",
"0.63573205",
"0.6336519",
"0.6328632",
"0.63244635",
"0.62987155",
"0.6267484",
"0.62195075",
"0.61980534",
"0.6191649",
"0.61694646",
"0.614538",
"0.61373806... | 0.73752326 | 0 |
Configure the provider such that it can talk to a mockedout version of the SAP SuccessFactors API, and ensure that the data it gets that way gets passed to the registration form. Check that value mappings overrides work in cases where we override a value other than what we're looking for, and when an empty override is provided (expected behavior is that existing value maps will be left alone). | def test_register_sapsf_metadata_present_override_relevant_value(self):
value_map = {'country': {'Australia': 'NZ'}}
expected_country = 'NZ'
provider_settings = {
'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',
'sapsf_private_key': 'fake_private_key_here',
'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',
'odata_company_id': 'NCC1701D',
'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',
}
if value_map:
provider_settings['sapsf_value_mappings'] = value_map
self._configure_testshib_provider(
identity_provider_type='sap_success_factors',
metadata_source=TESTSHIB_METADATA_URL,
other_settings=json.dumps(provider_settings)
)
self._test_register(country=expected_country) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_register_sapsf_metadata_present_override_other_value(self):\n value_map = {'country': {'United States': 'blahfake'}}\n expected_country = 'AU'\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_priva... | [
"0.70553404",
"0.66161525",
"0.63435036",
"0.57704574",
"0.56966025",
"0.5679671",
"0.55836266",
"0.53828114",
"0.53427476",
"0.5324517",
"0.53106654",
"0.52406067",
"0.52070177",
"0.5202646",
"0.5187437",
"0.51687354",
"0.51323",
"0.5131696",
"0.5125414",
"0.51162493",
"0.50... | 0.69396245 | 1 |
Configure the provider such that it can talk to a mockedout version of the SAP SuccessFactors API, and ensure that the data it gets that way gets passed to the registration form. Check that value mappings overrides work in cases where we override a value other than what we're looking for, and when an empty override is provided (expected behavior is that existing value maps will be left alone). | def test_register_sapsf_metadata_present_override_other_value(self):
value_map = {'country': {'United States': 'blahfake'}}
expected_country = 'AU'
provider_settings = {
'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',
'sapsf_private_key': 'fake_private_key_here',
'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',
'odata_company_id': 'NCC1701D',
'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',
}
if value_map:
provider_settings['sapsf_value_mappings'] = value_map
self._configure_testshib_provider(
identity_provider_type='sap_success_factors',
metadata_source=TESTSHIB_METADATA_URL,
other_settings=json.dumps(provider_settings)
)
self._test_register(country=expected_country) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_register_sapsf_metadata_present_override_relevant_value(self):\n value_map = {'country': {'Australia': 'NZ'}}\n expected_country = 'NZ'\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_... | [
"0.6940789",
"0.6617741",
"0.6345586",
"0.57743275",
"0.5698771",
"0.56804734",
"0.55835336",
"0.5381514",
"0.5341755",
"0.5321272",
"0.5309637",
"0.5242255",
"0.5205009",
"0.5200889",
"0.5184601",
"0.51705694",
"0.51325846",
"0.5132514",
"0.5125601",
"0.51126003",
"0.5099695... | 0.70560694 | 0 |
Test case for get_chain_by_id | def test_get_chain_by_id(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_chains(self):\n pass",
"def get_chain(self, chain_id):\n if self.chain_dict.has_key(chain_id):\n return self.chain_dict[chain_id]\n return None",
"def test_solareclipses_id_get(self):\n pass",
"def sample_chains():\n c = chain(add.s(1, 1), add.s(1), add.... | [
"0.66562283",
"0.6344267",
"0.60350573",
"0.6012869",
"0.5983662",
"0.5644152",
"0.5638522",
"0.56080955",
"0.55986637",
"0.5579138",
"0.54949653",
"0.5461469",
"0.54595417",
"0.5459411",
"0.5446837",
"0.5446837",
"0.54265",
"0.53988206",
"0.5392831",
"0.5390298",
"0.5332432"... | 0.93600637 | 0 |
Test case for get_chains | def test_get_chains(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_chain_by_id(self):\n pass",
"def get_chains (structure):\n chains=[]\n for chain in structure[0]:\n chains.append(chain)\n return chains",
"def iter_chains(self):\n if self.default_model:\n return iter(self.default_model.chain_list)\n return iter(lis... | [
"0.6914287",
"0.67781395",
"0.6644035",
"0.6538033",
"0.652312",
"0.6489876",
"0.64195883",
"0.63450104",
"0.62671566",
"0.61932814",
"0.60854757",
"0.6066701",
"0.605091",
"0.6029899",
"0.6004666",
"0.60018355",
"0.5970353",
"0.59533113",
"0.5910838",
"0.5885802",
"0.5868115... | 0.94273794 | 0 |
Test case for post_chain | def test_post_chain(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_post_chain_search(self):\n pass",
"def test_rewrite_chains_cover(self):\n cb = Mock()\n self.ipt.rewrite_chains(\n {\"foo\": [\"--append foo --jump bar\"]},\n {\"foo\": set([\"bar\"])},\n async=True,\n callback=cb,\n )\n self... | [
"0.72168523",
"0.65788776",
"0.6171648",
"0.6129438",
"0.61099774",
"0.61021096",
"0.60098636",
"0.5998518",
"0.59712416",
"0.5869386",
"0.58414704",
"0.5824033",
"0.5820647",
"0.5788468",
"0.5751005",
"0.5730567",
"0.57289803",
"0.57287365",
"0.5715039",
"0.5704063",
"0.5691... | 0.9139945 | 0 |
Test case for post_chain_search | def test_post_chain_search(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_post_chain(self):\n pass",
"def test_post_foods_search(self):\n pass",
"def test_search(self):\n pass",
"def test_search(self):\n pass",
"def test_search(self):\n pass",
"def test_search_systems_post(self):\n pass",
"def test_search_organizations_post(... | [
"0.7415727",
"0.6913196",
"0.66014063",
"0.66014063",
"0.66014063",
"0.65811795",
"0.6300726",
"0.615566",
"0.5992487",
"0.5984323",
"0.59211963",
"0.58675545",
"0.58613753",
"0.58335793",
"0.58033735",
"0.5775473",
"0.57752",
"0.5766603",
"0.57302696",
"0.57134694",
"0.56959... | 0.9217483 | 0 |
Build a networkx graph object from variables and relations. | def as_networkx_graph(variables, relations):
graph = nx.Graph()
# One node for each variables
graph.add_nodes_from([v.name for v in variables])
for r in relations:
for p in all_pairs([e.name for e in r.dimensions]):
graph.add_edge(*p)
return graph | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def as_networkx_bipartite_graph(variables, relations):\n graph = nx.Graph()\n\n # One node for each variables\n graph.add_nodes_from([v.name for v in variables], bipartite=0)\n graph.add_nodes_from([r.name for r in relations], bipartite=1)\n\n for r in relations:\n for e in r.dimensions:\n ... | [
"0.73159355",
"0.67514896",
"0.6639457",
"0.663262",
"0.6631219",
"0.6617338",
"0.65870404",
"0.6585843",
"0.65561634",
"0.64985657",
"0.64818686",
"0.64802456",
"0.64401877",
"0.64243275",
"0.64049304",
"0.6382079",
"0.63409054",
"0.6319503",
"0.6298616",
"0.6286521",
"0.627... | 0.81062376 | 0 |
Build a networkx graph object from variables and relations. | def as_networkx_bipartite_graph(variables, relations):
graph = nx.Graph()
# One node for each variables
graph.add_nodes_from([v.name for v in variables], bipartite=0)
graph.add_nodes_from([r.name for r in relations], bipartite=1)
for r in relations:
for e in r.dimensions:
graph.add_edge(r.name, e.name)
return graph | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def as_networkx_graph(variables, relations):\n graph = nx.Graph()\n\n # One node for each variables\n graph.add_nodes_from([v.name for v in variables])\n\n for r in relations:\n for p in all_pairs([e.name for e in r.dimensions]):\n graph.add_edge(*p)\n return graph",
"def initial... | [
"0.81066954",
"0.6750233",
"0.6639164",
"0.66322136",
"0.6630946",
"0.6617218",
"0.6586329",
"0.65858966",
"0.65559703",
"0.6498455",
"0.64820826",
"0.6480336",
"0.6440278",
"0.64248806",
"0.6404701",
"0.6381441",
"0.63398075",
"0.6318121",
"0.62991834",
"0.6286606",
"0.62709... | 0.73165965 | 1 |
def display_graph(variables, relations):
    """Display the variables and relation as a graph, using networkx and matplotlib."""
    graph = as_networkx_graph(variables, relations)
    try:
        # matplotlib is an optional dependency: degrade gracefully without it.
        import matplotlib.pyplot as plt
    except ImportError:
        print("ERROR: cannot display graph, matplotlib is not installed")
    else:
        nx.draw_networkx(graph, with_labels=True)
        # Alternative layouts, kept for experimentation:
        # nx.draw_random(graph)
        # nx.draw_circular(graph)
        # nx.draw_spectral(graph)
        plt.show()
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def visualize_dependency_graph(self):\n if self.graph is None:\n self.logger.error(\"Graph value none cannot be plotted\")\n return\n\n nx.draw(self.graph, cmap=plt.get_cmap('jet'), with_labels=True)\n plt.show()",
"def plot_graph(self) -> None:",
"def display_biparti... | [
"0.75382435",
"0.7278241",
"0.7211741",
"0.71995574",
"0.70474607",
"0.69660336",
"0.6962253",
"0.68822443",
"0.6876223",
"0.68665344",
"0.685607",
"0.6749594",
"0.67388505",
"0.67268574",
"0.6711935",
"0.6700764",
"0.6693562",
"0.6683998",
"0.6630944",
"0.6603868",
"0.655565... | 0.8590499 | 0 |
def graph_diameter(variables, relations):
    """Compute the graph diameter(s).

    If the graph contains several independent sub-graphs, returns a list with
    the diameter of each connected component.
    """
    g = as_networkx_graph(variables, relations)
    return [
        nx.diameter(g.subgraph(component).copy())
        for component in nx.connected_components(g)
    ]
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def diameter(self):\n\n v = self.vertices()\n pairs = [ (v[i],v[j]) for i in range(len(v)-1) for j in range(i+1, len(v))]\n smallest_paths = []\n for (s,e) in pairs:\n paths = self.find_all_path(s,e)\n smallest = sorted(paths, key=len)[0]\n smallest_path... | [
"0.700528",
"0.64061725",
"0.6378661",
"0.6351375",
"0.6088722",
"0.5843668",
"0.5843668",
"0.5829243",
"0.57851386",
"0.56062824",
"0.55966824",
"0.55673695",
"0.55673695",
"0.55229545",
"0.5520967",
"0.5497087",
"0.54773235",
"0.54561347",
"0.5399723",
"0.5395755",
"0.53897... | 0.7837539 | 0 |
def all_pairs(elements):
    """Generate all possible pairs from the list of given elements.

    The pair ordering reproduces the historical recursive implementation:
    pairs from the tail of the list come first, e.g.
    ``all_pairs([a, b, c]) == [(b, c), (a, b), (a, c)]``.
    """
    pairs = []
    # Walk the "first" element from the back towards the front so the
    # output order matches the old tail-recursive formulation.
    for start in range(len(elements) - 2, -1, -1):
        for other in elements[start + 1:]:
            pairs.append((elements[start], other))
    return pairs
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pairs(lst):\r\n\tfor i in range(1, len(lst), 2):\r\n\t\tyield lst[i-1], lst[i]",
"def all_pairs(items, sort=False):\n if sort:\n items = sorted(items)\n for i, ni in enumerate(items):\n for j, nj in enumerate(items):\n if j > i: yield ni, nj",
"def __unordered_pairs(l):\n\n ... | [
"0.7500274",
"0.6851043",
"0.6840614",
"0.68141717",
"0.6754132",
"0.669949",
"0.6683781",
"0.6671177",
"0.6668704",
"0.65523124",
"0.64457446",
"0.64399433",
"0.6394851",
"0.636628",
"0.6352524",
"0.6351291",
"0.6324047",
"0.6315178",
"0.6308765",
"0.63008136",
"0.62359875",... | 0.8119166 | 0 |
def serialize(self):
    """Serialize this internal key/value pair to bytes.

    Layout: 8-byte header (high bit set for PUT, low bits = sequence number),
    then 4-byte key length + pickled key, and for PUT entries only,
    4-byte value length + pickled value.
    """
    if self.type == KeyType.PUT:
        header = self.sequence_number | (1 << 63)
    else:
        header = self.sequence_number

    buffer = bytearray(byte_utils.integer_to_n_bytes_array(header, 8))

    encoded_key = pickle.dumps(self.key)
    buffer.extend(byte_utils.integer_to_four_bytes_array(len(encoded_key)))
    buffer.extend(encoded_key)

    # Only PUT operations carry a payload; deletes are key-only.
    if self.type == KeyType.PUT:
        encoded_value = pickle.dumps(self.value)
        buffer.extend(byte_utils.integer_to_four_bytes_array(len(encoded_value)))
        buffer.extend(encoded_value)
    return bytes(buffer)
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def serialize(self, value) -> bytes:\n pass",
"def _encode_value(self, value):\n return pickle.dumps(value)",
"def serialize(obj):\n return pickle.dumps(obj)",
"def dump_object(self, value):\n return pickle.dumps(value)",
"def __bytes__(self):\n byteout = bytearray()\n f... | [
"0.64888334",
"0.6360432",
"0.6315611",
"0.6277576",
"0.6177769",
"0.6171004",
"0.6164912",
"0.6144653",
"0.6144653",
"0.61439574",
"0.6126626",
"0.6126626",
"0.60924256",
"0.60020936",
"0.59987134",
"0.59793663",
"0.5974535",
"0.59639865",
"0.5963505",
"0.5941758",
"0.593160... | 0.7145172 | 0 |
def import_data(ashrae_dir, filenames=const.NAMES):
    """Import ASHRAE data from a directory containing the .csv files.

    Returns a dict mapping each name in *filenames* to the DataFrame loaded
    from ``<ashrae_dir>/<name>.csv``.
    """
    print('Importing data from csv')
    directory = pathlib.Path(ashrae_dir)
    frames = {}
    for name in filenames:
        frames[name] = pd.read_csv((directory / name).with_suffix('.csv'))
    return frames
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_csv(self, path):\n for file in os.listdir(path):\n if file[-4:] == \".csv\":\n name = file[:-4]\n table_index_header = cfg.get_list(\"table_index_header\", name)\n filename = os.path.join(path, file)\n self.input_data[name] = pd... | [
"0.64119494",
"0.62574255",
"0.6136558",
"0.60596466",
"0.6012882",
"0.5962187",
"0.5958087",
"0.5912588",
"0.58974314",
"0.5878545",
"0.58628714",
"0.5797354",
"0.57774633",
"0.577703",
"0.5762553",
"0.57592994",
"0.57388955",
"0.57192475",
"0.57123953",
"0.5707139",
"0.5690... | 0.7007926 | 0 |
def get_raw_data(ashrae_dir, cache_file=None, filenames=const.NAMES):
    """Import ASHRAE data, using *cache_file* when it already exists.

    Fix: the original unconditionally ran ``pathlib.Path(cache_file)``, which
    raises TypeError when *cache_file* is None (the documented default), and
    then tried to cache to that None path.  Caching is now skipped entirely
    when no cache file is given.
    """
    if cache_file is not None:
        cache_file = pathlib.Path(cache_file)

    if cache_file is not None and cache_file.exists():
        data = import_dict_from_cached(cache_file, filenames)
    else:
        data = import_data(ashrae_dir)
        if cache_file is not None:
            _cache_data(data, cache_file)

    # Sanity check: the set of building ids should be the same in the train and test sets.
    assert set(data['train'].building_id) == set(data['test'].building_id)

    return data
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def import_(self, data):\n return self.__import(data)",
"def load_data(self) -> None:",
"def load_data(data_set_key: str, a2e_data_path: str = '../../../a2e-data/data', cache_dir: str = None) -> BearingDataSet:\n\n if a2e_data_path is not None and not a2e_data_path.startswith('http') and not a2e_data... | [
"0.5765304",
"0.5686181",
"0.56845134",
"0.56063354",
"0.56054366",
"0.55914676",
"0.5575657",
"0.55459553",
"0.5527377",
"0.54661703",
"0.5434593",
"0.54312104",
"0.5410122",
"0.5381183",
"0.5379974",
"0.5346656",
"0.5326773",
"0.5326303",
"0.52563024",
"0.52543855",
"0.5239... | 0.61214995 | 0 |
def count_missing_timestamps(df):
    """Return the number of timestamps missing.

    Compares the number of rows in *df* against a complete hourly grid of
    (site, timestamp) combinations spanning the observed time range.
    """
    present = len(df.timestamp)
    site_count = len(set(df.site_id))
    hourly_range = pd.date_range(start=min(df.timestamp), end=max(df.timestamp), freq='H')
    missing = site_count * len(hourly_range) - present
    print(f'There are {present} timestamps in the data. The full date range is {len(hourly_range)} long and'
          f' there are {site_count} sites so there should be {site_count * len(hourly_range)} '
          f'timestamps in the data. There are therefore {missing} missing. ')
    return missing
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_no_missing_timesteps(timesteps, verbose=True):\n timesteps = _check_timesteps(timesteps)\n # Check if there are data\n if timesteps.size == 0:\n raise ValueError(\"No data available !\")\n # Check if missing timesteps\n dt = np.diff(timesteps)\n dts, counts = np.unique(dt, return... | [
"0.65900165",
"0.6369049",
"0.6316393",
"0.6256028",
"0.62290597",
"0.617701",
"0.61646646",
"0.6154375",
"0.60366976",
"0.6026818",
"0.6009626",
"0.5978013",
"0.59768033",
"0.5961701",
"0.5955014",
"0.58601505",
"0.5835052",
"0.58279943",
"0.5825557",
"0.5720708",
"0.5682689... | 0.8279923 | 0 |
def add_missing_weather_data(df):
    """Add missing timestamps to weather data and interpolate to fill the gaps.

    Builds the full per-site hourly grid, left-joins the observed readings
    onto it, and linearly interpolates each site's columns (both directions,
    so leading/trailing gaps are filled too).
    """
    hourly_range = pd.date_range(start=min(df.timestamp), end=max(df.timestamp), freq='H')
    site_ids = list(set(df.site_id))
    scaffold = pd.DataFrame(itertools.product(site_ids, hourly_range),
                            columns=['site_id', 'timestamp'])
    expanded = scaffold.merge(df, on=['site_id', 'timestamp'], how='left')
    filled = expanded.groupby('site_id').apply(
        lambda group: group.interpolate(limit_direction='both'))
    return filled
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def auto_fillna(ts: TimeSeries,\n **interpolate_kwargs) -> TimeSeries:\n\n ts_temp = ts.pd_dataframe()\n\n # pandas interpolate wrapper, with chosen `method`\n if 'limit_direction' not in interpolate_kwargs:\n interpolate_kwargs['limit_direction'] = 'both'\n interpolate_kwargs['in... | [
"0.7080858",
"0.68412477",
"0.6801546",
"0.67576146",
"0.6569322",
"0.6546012",
"0.6509167",
"0.6458176",
"0.6420076",
"0.6409065",
"0.6344805",
"0.6284957",
"0.62729025",
"0.6232209",
"0.6207936",
"0.6158501",
"0.61142576",
"0.6104147",
"0.606371",
"0.5919506",
"0.59161085",... | 0.8054154 | 0 |
def join_input_data_and_multi_index(data, dataset_name):
    """Join the meter data, weather data and building metadata into one df.

    data = dict of df's (keys: 'building_metadata', 'weather_train',
    'weather_test', 'train', 'test'); dataset_name = 'train' or 'test'.
    Returns a merged DataFrame multi-indexed on (building_id, timestamp) with
    calendar feature columns added.

    Fixes vs the original:
    - ``Series.dt.week`` was removed in pandas 2.0; use
      ``dt.isocalendar().week`` instead.
    - ``is_weekend`` flagged weekday in [0, 6], i.e. Monday and Sunday
      (pandas dayofweek: Monday == 0 ... Sunday == 6); the weekend is {5, 6}.
    """
    meter_df = data[dataset_name]
    building_df = data['building_metadata']
    weather_df = data['weather_' + dataset_name]

    # Attach building metadata first: it supplies site_id, which is needed
    # for the weather join.
    building_n_meter = meter_df.merge(building_df, on='building_id', how='left')
    joined_data = building_n_meter.merge(weather_df, on=['site_id', 'timestamp'], how='left')

    # Calendar features.
    joined_data['hour'] = joined_data['timestamp'].dt.hour
    joined_data['weekday'] = joined_data['timestamp'].dt.dayofweek
    joined_data['week_number'] = joined_data['timestamp'].dt.isocalendar().week.astype(int)
    joined_data['month'] = joined_data['timestamp'].dt.month
    joined_data['is_weekend'] = joined_data['weekday'].apply(lambda x: 1 if x in (5, 6) else 0)

    # multi index on building id and timestamp
    joined_data = joined_data.set_index(['building_id', 'timestamp']).sort_index()
    return joined_data
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_data(self):\n # take care of samples\n patients = self.samples.iloc[:,1].tolist()\n samples = self.samples.iloc[:,0].tolist()\n self.samples = pd.DataFrame(patients,index = samples,columns = ['patient']) # indexed by sample\n #\n # take care of expression data\n cols = self.expression... | [
"0.59869367",
"0.57781816",
"0.5758608",
"0.563408",
"0.56181586",
"0.55629724",
"0.5561206",
"0.5549236",
"0.5505305",
"0.55047166",
"0.54905367",
"0.54782295",
"0.54737127",
"0.5469189",
"0.544329",
"0.54359186",
"0.54143375",
"0.5409169",
"0.5407379",
"0.53848493",
"0.5383... | 0.69934803 | 0 |
def split_on_meter_type(joined_data, meter_types):
    """Split the joined data into a dict with one DataFrame per meter type."""
    split = {}
    for meter_type in meter_types:
        split[meter_type] = joined_data[joined_data['meter_type'] == meter_type]
    return split
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_meter_data_for_time_slice(apt_no, start_time, end_time):\n if apt_no in ['102A', 102]:\n apt_no = '102A'\n\n logger.debug(\"sMap: Getting meter data for %s between %s and %s\", apt_no, start_time, end_time)\n\n query = (\"select data in ('\" + str(start_time) + \"','\" + str(end_time) + \"'... | [
"0.54995227",
"0.5438723",
"0.5368502",
"0.5285218",
"0.5209991",
"0.5202449",
"0.51980925",
"0.5175748",
"0.5165017",
"0.5150522",
"0.51441115",
"0.5076895",
"0.50656265",
"0.50650203",
"0.5061612",
"0.5057883",
"0.50543916",
"0.5050856",
"0.49904716",
"0.4989884",
"0.498955... | 0.806629 | 0 |
def reset(self, setup=False):
    """Reset the state of the environment for a new episode.

    `setup` tells reset() it is being called from environment setup: with the
    'random' init scheme the position is drawn during setup and later resets
    reuse it (via self._initState), while 'episodeRandom' draws a fresh
    position on every reset.

    Fix: the y-coordinate branches previously tested and used ``startPosX``
    (a copy-paste bug), so e.g. a fixed numeric ``startPosY`` was ignored and
    y was derived from ``startPosX`` instead.
    """
    self._done = False
    self._nbSteps = 0

    if (self.startPosX == 'random' and setup) or self.startPosX == 'episodeRandom':
        x = random.randint(0, self._width - 1)
    elif self.startPosX == 'random' and not setup:
        x = self._initState[0]
    elif self.startPosX == 'center':
        # NOTE(review): 'center' maps to _width - 1, not _width // 2 — this
        # mirrors the original behaviour; confirm the intended convention.
        x = self._width - 1
    else:
        x = int(self.startPosX)

    if (self.startPosY == 'random' and setup) or self.startPosY == 'episodeRandom':
        y = random.randint(0, self._height - 1)
    elif self.startPosY == 'random' and not setup:
        y = self._initState[1]
    elif self.startPosY == 'center':
        y = self._height - 1
    else:
        y = int(self.startPosY)

    self._currentPos = (x, y)
    self._trajectory = [(x, y)]
    return (x, y)
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reset(self):\n\n # initialize gym env variables\n self.finish = False\n self.curr_step = -1\n self.curr_episode += 1\n\n # initialize target position\n self.target = np.random.uniform(-10.0,10.0,size=(2))\n\n # initialize sheep positions\n if self.fixed_r... | [
"0.66944516",
"0.6647286",
"0.65143913",
"0.6312656",
"0.6290814",
"0.62830704",
"0.6277763",
"0.62062037",
"0.61978877",
"0.6173854",
"0.61685586",
"0.61399466",
"0.613353",
"0.61265284",
"0.6108667",
"0.6104944",
"0.60824627",
"0.60824627",
"0.6075161",
"0.6065723",
"0.6042... | 0.6798604 | 0 |
def test_data_handling_nc_cc():
    """Make sure the netcdf cc data handler operates correctly"""
    input_files = [os.path.join(TEST_DATA_DIR, 'ua_test.nc'),
                   os.path.join(TEST_DATA_DIR, 'va_test.nc'),
                   os.path.join(TEST_DATA_DIR, 'orog_test.nc'),
                   os.path.join(TEST_DATA_DIR, 'zg_test.nc')]

    with xr.open_mfdataset(input_files) as fh:
        # Anchor the extraction target at the minimum-lat/lon corner of the
        # raw domain and keep the last-pressure-level u/v winds for later
        # comparison against the handler output.
        min_lat = np.min(fh.lat.values)
        min_lon = np.min(fh.lon.values)
        target = (min_lat, min_lon)
        plevel = fh.plev[-1]
        # Axes reordered (1, 2, 0) so time becomes the trailing dimension,
        # matching handler.data's (spatial, spatial, time, feature) layout
        # asserted below — presumably raw data is (time, lat, lon); confirm.
        ua = np.transpose(fh['ua'][:, -1, ...].values, (1, 2, 0))
        va = np.transpose(fh['va'][:, -1, ...].values, (1, 2, 0))

    # Height-named wind features should load into the expected 4-D shape.
    handler = DataHandlerNCforCC(input_files, features=['U_100m', 'V_100m'],
                                 target=target, shape=(20, 20),
                                 val_split=0.0,
                                 worker_kwargs=dict(max_workers=1))

    assert handler.data.shape == (20, 20, 20, 2)

    # Pressure-level features must reproduce the raw u/v fields exactly.
    handler = DataHandlerNCforCC(input_files,
                                 features=[f'U_{int(plevel)}pa',
                                           f'V_{int(plevel)}pa'],
                                 target=target, shape=(20, 20),
                                 val_split=0.0,
                                 worker_kwargs=dict(max_workers=1))
    if handler.invert_lat:
        # Undo the handler's latitude flip so axes line up with the raw data.
        handler.data = handler.data[::-1]

    assert handler.data.shape == (20, 20, 20, 2)
    assert np.allclose(ua, handler.data[..., 0])
    assert np.allclose(va, handler.data[..., 1])
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _check_data(self):\n # ================ CHECK DATA / CONNECT / SELECT ================\n N = self.xyz.shape[0]\n # Chech array :\n if (self.connect.shape != (N, N)) or not isinstance(self.connect,\n np.ndarray):\n ... | [
"0.60441834",
"0.5924442",
"0.5870361",
"0.56942517",
"0.5658238",
"0.56382203",
"0.5624735",
"0.5600587",
"0.5563595",
"0.55417067",
"0.54565823",
"0.5366081",
"0.5311197",
"0.53014123",
"0.5301123",
"0.52937174",
"0.5290408",
"0.5289225",
"0.52625626",
"0.52489024",
"0.5247... | 0.74189216 | 0 |
def test_solar_cc():
    """Test solar data handling from CC data file with clearsky ratio
    calculated using clearsky ratio from NSRDB h5 file."""

    features = ['clearsky_ratio', 'rsds', 'clearsky_ghi']
    input_files = [os.path.join(TEST_DATA_DIR, 'rsds_test.nc')]
    nsrdb_source_fp = os.path.join(TEST_DATA_DIR, 'test_nsrdb_co_2018.h5')

    with xr.open_mfdataset(input_files) as fh:
        min_lat = np.min(fh.lat.values)
        # NOTE(review): the -360 shift presumably converts longitudes from
        # [0, 360) to the [-180, 180) convention of the NSRDB file — confirm.
        min_lon = np.min(fh.lon.values) - 360
        target = (min_lat, min_lon)
        shape = (len(fh.lat.values), len(fh.lon.values))

    # Clearsky features require an NSRDB source file; without one the handler
    # is expected to fail its input validation.
    with pytest.raises(AssertionError):
        handler = DataHandlerNCforCC(input_files, features=features,
                                     target=target, shape=shape,
                                     val_split=0.0,
                                     worker_kwargs=dict(max_workers=1))

    handler = DataHandlerNCforCC(input_files, features=features,
                                 nsrdb_source_fp=nsrdb_source_fp,
                                 target=target, shape=shape,
                                 temporal_slice=slice(0, 1),
                                 val_split=0.0,
                                 worker_kwargs=dict(max_workers=1))

    # Feature order follows the `features` list above.
    cs_ratio = handler.data[..., 0]
    ghi = handler.data[..., 1]
    cs_ghi = handler.data[..., 2]
    cs_ratio_truth = ghi / cs_ghi

    # The clearsky ratio must be a proper fraction of GHI over clearsky GHI.
    assert cs_ratio.max() < 1
    assert cs_ratio.min() > 0
    assert (ghi < cs_ghi).all()
    assert np.allclose(cs_ratio, cs_ratio_truth)

    with Resource(nsrdb_source_fp) as res:
        meta = res.meta
        tree = KDTree(meta[['latitude', 'longitude']])
        cs_ghi_true = res['clearsky_ghi']

        # check a few sites against NSRDB source file
        for i in range(4):
            for j in range(4):
                # Nearest NSRDB site to this handler grid cell.
                test_coord = handler.lat_lon[i, j]
                _, inn = tree.query(test_coord)

                # NOTE(review): 0:48 looks like one day of half-hourly NSRDB
                # steps averaged to the handler's single timestep — confirm.
                assert np.allclose(cs_ghi_true[0:48, inn].mean(), cs_ghi[i, j])
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main():\n regexham = r'\\s+\\((\\d+,\\s*\\d+)\\)\\s+([\\-+]?\\d+\\.\\d+[eEdD]?[\\-+]?\\d+)' #to extract the Hamiltonian.\n root = '.'\n #fname = 'output_files/'\n ciffci = CIFlow_Reader('testfci.dat', regexp = regexham , read_ham= True)\n ciffcipar = CIFlow_Reader( 'psi0_output10outputfci.dat', ... | [
"0.6232863",
"0.6190931",
"0.6065787",
"0.5965935",
"0.59624934",
"0.5858929",
"0.5856982",
"0.5837951",
"0.5831565",
"0.58128524",
"0.5789938",
"0.57884246",
"0.5777758",
"0.5774943",
"0.57337826",
"0.5721014",
"0.569534",
"0.5672289",
"0.56514597",
"0.5634904",
"0.5580489",... | 0.7407615 | 0 |
def __init__(self, keys_to_track):
    """keys_to_track order is important!  Matches will be tested in this order."""
    self.keys_to_track = keys_to_track
    # One lookup table per tracked key, mapping value -> matched object.
    self.tracker = {tracked_key: {} for tracked_key in self.keys_to_track}
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _add_matches(self):\r\n for record in self.records:\r\n match_dict={key_to_track: record.get(key_to_track)\r\n for key_to_track in self.key_matcher.keys()}\r\n self.key_matcher.add(obj=record,\r\n match_dict=match_dict)",
"d... | [
"0.72052854",
"0.6073225",
"0.5684413",
"0.5646537",
"0.55177075",
"0.54479295",
"0.54022795",
"0.5286637",
"0.5255114",
"0.52513975",
"0.5236649",
"0.52352864",
"0.5173165",
"0.5170619",
"0.5168231",
"0.51618785",
"0.51518595",
"0.51467997",
"0.5143868",
"0.51270205",
"0.509... | 0.6227405 | 1 |
def add(self, obj, match_dict):
    """Add obj as a match for each non-empty value in match_dict.

    Raises AssertionError if match_dict contains a key that is not tracked.
    None and empty-string values are ignored rather than indexed.

    Fix: ``dict.has_key()`` was removed in Python 3; use the ``in`` operator.
    """
    for match_key in match_dict.keys():
        assert match_key in self.keys_to_track
    for key_to_track in self.keys_to_track:
        if key_to_track in match_dict:
            match_val = match_dict[key_to_track]
            if match_val is not None and match_val != '':
                self.tracker[key_to_track][match_val] = obj
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _add_matches(self):\r\n for record in self.records:\r\n match_dict={key_to_track: record.get(key_to_track)\r\n for key_to_track in self.key_matcher.keys()}\r\n self.key_matcher.add(obj=record,\r\n match_dict=match_dict)",
"d... | [
"0.72251153",
"0.6223135",
"0.612522",
"0.6043846",
"0.6039093",
"0.5865672",
"0.5826964",
"0.5673978",
"0.5657533",
"0.56395197",
"0.5595575",
"0.55939347",
"0.5582442",
"0.55700904",
"0.55490994",
"0.54979956",
"0.5410391",
"0.5368418",
"0.536791",
"0.535772",
"0.5288205",
... | 0.84316427 | 0 |
def match(self, match_dict):
    """Find a match using match_dict.  Returns None if there is no match.

    Keys are tried in self.keys_to_track order, so earlier keys win.
    Raises AssertionError if match_dict contains a key that is not tracked.

    Fix: ``dict.has_key()`` was removed in Python 3; use the ``in`` operator.
    """
    for match_key in match_dict.keys():
        assert match_key in self.keys_to_track
    for key_to_track in self.keys_to_track:
        if key_to_track in match_dict:
            match_val = match_dict[key_to_track]
            if match_val in self.tracker[key_to_track]:
                return self.tracker[key_to_track][match_val]
    return None
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _find_match(needle: dict, haystack: list, keys: list):\n for item in haystack:\n for key in keys:\n if item.get(key) != needle[key]:\n break\n else:\n return item\n return None",
"def dict_match(d, key, default=None):\n\n if key in d and \"[\" not i... | [
"0.6650081",
"0.64368933",
"0.62785393",
"0.6271919",
"0.606207",
"0.60218126",
"0.6009384",
"0.59157956",
"0.5891576",
"0.586902",
"0.5831622",
"0.58111554",
"0.58071977",
"0.580484",
"0.57333195",
"0.5707258",
"0.5707258",
"0.57004094",
"0.5668807",
"0.561176",
"0.5579269",... | 0.8009377 | 0 |
def _add_matches(self):
    """Utility function to populate key_matcher from self.records."""
    for record in self.records:
        wanted = {tracked_key: record.get(tracked_key)
                  for tracked_key in self.key_matcher.keys()}
        self.key_matcher.add(obj=record, match_dict=wanted)
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def match_source_key(self, match):\n raise NotImplementedError",
"def __init__(self):\n self.key_to_record = {}\n self.mutation_to_key = {}\n self._innovation_key_generator = count(0)",
"def test_toofewkeys(self):\n self.assertRaises(recordparser.KeyListMismatchError,\n ... | [
"0.5777237",
"0.5721886",
"0.5531455",
"0.548444",
"0.54103255",
"0.54072803",
"0.5384197",
"0.53711194",
"0.5370351",
"0.53101146",
"0.5296817",
"0.52691966",
"0.5252932",
"0.5242916",
"0.5215903",
"0.52110225",
"0.5207979",
"0.5202271",
"0.5188836",
"0.5164202",
"0.51598597... | 0.7259984 | 0 |
def _check_flip(origin_imgs, result_imgs):
    """Check whether result_imgs equals origin_imgs mirrored horizontally."""
    h, w, c = origin_imgs.shape
    # all() short-circuits on the first mismatching pixel, matching the
    # original early-return behaviour.
    return all(
        result_imgs[i, j, k] == origin_imgs[i, w - 1 - j, k]
        for i in range(h)
        for j in range(w)
        for k in range(c)
    )
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_flip(origin_imgs, result_imgs, flip_type):\n n, _, _, _ = np.shape(origin_imgs)\n if flip_type == 'horizontal':\n for i in range(n):\n if np.any(result_imgs[i] != np.fliplr(origin_imgs[i])):\n return False\n else:\n # yapf: disable\n for i in range(... | [
"0.7891626",
"0.7111249",
"0.588529",
"0.5814684",
"0.579397",
"0.5752564",
"0.5637771",
"0.5609224",
"0.5603559",
"0.5602122",
"0.55922",
"0.5581342",
"0.5511366",
"0.5511366",
"0.5508238",
"0.54916257",
"0.545488",
"0.54529166",
"0.5440112",
"0.5437185",
"0.5426509",
"0.5... | 0.8528314 | 0 |
def run_single_cycle(self, collector=None):
    """Run a single cycle of the sample collection: read the monitored proc
    file and extract all metrics into *collector*.

    Returns an empty dict immediately once a permanent failure (missing file,
    bad permissions, unsupported proc file type) has been recorded; otherwise
    returns *collector* (possibly unchanged when the file cannot be read).
    """
    self._timestamp = int(time.time())

    # There are certain error conditions, such as the system not supporting
    # a particular proc file type, that we will never recover from.  So,
    # just always early exit.
    if self._failed:
        return {}

    filename = self._file_pattern % self._pid

    if not collector:
        collector = {}
    if self._file is None:
        try:
            self._file = open(filename, "r")
        except IOError as e:
            # We take a simple approach.  If we don't find the file or
            # don't have permissions for it, then just don't collect this
            # stat from now on.  If the user changes the configuration file
            # we will try again to read the file then.
            self._failed = True
            if e.errno == errno.EACCES:
                self._logger.error(
                    "The agent does not have permission to read %s. "
                    "Maybe you should run it as root.",
                    filename,
                )
            elif e.errno == errno.ENOENT:
                self._logger.error(
                    (
                        "The agent cannot read %s. Your system may not support that proc file "
                        'type or the process with pid "%s" doesn\'t exist'
                    ),
                    filename,
                    self._pid,
                )
            # Ignore 'process not found' errors (likely caused because the process exited
            # but re-raise the exception for all other errors
            elif e.errno != errno.ESRCH:
                raise e

    if self._file is not None:
        try:
            # Seek back to the start: the same handle is reused every cycle.
            self._file.seek(0)
            return self.gather_sample(self._file, collector=collector)
        except IOError as e:
            # log the error if the errno isn't 'process not found'. Process not found likely means the
            # process exited, so we ignore that because it's within the realm of expected behaviour
            if e.errno != errno.ESRCH:
                self._logger.error(
                    "Error gathering sample for file: '%s'\n\t%s"
                    % (filename, six.text_type(e))
                )

            # close the file. This will cause the file to be reopened next call to run_single_cycle
            self.close()
    return collector
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run(self):\r\n self.collect_data()",
"def gather_sample(self, my_file, collector=None):\n\n pass",
"def collect_samples(self):\n self.__running = True\n with open(self.__filename, 'a') as output:\n next_sample_time = time.time()\n while self.__running:\n ... | [
"0.6506419",
"0.64478827",
"0.6249575",
"0.6136713",
"0.6083549",
"0.60691047",
"0.5937353",
"0.5804329",
"0.5789543",
"0.5710799",
"0.5666293",
"0.56556666",
"0.5654352",
"0.5652799",
"0.5633741",
"0.5620311",
"0.5596868",
"0.55945826",
"0.5588704",
"0.5581774",
"0.5576543",... | 0.71581453 | 0 |
def __calculate_time_cs(self, jiffies):
    """Return the number of centiseconds (1/100ths secs) for the given number
    of jiffies (a timing unit used by the kernel)."""
    return int(jiffies * 100.0 / self._jiffies_per_sec)
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calculate_time_ms(self, jiffies):\n\n return int((jiffies * 1000.0) / self._jiffies_per_sec)",
"def jiffies(_load_time=time.time()):\n return int(100*(time.time()-_load_time))",
"def _nsec_to_usec_round(nsec):\n return (nsec + 500) // 10 ** 3",
"def millis(): \n return int(round(mo... | [
"0.7653799",
"0.652349",
"0.6424848",
"0.6171948",
"0.6161103",
"0.6105997",
"0.5744971",
"0.56402147",
"0.5638903",
"0.5606995",
"0.5588372",
"0.5540272",
"0.5520585",
"0.5486425",
"0.54827213",
"0.5427575",
"0.5426606",
"0.5411524",
"0.54085886",
"0.54085886",
"0.54085886",... | 0.7872766 | 0 |
def calculate_time_ms(self, jiffies):
    """Return the number of milliseconds for the given number of jiffies
    (a timing unit used by the kernel)."""
    return int(jiffies * 1000.0 / self._jiffies_per_sec)
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __calculate_time_cs(self, jiffies):\n\n return int((jiffies * 100.0) / self._jiffies_per_sec)",
"def jiffies(_load_time=time.time()):\n return int(100*(time.time()-_load_time))",
"def millis(): \n return int(round(monotonic.monotonic() * C.MILLISECONDS))",
"def millis(): \r\n re... | [
"0.73772615",
"0.7025039",
"0.6870207",
"0.6851476",
"0.6639381",
"0.6579558",
"0.6533067",
"0.63947713",
"0.6242466",
"0.61780435",
"0.6164305",
"0.6149434",
"0.61466753",
"0.6125577",
"0.61053866",
"0.6076043",
"0.6049589",
"0.60407495",
"0.60054356",
"0.59866726",
"0.59761... | 0.82893556 | 0 |
def gather_sample(self, stat_file, collector=None):
    """Gather the process metrics from a /proc/<pid>/stat file into *collector*.

    Fixes vs the original:
    - The io.wait guard tested ``len(fields) >= 39`` but then indexed
      ``fields[39]``, which still raises IndexError when exactly 39 fields
      are present; the guard is now ``> 39``.
    - The field-numbering comment claimed the state field was chopped off;
      only the pid and "(comm)" prefix are removed, so fields[0] is the
      state (man-page field 3) and man-page field N lives at fields[N - 3].
    """
    if not collector:
        collector = {}
    # The file format is just a single line of all the fields.
    line = stat_file.readlines()[0]

    # Chop off the pid and executable-file prefix.  The executable file is
    # terminated with a paren, so just search for ") ".
    line = line[(line.find(") ") + 2):]
    fields = line.split()

    # See http://man7.org/linux/man-pages/man5/proc.5.html for field numbers.
    # starttime is man-page field 22 -> fields[19].
    process_uptime = self.__get_uptime_ms() - self.calculate_time_ms(
        int(fields[19])
    )

    collector.update(
        {
            Metric("app.cpu", "user"): self.__calculate_time_cs(int(fields[11])),
            Metric("app.cpu", "system"): self.__calculate_time_cs(int(fields[12])),
            Metric("app.uptime", None): process_uptime,
            Metric("app.nice", None): float(fields[16]),
            Metric("app.threads", None): int(fields[17]),
            Metric("app.mem.majflt", None): int(fields[9]),
            # delayacct_blkio_ticks (man-page field 42) only exists on
            # Linux >= 2.6.18, so guard the index.
            Metric("app.io.wait", None): int(fields[39])
            if len(fields) > 39
            else 0,
        }
    )
    return collector
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def gather_sample(self, stat_file, collector=None):\n\n if not collector:\n collector = {}\n\n for line in stat_file:\n # Each line has a format of:\n # Tag: Value\n #\n # We parse out all lines looking like that and match the stats we care about... | [
"0.6764025",
"0.67190164",
"0.6688609",
"0.65135384",
"0.6446264",
"0.6374985",
"0.63547957",
"0.6333468",
"0.63260734",
"0.6261242",
"0.6237093",
"0.6232534",
"0.61950576",
"0.618388",
"0.6182165",
"0.6152383",
"0.6111384",
"0.6086403",
"0.6057407",
"0.6038634",
"0.60113156"... | 0.7137697 | 0 |
def gather_sample(self, stat_file, collector=None):
    """Gather the per-socket-type "inuse" counts from a sockstat file."""
    if not collector:
        collector = {}
    # Compile once; only the "<proto>: inuse <n>" lines matter here.
    inuse_pattern = re.compile(r"(\w+): inuse (\d+)")
    for line in stat_file:
        found = inuse_pattern.search(line)
        if found is not None:
            metric = Metric("app.net.sockets_in_use", found.group(1).lower())
            collector[metric] = int(found.group(2))
    return collector
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def gather_sample(self, stat_file, collector=None):\n\n # This file format is weird. Each set of stats is outputted in two\n # lines. First, a header line that list the field names. Then a\n # a value line where each value is specified in the appropriate column.\n # You have to match... | [
"0.6393384",
"0.6306739",
"0.60562605",
"0.6037875",
"0.597491",
"0.59416795",
"0.5933875",
"0.5916901",
"0.57845867",
"0.577276",
"0.5749845",
"0.571024",
"0.5700701",
"0.56980515",
"0.5637241",
"0.5629849",
"0.56119055",
"0.55269897",
"0.5518267",
"0.55144465",
"0.5510193",... | 0.71067053 | 0 |
def collect(self):
    """Collect the metrics from all gatherers into a single dict.

    A failing gatherer is logged and skipped so the remaining gatherers can
    still report their metrics.
    """
    merged = {}
    for gatherer in self.gathers:
        try:
            stats = gatherer.run_single_cycle(collector=merged)
            if stats:
                merged.update(stats)
        except Exception as ex:
            self._logger.exception(
                "Exception while collecting metrics for PID: %s of type: %s. Details: %s",
                self.pid,
                type(gatherer),
                repr(ex),
            )
    return merged
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def collect(self): # pylint: disable=no-self-use\n start = time.time()\n for metric in metric_rq():\n yield metric\n\n gauge = GaugeMetricFamily(\n \"nautobot_rq_metrics_processing_ms\", \"Time in ms to generate the app metrics endpoint\"\n )\n duration = t... | [
"0.6958763",
"0.69119155",
"0.68189895",
"0.66842306",
"0.66356134",
"0.6568736",
"0.6555862",
"0.6482938",
"0.64655095",
"0.64260054",
"0.641891",
"0.63836133",
"0.63763535",
"0.62167543",
"0.62135196",
"0.62097865",
"0.6177793",
"0.61742324",
"0.61667204",
"0.61239713",
"0.... | 0.7913655 | 0 |
def current_process(self):
    """Return the process object tracked for the agent itself."""
    return self._current_process
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _launchAgentProcess( self ):\n return subprocess.Popen( [ sys.executable, os.path.join( sys.path[0], 'agentProcess.py' ), str( _processPid ) ], stdin=subprocess.PIPE, stdout=subprocess.PIPE )",
"def get_process(self, pid):\n return self.processes.get(pid, None)",
"def get_my_process():\n r... | [
"0.69227177",
"0.68076235",
"0.66409147",
"0.65917194",
"0.6577045",
"0.65301675",
"0.6481512",
"0.6463723",
"0.6425003",
"0.639765",
"0.6345163",
"0.6329305",
"0.63107604",
"0.63107604",
"0.6309831",
"0.6220328",
"0.6186098",
"0.61813504",
"0.6084275",
"0.6083225",
"0.605136... | 0.69763553 | 0 |
def get_child_processes(self, ppid):
    """Given a process id, return all children processes (recursively).

    Fix/perf: the original kept the visited set in a list (O(n) membership
    tests on every step, i.e. accidentally quadratic) and then deduplicated
    again with ``list(set(...))``.  This version does the same breadth-first
    walk over ``parent_to_children_map`` with sets, returning the same set of
    pids (order was already unspecified because of the final set() pass).
    """
    all_children = set(self.parent_to_children_map[ppid])
    frontier = set(all_children)

    # Expand children 'recursively'; the visited set guards against cycles.
    while frontier:
        current = frontier.pop()
        for child in self.parent_to_children_map.get(current) or ():
            if child not in all_children:
                all_children.add(child)
                frontier.add(child)
    return list(all_children)
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_children(ppid):\n\n pid_dct = {}\n for proc in build_process_list():\n proc[\"_children\"] = []\n pid_dct[proc[\"pid\"]] = proc\n\n # fill in children array\n for pid in list(pid_dct.keys()):\n parent_pid = pid_dct[pid][\"parent_pid\"]\n\n if parent_pid in pid_dct:\n... | [
"0.8440308",
"0.78711176",
"0.7738845",
"0.73896194",
"0.73563373",
"0.7110651",
"0.6810038",
"0.66680056",
"0.651955",
"0.6309715",
"0.62305254",
"0.603989",
"0.59731925",
"0.5933038",
"0.5905366",
"0.5880942",
"0.5819915",
"0.57845265",
"0.5777258",
"0.5750268",
"0.574295",... | 0.7898279 | 1 |
Returns a list of all running process ids | def get_running_processes(self):
all_processes = []
for _process in self.processes:
all_processes.append(_process["pid"])
return all_processes | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_pid_list():\r\n pids = [int(x) for x in os.listdir('/proc') if x.isdigit()]\r\n return pids",
"def pids(self):\n return self._pidToProcess.iterkeys()",
"def running_procs(self) -> List[int]:\n return [p.model_id for p in self.primary_scheduler.queue_nodes.run_q]",
"def getActivePr... | [
"0.7804924",
"0.7730347",
"0.7697995",
"0.7590113",
"0.7520795",
"0.7483522",
"0.7411062",
"0.7385953",
"0.7380598",
"0.73299384",
"0.73251057",
"0.7316895",
"0.7233392",
"0.7152342",
"0.71504444",
"0.71448386",
"0.7116604",
"0.7027047",
"0.69536257",
"0.6934355",
"0.6899609"... | 0.813076 | 0 |
Like get_matches_commandline method, given a string, match the processes on the name but also returns the matched processes' children | def get_matches_commandline_with_children(self, match_pattern):
matched_pids = self.get_matches_commandline(match_pattern)
for matched_pid in matched_pids:
matched_pids.extend(self.get_child_processes(matched_pid))
return list(set(matched_pids)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_matches_commandline(self, match_pattern):\n\n matches = []\n for _process in self.processes:\n if re.search(match_pattern, _process[\"cmd\"]):\n matches.append(_process[\"pid\"])\n return matches",
"def find(name, arg=None):\r\n for p in get_processes():\... | [
"0.69588715",
"0.63922125",
"0.61809975",
"0.6156374",
"0.6085242",
"0.602419",
"0.59251225",
"0.5850459",
"0.58301437",
"0.58174837",
"0.58165544",
"0.5804579",
"0.58037686",
"0.57153285",
"0.56987065",
"0.5696199",
"0.5649648",
"0.5642261",
"0.55726624",
"0.5563265",
"0.555... | 0.75858635 | 0 |
For a process, record the metrics in a historical metrics collector Collects the historical result of each metric per process in __metrics_history | def record_metrics(self, pid, metrics):
for _metric, _metric_value in metrics.items():
if not self.__metrics_history[pid].get(_metric):
self.__metrics_history[pid][_metric] = []
self.__metrics_history[pid][_metric].append(_metric_value)
# only keep the last 2 running history for any metric
self.__metrics_history[pid][_metric] = self.__metrics_history[pid][_metric][
-2:
] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _calculate_aggregated_metrics(self):\n\n # using the historical values, calculate the aggregate\n # there are two kinds of metrics:\n # a) cumulative metrics - only the delta of the last 2 recorded values is used (eg cpu cycles)\n # b) absolute metrics - the last absolute value is u... | [
"0.6638309",
"0.61203325",
"0.6019831",
"0.59060955",
"0.59058285",
"0.55844504",
"0.54852",
"0.54657155",
"0.53771794",
"0.53647846",
"0.5356617",
"0.5345713",
"0.5341496",
"0.53098404",
"0.52967745",
"0.5279151",
"0.5278557",
"0.52188367",
"0.52059686",
"0.51821625",
"0.517... | 0.7013191 | 0 |
At the beginning of each process metric calculation, the absolute (noncumulative) metrics need to be overwritten to the combined process(es) result. Only the cumulative metrics need the previous value to calculate delta. We should set the absolute metric to 0 in the beginning of this "epoch" | def _reset_absolute_metrics(self):
for pid, process_metrics in self.__metrics_history.items():
for _metric, _metric_values in process_metrics.items():
if not _metric.is_cumulative:
self.__aggregated_metrics[_metric] = 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _calculate_aggregated_metrics(self):\n\n # using the historical values, calculate the aggregate\n # there are two kinds of metrics:\n # a) cumulative metrics - only the delta of the last 2 recorded values is used (eg cpu cycles)\n # b) absolute metrics - the last absolute value is u... | [
"0.66141224",
"0.5771144",
"0.57508063",
"0.5575068",
"0.5574594",
"0.55501413",
"0.5535069",
"0.54281497",
"0.53845894",
"0.535291",
"0.5337103",
"0.5333742",
"0.5300862",
"0.5274768",
"0.5269434",
"0.52690667",
"0.52612674",
"0.5259409",
"0.52576226",
"0.5246559",
"0.524457... | 0.75371194 | 0 |
Calculates the aggregated metric values based on the current running processes and the historical metric record | def _calculate_aggregated_metrics(self):
# using the historical values, calculate the aggregate
# there are two kinds of metrics:
# a) cumulative metrics - only the delta of the last 2 recorded values is used (eg cpu cycles)
# b) absolute metrics - the last absolute value is used
running_pids_set = set(self.__pids)
for pid, process_metrics in self.__metrics_history.items():
for _metric, _metric_values in process_metrics.items():
if not self.__aggregated_metrics.get(_metric):
self.__aggregated_metrics[_metric] = 0
if _metric.is_cumulative:
if pid in running_pids_set:
if len(_metric_values) > 1:
# only report the cumulative metrics for more than one sample
self.__aggregated_metrics[_metric] += (
_metric_values[-1] - _metric_values[-2]
)
else:
if pid in running_pids_set:
# absolute metric - accumulate the last reported value
self.__aggregated_metrics[_metric] += _metric_values[-1] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute_metrics(self):\n pass",
"def _reset_absolute_metrics(self):\n\n for pid, process_metrics in self.__metrics_history.items():\n for _metric, _metric_values in process_metrics.items():\n if not _metric.is_cumulative:\n self.__aggregated_metrics[... | [
"0.65272045",
"0.6503945",
"0.6044549",
"0.5962655",
"0.5960727",
"0.5932751",
"0.58754987",
"0.5782237",
"0.5775919",
"0.5757956",
"0.57359225",
"0.57347035",
"0.5726887",
"0.57170856",
"0.57170856",
"0.56863284",
"0.56776404",
"0.56517655",
"0.56427175",
"0.5625753",
"0.561... | 0.8260474 | 0 |
Collect the perprocess tracker for the monitored process(es). | def gather_sample(self):
for _pid in self._select_processes():
if not self.__trackers.get(_pid):
self.__trackers[_pid] = ProcessTracker(_pid, self._logger, self.__id)
self._reset_absolute_metrics()
for _tracker in self.__trackers.values():
_metrics = _tracker.collect()
self.record_metrics(_tracker.pid, _metrics)
self._calculate_aggregated_metrics()
self._remove_dead_processes()
self.print_metrics() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _proc_collect(self) -> None:\n while True:\n self.process_num_threads.set(self._process.num_threads())\n self.process_memory_bytes.set(self._process.memory_info().rss)\n self.process_cpu_percent.set(self._process.cpu_percent())\n\n sleep(self.process_scrape_in... | [
"0.70074445",
"0.70008403",
"0.65227914",
"0.6477468",
"0.64733964",
"0.6447299",
"0.64127636",
"0.6285023",
"0.6207518",
"0.6167097",
"0.6145146",
"0.61055756",
"0.6044723",
"0.6031808",
"0.5893498",
"0.58866787",
"0.58456194",
"0.5834667",
"0.5807094",
"0.5745234",
"0.57324... | 0.7125841 | 0 |
Set the PID of the process that was marked as $$TBD. | def set_pid(self, pid): # type: (int) -> None
for i in range(len(self.__target_pids)):
if self.__target_pids[i] == "$$TBD":
self.__target_pids[i] = pid
break | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def def_pid(self,pid):\n self.pid=int(pid)",
"def pid(self, pid):\n\n self._pid = pid",
"def pid(self, pid):\n\n self._pid = pid",
"def _update_PID(self):\n self.pid = PID(p=self.paramP, i=self.paramI, d=self.paramD, setpoint=self.voltageSetpoint, memory=self.paramMemory)",
"def... | [
"0.6045698",
"0.6005458",
"0.6005458",
"0.5414311",
"0.5351061",
"0.5235025",
"0.5235025",
"0.52202576",
"0.52202576",
"0.51928914",
"0.51871926",
"0.50754386",
"0.5040368",
"0.5033574",
"0.49666995",
"0.49178597",
"0.49152836",
"0.49008197",
"0.4899533",
"0.48833144",
"0.488... | 0.79088074 | 0 |
Get the probability of a word following a context. i.e. The conditional probability P(word|context) | def prob(self, word, context=None):
if not context:
context = ()
else:
context = tuple(context)
prob = 0
for i in range(len(context) + 1):
prob += self.weights[i] * self.ngram_cpd[context[i:]][word]
return prob | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def prob(self, word, context):\n\n context = tuple(context)\n \n context_lenth = len(context) \n if context_lenth == 0:\n line = ''\n elif context_lenth == 1:\n line = context[0]\n elif context_lenth >= 2:\n line = context[0]\n f... | [
"0.8106743",
"0.75592023",
"0.74971807",
"0.7463757",
"0.7168955",
"0.7168955",
"0.7081482",
"0.707541",
"0.69993883",
"0.69632804",
"0.6952478",
"0.6929444",
"0.6909919",
"0.67921746",
"0.6687317",
"0.6680557",
"0.66741383",
"0.66102266",
"0.6583568",
"0.6526557",
"0.6460864... | 0.8385041 | 0 |
YOLOV3 network hybrid forward. | def hybrid_forward(self, F, x, *args):
all_box_centers = []
all_box_scales = []
all_objectness = []
all_class_pred = []
all_anchors = []
all_offsets = []
all_feat_maps = []
all_detections = []
routes = []
for stage, block, output in zip(self.stages, self.yolo_blocks, self.yolo_outputs):
x = stage(x)
routes.append(x)
# the YOLO output layers are used in reverse order, i.e., from very deep layers to shallow
for i, block, output in zip(range(len(routes)), self.yolo_blocks, self.yolo_outputs):
x, tip = block(x)
if autograd.is_training():
dets, box_centers, box_scales, objness, class_pred, anchors, offsets = output(tip)
all_box_centers.append(box_centers.reshape((0, -3, -1)))
all_box_scales.append(box_scales.reshape((0, -3, -1)))
all_objectness.append(objness.reshape((0, -3, -1)))
all_class_pred.append(class_pred.reshape((0, -3, -1)))
all_anchors.append(anchors)
all_offsets.append(offsets)
# here we use fake featmap to reduce memory consuption, only shape[2, 3] is used
fake_featmap = F.zeros_like(tip.slice_axis(
axis=0, begin=0, end=1).slice_axis(axis=1, begin=0, end=1))
all_feat_maps.append(fake_featmap)
else:
dets = output(tip)
all_detections.append(dets)
if i >= len(routes) - 1:
break
# add transition layers
x = self.transitions[i](x)
# upsample feature map reverse to shallow layers
upsample = _upsample(x, stride=2)
route_now = routes[::-1][i + 1]
x = F.concat(F.slice_like(upsample, route_now * 0, axes=(2, 3)), route_now, dim=1)
if autograd.is_training():
# during training, the network behaves differently since we don't need detection results
if autograd.is_recording():
# generate losses and return them directly
box_preds = F.concat(*all_detections, dim=1)
all_preds = [F.concat(*p, dim=1) for p in [
all_objectness, all_box_centers, all_box_scales, all_class_pred]]
all_targets = self._target_generator(box_preds, *args)
return self._loss(*(all_preds + all_targets))
# return raw predictions, this is only used in DataLoader transform function.
return (F.concat(*all_detections, dim=1), all_anchors, all_offsets, all_feat_maps,
F.concat(*all_box_centers, dim=1), F.concat(*all_box_scales, dim=1),
F.concat(*all_objectness, dim=1), F.concat(*all_class_pred, dim=1))
# concat all detection results from different stages
result = F.concat(*all_detections, dim=1)
# apply nms per class
if self.nms_thresh > 0 and self.nms_thresh < 1:
result = F.contrib.box_nms(
result, overlap_thresh=self.nms_thresh, valid_thresh=0.01,
topk=self.nms_topk, id_index=0, score_index=1, coord_start=2, force_suppress=False)
if self.post_nms > 0:
result = result.slice_axis(axis=1, begin=0, end=self.post_nms)
ids = result.slice_axis(axis=-1, begin=0, end=1)
scores = result.slice_axis(axis=-1, begin=1, end=2)
bboxes = result.slice_axis(axis=-1, begin=2, end=None)
return ids, scores, bboxes | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def forward(self, x):\n for name, module in self.base._modules.items():\n if name == 'avgpool':\n break\n\n if name == 'layer3':\n l2 = Variable(x)\n\n x = Variable(module(x))\n l4 = Variable(x)\n\n \"\"\"for name, param in sel... | [
"0.65314513",
"0.6292418",
"0.6263829",
"0.6261159",
"0.62541807",
"0.6251553",
"0.6219152",
"0.62162906",
"0.61927277",
"0.6178997",
"0.6141839",
"0.6136382",
"0.6127676",
"0.6119838",
"0.6111306",
"0.61022437",
"0.6099669",
"0.60930973",
"0.60845137",
"0.60679704",
"0.60615... | 0.7524717 | 0 |
Set nonmaximum suppression parameters. | def set_nms(self, nms_thresh=0.45, nms_topk=400, post_nms=100):
self._clear_cached_op()
self.nms_thresh = nms_thresh
self.nms_topk = nms_topk
self.post_nms = post_nms | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_suppress_flow(self):\n self.suppressed = self.packet_count\n self.fcip.update_one({'hash': self.fcip_hash},\n {'$set': {'suppressed': self.suppressed},})",
"def non_maxima_suppression(boxes, probs, classes_num, thr=0.2):\n for i, box in enumerate(boxes):\n ... | [
"0.5966529",
"0.55709445",
"0.55309284",
"0.5446378",
"0.5345884",
"0.5345884",
"0.5295788",
"0.52417874",
"0.5239064",
"0.52170867",
"0.52112365",
"0.520788",
"0.5186509",
"0.51748765",
"0.51727885",
"0.5166071",
"0.510891",
"0.50601727",
"0.5058307",
"0.50549585",
"0.504755... | 0.59055877 | 1 |
Reset class categories and class predictors. | def reset_class(self, classes):
self._clear_cached_op()
self._classes = classes
if self._pos_iou_thresh >= 1:
self._target_generator = YOLOV3TargetMerger(len(classes), self._ignore_iou_thresh)
for outputs in self.yolo_outputs:
outputs.reset_class(classes) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reset(self):\n self.pred_classes.clear()\n self.gold_classes.clear()\n self.pred_probas.clear()\n self.gold_probas.clear()\n self.loss = 0\n self.nb_batches = 0\n self.prec_rec_f1 = None\n self.acc = None\n self.mcc = None",
"def reset(self):\n ... | [
"0.72172534",
"0.6899433",
"0.6748963",
"0.6726632",
"0.66773576",
"0.6629043",
"0.6529936",
"0.6484326",
"0.6478405",
"0.6478405",
"0.6471619",
"0.6466328",
"0.6457058",
"0.62709737",
"0.6202787",
"0.61770207",
"0.6149169",
"0.612528",
"0.60959744",
"0.6091237",
"0.6087531",... | 0.70744306 | 1 |
YOLO3 multiscale with darknet53 base network on VOC dataset. | def yolo3_darknet53_voc(pretrained_base=True, pretrained=False, num_sync_bn_devices=-1, **kwargs):
from ...data import VOCDetection
pretrained_base = False if pretrained else pretrained_base
base_net = darknet53(
pretrained=pretrained_base, num_sync_bn_devices=num_sync_bn_devices, **kwargs)
stages = [base_net.features[:15], base_net.features[15:24], base_net.features[24:]]
anchors = [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]]
strides = [8, 16, 32]
classes = VOCDetection.CLASSES
return get_yolov3(
'darknet53', stages, [512, 256, 128], anchors, strides, classes, 'voc',
pretrained=pretrained, num_sync_bn_devices=num_sync_bn_devices, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self):\n # TODO\n self.confThreshold = 0.6\n self.nmsThreshold = 0.5\n self.inpWidth = 320\n self.inpHeight = 320\n classesFile = \"/content/drive/My Drive/tracking_course/Detection/yolo_workshop/coco.names\"\n self.classes = None\n with open(cla... | [
"0.6369418",
"0.6306215",
"0.6232091",
"0.6182974",
"0.60732025",
"0.5921903",
"0.5916691",
"0.5855269",
"0.5853199",
"0.5839653",
"0.57973075",
"0.5778595",
"0.574271",
"0.573733",
"0.57194585",
"0.5616862",
"0.55473757",
"0.5541811",
"0.554155",
"0.5500795",
"0.5457074",
... | 0.6497924 | 0 |
The uri returned from request.uri is not properly urlencoded (sometimes it's partially urldecoded) This is a weird hack to get werkzeug to return the proper urlencoded string uri | def _get_uri_from_request(request):
uri = request.base_url
if request.query_string:
uri += '?' + request.query_string.decode('utf-8')
return uri | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _urlnorm(self, uri):\r\n (scheme, authority, path, query, fragment) = parse_uri(uri)\r\n if not scheme or not authority:\r\n raise Exception(\"Only absolute URIs are allowed. uri = %s\" % uri)\r\n authority = authority.lower()\r\n scheme = scheme.lower()\r\n if not... | [
"0.67314285",
"0.65002495",
"0.6443756",
"0.6264452",
"0.6258882",
"0.6235578",
"0.6230169",
"0.6225036",
"0.61642367",
"0.6159104",
"0.61166435",
"0.6091703",
"0.60887986",
"0.6040494",
"0.602881",
"0.6023126",
"0.60162497",
"0.6009078",
"0.6009078",
"0.5996785",
"0.5991015"... | 0.68531525 | 0 |
Visualize a particular column of Y_pred anf Y_test for a particular series | def visualize_pred(y_test, y_pred, test_seq, window_out, num_plots, num_win_ser, cols_y, col_idx):
ser_idx = [i for i in range(0, len(y_test), num_win_ser)]
if num_plots > len(ser_idx):
print("Too many plots, reduce the mumber")
else:
indx = ser_idx[0:num_plots]
days = range(num_win_ser)
for idx in indx:
CR = test_seq[idx][0][0][3]
pred = y_pred[idx : idx+num_win_ser, window_out -1, col_idx]
true = y_test[idx : idx+num_win_ser, window_out -1, col_idx]
plt.title("Y_True V/S Y_Pred, CR: "+ str(CR))
plt.xlabel('Days')
plt.ylabel(cols_y[col_idx])
plt.plot(days, pred, label = 'Pred')
plt.plot(days, true, label = 'True')
plt.legend()
plt.show() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def actual_pred_plot(preds):\r\n actual_pred = pd.DataFrame(columns=['Cost', 'prediction'])\r\n actual_pred['Cost'] = all_data['2020':].iloc[:, -1][1:len(preds) + 1]\r\n actual_pred['prediction'] = preds[:, -1]\r\n\r\n from keras.metrics import MeanSquaredError\r\n m = MeanSquaredError()\r\n m.up... | [
"0.6575989",
"0.65215456",
"0.6404881",
"0.62492937",
"0.6163107",
"0.61196136",
"0.60816854",
"0.6080583",
"0.6039545",
"0.6038659",
"0.6025292",
"0.60227543",
"0.6006382",
"0.59930116",
"0.5945069",
"0.5939615",
"0.5928846",
"0.5928846",
"0.5887865",
"0.5881625",
"0.5878290... | 0.6889194 | 0 |
Test the AioBaseTurtle._calc_move function | def test_calc_move(self):
t = AioBaseTurtle()
t.speed(speed=5)
steps, delta = t._calc_move(Vec2D(0, 100))
self.assertEqual(steps, 20)
self.assertAlmostEqual(delta[0], 0.0)
self.assertAlmostEqual(delta[1], 5.0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_move_step(self):\n t = AioBaseTurtle()\n t._move_step(Vec2D(-100, 0), 20, Vec2D(10,5))\n self.assertAlmostEqual(t._position[0], 100)\n self.assertAlmostEqual(t._position[1], 100)\n t.screen._drawline.assert_called_once_with(\n t.currentLineItem,\n (... | [
"0.77467954",
"0.67991954",
"0.67526376",
"0.6682421",
"0.6611246",
"0.66024905",
"0.65450394",
"0.65094215",
"0.6458481",
"0.6436995",
"0.6424645",
"0.64221406",
"0.6376048",
"0.63608104",
"0.6327564",
"0.63082105",
"0.6292444",
"0.62719584",
"0.62709236",
"0.62575185",
"0.6... | 0.88943046 | 0 |
Test the AioBaseTurtle._calc_rotation function | def test_calc_rotation(self):
t = AioBaseTurtle()
t.speed(speed=2)
orient, steps, delta = t._calc_rotation(120)
self.assertEqual(steps, 21)
self.assertAlmostEqual(delta, 120.0 / 21.0)
self.assertAlmostEqual(orient[0], math.cos(math.radians(120)))
self.assertAlmostEqual(orient[1], math.sin(math.radians(120))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_rotation_angle(self):\n\n self.test_shape.azimuth_placement_angle = [45, 135, 225, 315]\n test_volume = self.test_shape.volume()\n self.test_shape.rotation_angle = 180\n assert self.test_shape.volume() == pytest.approx(test_volume * 0.5)",
"def test_rotation(self, tol):\n ... | [
"0.72649866",
"0.71997005",
"0.7070056",
"0.6858857",
"0.67564845",
"0.6559771",
"0.65583205",
"0.6543984",
"0.65229213",
"0.6519182",
"0.64923644",
"0.64011544",
"0.63578784",
"0.632081",
"0.62979436",
"0.62914723",
"0.62909883",
"0.62908113",
"0.6243588",
"0.62338036",
"0.6... | 0.91761756 | 0 |
Test the AioBaseTurtle._calc_circle function | def test_calc_circle(self):
t = AioBaseTurtle()
steps, step_len, rot_step = t._calc_circle(100, extent=180)
self.assertEqual(steps, 14)
self.assertAlmostEqual(rot_step, 180.0 / 14.0)
self.assertAlmostEqual(step_len, 22.3928952207) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def draw_circle(c):\n turtle.circle(c.radius)",
"def draw_circle(c):\n turtle.circle(c.radius)",
"def GetCircle(circle):\r\n pass",
"def test_get_radius():\n center = Coordinates(7, 3)\n radius = 12\n\n returned_rad = get_radius(center, radius, 30)\n\n assert returned_rad == radius\n... | [
"0.734142",
"0.734142",
"0.72636837",
"0.703413",
"0.6813236",
"0.6676832",
"0.6654256",
"0.66349846",
"0.6573545",
"0.65500736",
"0.6521824",
"0.6442423",
"0.64218247",
"0.6412573",
"0.63668287",
"0.63393223",
"0.6279476",
"0.6262953",
"0.6237789",
"0.6233661",
"0.6233039",
... | 0.8547868 | 0 |
Test the AioBaseTurtle._move_step function | def test_move_step(self):
t = AioBaseTurtle()
t._move_step(Vec2D(-100, 0), 20, Vec2D(10,5))
self.assertAlmostEqual(t._position[0], 100)
self.assertAlmostEqual(t._position[1], 100)
t.screen._drawline.assert_called_once_with(
t.currentLineItem,
((-100.0, 0.0), (100.0, 100.0)), # called with mutable _position
"black",
1,
False
)
self.mock_update.assert_called_once_with() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def step(self, move):",
"def test_calc_move(self):\n t = AioBaseTurtle()\n t.speed(speed=5)\n steps, delta = t._calc_move(Vec2D(0, 100))\n self.assertEqual(steps, 20)\n self.assertAlmostEqual(delta[0], 0.0)\n self.assertAlmostEqual(delta[1], 5.0)",
"def move(self, dire... | [
"0.7820133",
"0.7763472",
"0.6788755",
"0.6762144",
"0.6700953",
"0.66414285",
"0.6528104",
"0.651918",
"0.6467042",
"0.644896",
"0.6419937",
"0.639203",
"0.637763",
"0.63649815",
"0.6354341",
"0.63443154",
"0.63338375",
"0.63002443",
"0.6289629",
"0.62895745",
"0.62881094",
... | 0.8622489 | 0 |
Temporarily overwrite the settings with test settings. This allows to use test datasets for testing. | def generate_test_settings(tmpdir, dataset):
# When `tmpdir` is a path convert it to a string
if isinstance(tmpdir, py._path.local.LocalPath):
tmpdir = str(tmpdir)
test_settings = {
'datasets': {
'mnist': {
'train': {
'images': "file://" + tmpdir + "/" + dataset + "/server/train-images-idx3-ubyte.gz",
'labels': "file://" + tmpdir + "/" + dataset + "/server/train-labels-idx1-ubyte.gz"
},
'test': {
'images': "file://" + tmpdir + "/" + dataset + "/server/t10k-images-idx3-ubyte.gz",
'labels': "file://" + tmpdir + "/" + dataset + "/server/t10k-labels-idx1-ubyte.gz"
},
},
},
'data-dir': tmpdir + "/" + dataset + "/data"
}
overwrite_settings(test_settings) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def force_test_setting(dm, tsm, output_path):\n if dm is not None:\n data_json_path = os.path.join(output_path, 'cur_data_setting.json')\n dm.data_par['datapro']['dataset']['prepare_data'] = False\n dm.data_par['datapro']['reg']['max_num_for_loading'] = [1, 1, -1, 1]\n dm.save(data_j... | [
"0.69389164",
"0.6621101",
"0.6452525",
"0.64237857",
"0.64121604",
"0.6407746",
"0.6387266",
"0.6371625",
"0.6295202",
"0.6287137",
"0.6241228",
"0.6236511",
"0.62239265",
"0.6192211",
"0.6178279",
"0.61352205",
"0.6134868",
"0.6040336",
"0.6021929",
"0.6021816",
"0.6011449"... | 0.6982843 | 0 |
Generate archive files for the given test dataset in tmpdir | def generate_test_dataset_archive(filepath, dataset):
# 'file:///some/path' to '/some/path'
if filepath[:7] == 'file://':
filepath = filepath[7:]
# Check if the dataset exists.
# When not been generate it.
if not os.path.isfile(filepath):
print("Generating", filepath)
data = get_test_dataset(dataset)
ensure_dir(os.path.dirname(filepath))
idxgz.save(filepath, data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_test_environment(tmpdir, dataset):\n\n # Overwrite settings with test settings\n generate_test_settings(tmpdir, dataset)\n\n # Generate the archive files\n for usage in ['train', 'test']:\n \n for dstype in ['images', 'labels']:\n \n dataset_type = usage... | [
"0.7358341",
"0.6791461",
"0.6418583",
"0.63508826",
"0.63468665",
"0.6301004",
"0.6291766",
"0.61660707",
"0.6096657",
"0.6091523",
"0.60853094",
"0.6035293",
"0.602757",
"0.59755576",
"0.595753",
"0.59322333",
"0.5913556",
"0.5871974",
"0.58634",
"0.5859327",
"0.58383375",
... | 0.75224614 | 0 |
Generate a test environment using the given dataset. The settings are temporarily overwritten to use the test data. | def generate_test_environment(tmpdir, dataset):
# Overwrite settings with test settings
generate_test_settings(tmpdir, dataset)
# Generate the archive files
for usage in ['train', 'test']:
for dstype in ['images', 'labels']:
dataset_type = usage + '.' + dstype
mnist_dataset = 'datasets.mnist.' + dataset_type
filepath = get_setting(mnist_dataset)
test_dataset = dataset + '.' + dataset_type
generate_test_dataset_archive(filepath, test_dataset) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_environment(dataset, tmpdir):\n\n print(\">>> Test environment:\")\n print(\"dataset:\", dataset)\n print(\"tmpdir:\", tmpdir)\n\n generate_test_environment(tmpdir, dataset)\n\n return { 'dataset': dataset, 'tmpdir': tmpdir }",
"def test_generate_test_environment(dataset):\n\n prin... | [
"0.7599391",
"0.72667193",
"0.69813424",
"0.66441184",
"0.64025056",
"0.6268748",
"0.625678",
"0.6244715",
"0.61173195",
"0.60907793",
"0.60787404",
"0.60093623",
"0.5926749",
"0.5911557",
"0.5894079",
"0.58914727",
"0.58914727",
"0.58914727",
"0.58914727",
"0.58877015",
"0.5... | 0.7804764 | 0 |
Extracts (typically) overlapping regular patches from a grayscale image Changing the offset and stride parameters will result in images reconstructed by reconstruct_from_grayscale_patches having different dimensions! Callers should pad and unpad as necessary! | def extract_grayscale_patches( img, shape, offset=(0,0), stride=(1,1) ):
px, py = np.meshgrid( np.arange(shape[1]),np.arange(shape[0]))
l, t = np.meshgrid(
np.arange(offset[1],img.shape[1]-shape[1]+1,stride[1]),
np.arange(offset[0],img.shape[0]-shape[0]+1,stride[0]) )
l = l.ravel()
t = t.ravel()
x = np.tile( px[None,:,:], (t.size,1,1)) + np.tile( l[:,None,None], (1,shape[0],shape[1]))
y = np.tile( py[None,:,:], (t.size,1,1)) + np.tile( t[:,None,None], (1,shape[0],shape[1]))
return img[y.ravel(),x.ravel()].reshape((t.size,shape[0],shape[1])), (t,l) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _extract_patches_and_positions_from_image(\n image, patch_size, patch_stride, hse_grid_size,\n n_crops, h, w,\n c, scale_id, max_seq_len):\n p = tf.image.extract_patches(\n image, [1, patch_size, patch_size, 1], [1, patch_stride, patch_stride, 1],\n [1, 1, 1, 1],\n padding='SAME')\n\n ... | [
"0.69615763",
"0.6910621",
"0.6902872",
"0.6756077",
"0.6684931",
"0.6669656",
"0.66367173",
"0.66181797",
"0.6613878",
"0.6597331",
"0.65837264",
"0.65633184",
"0.65623057",
"0.652115",
"0.6456871",
"0.6398756",
"0.6369198",
"0.6359843",
"0.63577175",
"0.6351932",
"0.6345157... | 0.7973168 | 0 |
assert json schema for requests from api.openweathermap.org | def validate_schema_openweathermap(self, actual, schema):
resources_dir = os.path.abspath(os.getcwd())
relative_schema_path = valid_json_schema if schema == 'Valid' else error_json_schema
schema_data = open(os.path.join(resources_dir, relative_schema_path))
self.validate_schema(actual, json.load(schema_data))
return self | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_complete_data_schema(self):\n response = self.client.get(self.url)\n data = response.data\n self.assertIn('id', data)\n self.assertIn('title', data)\n self.assertIn('release_year', data)\n self.assertIn('casting', data)\n self.assertIn('directors', data)\n ... | [
"0.62621653",
"0.6240114",
"0.6214563",
"0.6125648",
"0.5942485",
"0.5931073",
"0.575438",
"0.57441986",
"0.57291126",
"0.57290787",
"0.57119447",
"0.56926596",
"0.5668845",
"0.5663258",
"0.5649895",
"0.5649895",
"0.5649045",
"0.56264323",
"0.5599427",
"0.5597548",
"0.5595037... | 0.6302012 | 0 |
Count the number of nonempty dicts/lists or other objects | def recursive_count(o):
if isinstance(o, dict):
c = 0
for v in o.values():
c += recursive_count(v)
return c
elif isinstance(o, list):
c = 0
for v in o:
c += recursive_count(v)
return c
else:
return 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def num_empty(self):\n count = 0\n for i in self.__buckets:\n if i.size() == 0:\n count += 1\n return count",
"def __len__(self):\n total_objs = 0\n\n if self._shelve is not None:\n total_objs += len(self._shelve)\n\n if self._dict is... | [
"0.71943367",
"0.71715474",
"0.69646627",
"0.6960094",
"0.6950782",
"0.6901174",
"0.6860939",
"0.6854527",
"0.68178976",
"0.6807361",
"0.6695624",
"0.6688045",
"0.6622141",
"0.6599806",
"0.6558737",
"0.6528312",
"0.6516975",
"0.64872235",
"0.6447478",
"0.6436087",
"0.641087",... | 0.7272351 | 0 |
Returns a list formed by the evaluation types present in criteria. | def get_evaluation_analysis_types(self, parameters):
eval_types =[]
for evaluation_criteria_id in parameters["clustering"]["evaluation"]["evaluation_criteria"]:
# for subcriteria in parameters["clustering"]["evaluation"]["evaluation_criteria"][evaluation_criteria_id]:
# eval_types.append(subcriteria)
eval_types.extend(parameters["clustering"]["evaluation"]["evaluation_criteria"][evaluation_criteria_id].keys())
return list(set(eval_types)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_query_and_evaluation_analysis_types(self, parameters):\n queries = parameters[\"clustering\"][\"evaluation\"][\"query_types\"]\n queries.extend(AnalysisPopulator.get_evaluation_analysis_types(parameters))\n return list(set(queries))",
"def getResultDefs(self, type=None):\n res... | [
"0.67036015",
"0.615326",
"0.59587413",
"0.5896585",
"0.5799256",
"0.5777026",
"0.5747461",
"0.5745",
"0.5661408",
"0.5641381",
"0.5631738",
"0.551762",
"0.551464",
"0.5496893",
"0.5494026",
"0.54865164",
"0.54589295",
"0.5457875",
"0.5433231",
"0.54234606",
"0.5407233",
"0... | 0.76133484 | 0 |
Returns the 'details' field of a clustering. | def analysis_function_details(self,clustering):
return clustering.details | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_details(self):\n return self.details",
"def get_details(self):\n return self.details",
"def get_details(self):\n return self.details",
"def details(self) -> \"dict\":\n return self._attrs.get(\"details\")",
"def details(self):\n return self._details",
"def detai... | [
"0.6636939",
"0.6636939",
"0.6636939",
"0.6543469",
"0.64725894",
"0.63629097",
"0.62274104",
"0.6106484",
"0.60739094",
"0.6035391",
"0.6035391",
"0.60135996",
"0.6000487",
"0.5963064",
"0.59448",
"0.5934605",
"0.5902071",
"0.5895396",
"0.58460885",
"0.5807865",
"0.57522964"... | 0.7595597 | 0 |
Returns the number of elements that are clusterized in this clustering (which may not be the total number of elements of the dataset if there were noisy elements) | def analysis_function_total_elements(self,clustering):
return clustering.total_number_of_elements | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def n_clusters(self):\n return len(self.clusters)",
"def get_cluster_count(self) -> int:\n return len(self.get_all_cluster_ids())",
"def count_elements_in_dataset(dataset):\n return dataset.count()",
"def cluster_count(self) -> int:\n cluster_count = max(1, round(16**3 * (self.vein.pu... | [
"0.7475261",
"0.73660713",
"0.7281601",
"0.7209829",
"0.7184535",
"0.71797055",
"0.71275187",
"0.7005667",
"0.6999668",
"0.69551873",
"0.68624055",
"0.68511283",
"0.67983764",
"0.6792057",
"0.6780994",
"0.6774845",
"0.6771099",
"0.66886616",
"0.66632557",
"0.66320395",
"0.661... | 0.74841356 | 0 |
Returns the percentage of elements of the clustering that are in the 4 bigger clusters. | def analysis_function_top_4(self,clustering):
clustering.sort_clusters_by_size()
total = 0
percents = clustering.get_population_percent_of_n_bigger_clusters(4)
for p in percents:
total = total+p
return total | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def dissimilarity(clusters):\n totDist = 0\n for c in clusters:\n totDist += c.variability()\n return totDist",
"def purity_score(clusters, classes):\n clusters = np.array(clusters)\n classes = np.array(classes)\n A = np.c_[(clusters,classes)]\n\n n_accurate = 0.\n\n ... | [
"0.68239075",
"0.66668284",
"0.6481002",
"0.6474694",
"0.6268896",
"0.6198322",
"0.61978954",
"0.6153407",
"0.6145863",
"0.61173046",
"0.6088561",
"0.6067123",
"0.60514313",
"0.605035",
"0.60220045",
"0.6007003",
"0.59786797",
"0.5954374",
"0.5944898",
"0.591898",
"0.5904012"... | 0.7220413 | 0 |
Returns the percent of noise elements in the dataset. | def analysis_function_noise_level(self, clustering, total_elements):
return 100.-(clustering.total_number_of_elements/float(total_elements))*100. | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _compute_noise_level(self, data):\n noise = max(data)\n noise_min = 2600\n noise_max = 4095\n ratio = (noise - noise_min)/(noise_max - noise_min)\n return int(ratio*100)",
"def getNoiseVar(img,fraction=0.95):\n last_val = np.percentile(img,fraction)\n #si(img<last_val... | [
"0.68183047",
"0.67847115",
"0.6347133",
"0.62229025",
"0.61716366",
"0.61085075",
"0.6024723",
"0.59983605",
"0.59494674",
"0.5946805",
"0.5945206",
"0.5942025",
"0.5883616",
"0.58745587",
"0.58745587",
"0.5860042",
"0.5856353",
"0.5841199",
"0.5834648",
"0.57891965",
"0.578... | 0.70178664 | 0 |
Returns the mean cluster size. | def analysis_function_mean_cluster_size(self,clustering):
sizes = get_cluster_sizes(clustering.clusters)[1]
return numpy.mean(sizes) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mean_cluster(self, labelled_cluster):\n sum_of_points = self.sum_cluster(labelled_cluster)\n size_cluster = len(labelled_cluster)\n if self.sigma_cl1:\n size_cluster += np.sqrt(2)*self.sigma_cl1*np.random.randn()\n mean_of_points = sum_of_points * (1.0 / size_cluster)\n ... | [
"0.7243587",
"0.7179832",
"0.7015998",
"0.7009933",
"0.68922645",
"0.68876517",
"0.684724",
"0.6808377",
"0.669405",
"0.6671428",
"0.6667033",
"0.6651806",
"0.65882844",
"0.6482646",
"0.63869536",
"0.63827366",
"0.63742137",
"0.63045746",
"0.6303817",
"0.62123394",
"0.6193753... | 0.8215371 | 0 |
This method create a project in pivotal tracker | def create_project():
client = RequestManager()
project_name = "".join(choices(string.ascii_letters + string.digits, k=10))
client.set_method("POST")
client.set_endpoint("/projects")
body = {"name": project_name}
client.set_body(json.dumps(body))
response = client.execute_request()
STORED_ID['project_id'] = response.json()['id'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def project_create(project):\n client.project.create(project)",
"def test_create_project(self):\n pass",
"def test_create_project(self):\n pass",
"def test_create_project(self):\n pass",
"def create_project(self, **kwargs):\n save = kwargs.get('save', True) \n if kwarg... | [
"0.7641362",
"0.74125963",
"0.74125963",
"0.74125963",
"0.73800826",
"0.7369805",
"0.72611123",
"0.7240642",
"0.72367054",
"0.7189181",
"0.71309054",
"0.70826024",
"0.70679694",
"0.7061639",
"0.6987262",
"0.698142",
"0.696992",
"0.6924102",
"0.69188553",
"0.6910896",
"0.69103... | 0.75777197 | 1 |
Static method for delete all projects. | def delete_all_projects():
client = RequestManager()
client.set_method("GET")
client.set_endpoint("/projects")
response = client.execute_request()
for project in response.json():
try:
ProjectHelper.delete_project(project["id"])
except TypeError:
LOGGER.info(project) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clear(self):\n for project in Project.objects:\n project.delete()",
"def __remove_all_projects__():\n p = subprocess.Popen('rm -rf {}/.wcscanner/*'.format(context.__BASE_PATH__), shell=True)\n p.wait()",
"def clean_project(self, app_name=None, delete_all=False):\n\n if not ap... | [
"0.7438387",
"0.74334717",
"0.7418019",
"0.6865698",
"0.6836307",
"0.6812033",
"0.6775198",
"0.6739514",
"0.6739514",
"0.6619207",
"0.6606535",
"0.65799767",
"0.65798426",
"0.65713173",
"0.6563861",
"0.6534334",
"0.64856863",
"0.6476536",
"0.6473611",
"0.6437883",
"0.6420292"... | 0.8597782 | 0 |
Decorator that returns 403 status if user isn't logged in instead of redirecting to the LOGIN_URL | def login_required_403(view):
@wraps(view)
def dec_view(request, *args, **kwargs):
if not request.user.is_authenticated():
return JsonResponse({"detail": "You have to log in"}, status=403)
return view(request, *args, **kwargs)
return dec_view | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def login_required(f):\n @functools.wraps(f)\n def wrap(*args, **kwargs):\n if not user_session.is_auth:\n raise Forbidden()\n return f(*args, **kwargs)\n return wrap",
"def not_authenticated(func):\n def decorated(request, *args, **kwargs):\n if request.user.is_authen... | [
"0.7650609",
"0.7616408",
"0.7589018",
"0.7559412",
"0.7558609",
"0.7510281",
"0.7494149",
"0.7491331",
"0.7440667",
"0.74237275",
"0.74189824",
"0.74179274",
"0.7398192",
"0.73772556",
"0.7373833",
"0.73519063",
"0.7316154",
"0.7309339",
"0.7299229",
"0.7293337",
"0.7288219"... | 0.79399925 | 0 |
Login with an accesscode | def accesscode(request, code):
employee = Employee.objects.get(access_code=code)
user = employee.user
user.backend = 'django.contrib.auth.backends.ModelBackend'
login(request, user)
return HttpResponseRedirect('/') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def login():",
"def login():",
"def login(self):\n self.open(self.urls['login'])\n self.select_form(nr=0)\n\n self.form['custno'] = self.username\n self.form['password'] = self.password\n res = self.submit()\n \n return res",
"def login():\n url = AUTH_URL ... | [
"0.7269786",
"0.7269786",
"0.70734197",
"0.70426136",
"0.6852028",
"0.6803463",
"0.67843354",
"0.6739689",
"0.6730675",
"0.6720502",
"0.6683724",
"0.6669751",
"0.66376764",
"0.65688664",
"0.65634507",
"0.6525562",
"0.651685",
"0.651685",
"0.649879",
"0.64984405",
"0.6491229",... | 0.74460435 | 0 |
View for all employees (in company) or for current user dependent on employee role | def all_employees(request, company_id=None):
current_employee = Employee.objects.get(user__pk=request.user.pk)
company_super_user = current_employee.isCompanySuperUserOrHigher()
if company_id:
company = Company.objects.get(pk=company_id)
else:
company = current_employee.company
if not current_employee.isEnsoUser() and current_employee.company.pk != company.pk:
raise PermissionDenied()
change_company_form = ChangeCompanyForm(initial=dict(company=company))
return TemplateResponse(
request,
'all_employees.html',
{
'user': request.user,
'company_super_user': company_super_user,
'company': company,
'change_company_form': change_company_form,
}
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def show_all_employees():\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employees = Employee.query.all()\n \n \n ## rig... | [
"0.75012994",
"0.676221",
"0.675863",
"0.66391176",
"0.659517",
"0.65067995",
"0.63182765",
"0.6227569",
"0.62267536",
"0.6176886",
"0.61206603",
"0.6090563",
"0.6077315",
"0.60686785",
"0.6056699",
"0.59416133",
"0.5920941",
"0.5915269",
"0.5865308",
"0.5852588",
"0.5848536"... | 0.7280209 | 1 |
View for all employees current user is a manager for with empty development plan | def get_manager_employees(request):
current_employee = Employee.objects.get(user__pk=request.user.pk)
manager_employees = Employee.objects.filter(manager=current_employee, development_plan_type=None).all()
if manager_employees:
emp_list=[]
for emp in manager_employees:
emp_data={}
emp_data["id"] = emp.id
emp_data["username"] = emp.user.username
emp_data["first_name"] = emp.user.first_name
emp_data["last_name"] = emp.user.last_name
emp_data["manager_id"] = emp.manager.id
# emp_data["status_questions"] = emp.status_questions
# employee_role = EmployeeRole.objects.filter(employee=emp).all()
# name_role_list = []
# for obj in employee_role:
# name_role_list.append(obj.role.name)
# emp_data["roles"] = name_role_list
emp_list.append(emp_data)
data = {"employees:": emp_list}
return JsonResponse(status=201, data=data)
else:
return JsonResponse("The user with id={} isn't a manager for any user".format(current_employee.user.id),
status=404) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_all_user_development_plans_for_manager(request, employee_id):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n user_development_plans = DevelopmentPlan.objects.filter(employee_relation=current_employee).all()\n employee = Employee.objects.filter(pk=int(employee_id)).first()\n\... | [
"0.7106553",
"0.6753194",
"0.674842",
"0.6595502",
"0.6566336",
"0.63812053",
"0.61806154",
"0.61537486",
"0.612033",
"0.60830206",
"0.60527897",
"0.60266685",
"0.596054",
"0.5893149",
"0.5841044",
"0.57119644",
"0.56820273",
"0.56635785",
"0.5655162",
"0.5615645",
"0.5613845... | 0.73281884 | 0 |
View for creating employee in company | def create_employee(request, company_id):
company = Company.objects.get(pk=company_id)
current_employee = Employee.objects.get(user__pk=request.user.pk)
if not current_employee.isEnsoUser() and current_employee.company.pk != company.pk:
logUnauthorizedAccess("User tried to create_employee", request)
raise PermissionDenied()
form = EmployeeForm(request, initial=dict(company=company))
form.fields['manager'].queryset = Employee.objects.filter(is_manager=True, company=company)
# form.fields['development_plan_type'].queryset = DevelopmentPlanType.objects.filter(
# Q(company=company) | Q(company__isnull=True))
# data = {
# 'employee_form': form.cleaned_data,
# 'company': company.cleaned_data["name"]
# }
return TemplateResponse(
request,
'mus/create_employee_form.html',
{
'employee_form': form,
}
)
# data = {
# 'employee_form': form.cleaned_data,
# 'company': company.cleaned_data["name"]
# }
# return JsonResponse(status=200, data=data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_leader_model(request, company_id):\n\n errors = {'noactions': []}\n company = Company.objects.get(pk=company_id)\n currentEmpl = Employee.objects.get(user__pk=request.user.pk)\n \"\"\":type : Employee \"\"\"\n\n if not currentEmpl.isEnsoUser() and currentEmpl.company.pk != company.pk:\n ... | [
"0.6837375",
"0.67006433",
"0.66819894",
"0.6620597",
"0.6475171",
"0.64458215",
"0.6421396",
"0.6402227",
"0.6344205",
"0.6299245",
"0.626302",
"0.6249063",
"0.6230102",
"0.6173451",
"0.61211884",
"0.6115985",
"0.60931104",
"0.6089694",
"0.60648185",
"0.60492533",
"0.6024815... | 0.8076184 | 0 |
View for editing employee | def edit_employee(request, employee_id):
employee = Employee.objects.get(pk=int(employee_id))
current_employee = Employee.objects.get(user__pk=request.user.pk)
assert isinstance(employee, Employee)
assert isinstance(current_employee, Employee)
# if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:
# raise PermissionDenied()
if not current_employee.hasAccessTo(employee):
raise PermissionDenied()
form = EditEmployeeForm(request.user, employee, {
'first_name': employee.user.first_name,
'last_name': employee.user.last_name,
'email': employee.user.email,
'manager': employee.manager.id if employee.manager else 0,
'language_code': employee.language_code,
# 'development_plan_type': employee.development_plan_type.id,
'is_manager': employee.is_manager
})
if 'manager' in form.fields:
managerQS = Employee.objects.filter(is_manager=True, company__pk=employee.company.pk)
form.fields['manager'].queryset = managerQS
# form.fields['development_plan_type'].queryset = DevelopmentPlanType.objects.filter(
# Q(company__pk=employee.company.pk) | Q(company__isnull=True)
# )
is_me = employee.user.pk == request.user.pk
return TemplateResponse(
request,
'mus/edit_employee_form.html',
{
'edit_employee_form': form,
'employee_id': employee_id,
'me': is_me,
'name': employee.user.get_full_name()
}
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def edit_employee(employee_id):\n\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n \n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employee = Employee.query.get_or_404(employe... | [
"0.7485564",
"0.6978975",
"0.6902276",
"0.6840151",
"0.6783405",
"0.6756372",
"0.66953164",
"0.66938245",
"0.66192317",
"0.65741867",
"0.65598595",
"0.65310377",
"0.6502672",
"0.6400611",
"0.6391832",
"0.6368707",
"0.63276947",
"0.6300049",
"0.6295493",
"0.62654054",
"0.62085... | 0.7715588 | 0 |
View for list of actions of (current) employee | def action_list(request, employee_id=None):
if employee_id:
employee = Employee.objects.get(pk=employee_id)
current_employee = Employee.objects.get(user__pk=request.user.pk)
if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:
raise PermissionDenied()
else:
employee = request.user.employee_user.first()
actions = employee.action_set.all()
return TemplateResponse(
request,
'mus/action_list.html',
dict(
actions=actions,
employee=employee
)
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def show_overview_of_all_employees(self):\n\n print(\"OVERVIEW OF EMPLOYEES\\n\")\n\n employees_ob_list = self.llapi.get_employee_overview()\n \n for employee_ob in employees_ob_list:\n print(employee_ob.print_info_in_line(\"*\"))\n \n print(f\"\\nNAN AI... | [
"0.6613559",
"0.63822025",
"0.6160403",
"0.61327785",
"0.61140096",
"0.5988209",
"0.5908296",
"0.5907843",
"0.5899233",
"0.5892363",
"0.5886298",
"0.58746743",
"0.58155996",
"0.57719344",
"0.5768285",
"0.5741835",
"0.5715916",
"0.57119167",
"0.5709055",
"0.5664173",
"0.561676... | 0.8160547 | 0 |
View for detail of action | def action_detail(request, action_id):
employee = request.user.employee_user.first()
action = Action.objects.get(pk=int(action_id))
# if not employee.isEnsoUser() and employee.company.pk != action.employee.company.pk:
if not employee.hasAccessTo(action.employee):
raise PermissionDenied()
if request.method == 'POST':
form = ActionCommentForm(request.POST)
if form.is_valid():
form.save(request.user, action)
return HttpResponseRedirect('/action/%s' % action_id)
else:
form = ActionCommentForm()
return TemplateResponse(
request,
'mus/action_detail.html',
dict(
action=action,
form=form
)
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __str__(self):\n return _action_args_dict[self.action].name",
"def action(self):\n pass",
"def action(self):\n pass",
"def view(self):",
"def get_action(self, context):\n pass",
"def show(self, *args, **kwargs) -> None:\n pass",
"def show(self, *args, **kwargs) ->... | [
"0.67003417",
"0.6600345",
"0.6600345",
"0.6525399",
"0.6474398",
"0.6430569",
"0.6430569",
"0.6430569",
"0.6398873",
"0.6335036",
"0.62369657",
"0.62285584",
"0.6216931",
"0.6206771",
"0.61830616",
"0.61691725",
"0.6162804",
"0.6153047",
"0.61520946",
"0.60394746",
"0.602984... | 0.73323816 | 0 |
Create LeaderModel and send it as a PDF to the browser | def get_leader_model_pdf(currentEmpl, employees):
lm = LeaderModel()
employee_actions = {}
legend = []
colors = {}
errors = {'noactions': []}
# numbered_actions = {}
for empl in employees:
if not currentEmpl.hasAccessTo(empl):
raise PermissionDenied()
actions = empl.action_set.all()
if not len(actions):
errors['noactions'].append(empl)
continue
lkey = empl.user.first_name + " " + empl.user.last_name
legend.append(lkey)
if not lkey in employee_actions:
employee_actions[lkey] = {}
for action in actions:
if not action.difficulty or not action.type:
errors['noactions'].append(empl)
continue
circle_number = lm.addCircle(action)
latest_comment = action.getLatestComment()
employee_actions[lkey][circle_number] = {
'name': action.title,
'type': action.type,
'difficulty': action.getDifficultyText(),
'comment': latest_comment
}
if lkey not in colors:
color = lm.getEmployeeColors(empl.id)
colors[lkey] = "rgb({}, {}, {})".format(color[0], color[1], color[2])
if len(errors['noactions']):
return errors
lm_filename = path.join(settings.STATIC_ROOT, "leadermodel_{}.png".format(currentEmpl.id))
lm.writeImage(lm_filename)
#
# Write PDF
pdfFilename = path.join(settings.FILES_ROOT, "leadermodel_{}.pdf".format(currentEmpl.id))
template = get_template('mus/leader_model_pdf.html')
context = Context({
'site_url': settings.SITE_URL,
'lm_filename': lm_filename,
'employee_actions': employee_actions,
'colors': colors,
'legend': legend
})
html = template.render(context)
# html = html.replace('<li>','<li><img class="square" src="http://test.nxtlvl.dk/static/img/square.png" />')
result = open(pdfFilename, 'wb')
pisa.pisaDocument(StringIO.StringIO(
html.encode("UTF-8")), dest=result)
result.close()
wrapper = FileWrapper(file(pdfFilename))
response = HttpResponse(wrapper, content_type='application/pdf')
response['Content-Disposition'] = 'attachment;filename=ledermodel.pdf'
response['Content-Length'] = os.path.getsize(pdfFilename)
return response
# return HttpResponseRedirect('/employee/all/%d' % int(company_id)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_leader_model(request, company_id):\n\n errors = {'noactions': []}\n company = Company.objects.get(pk=company_id)\n currentEmpl = Employee.objects.get(user__pk=request.user.pk)\n \"\"\":type : Employee \"\"\"\n\n if not currentEmpl.isEnsoUser() and currentEmpl.company.pk != company.pk:\n ... | [
"0.67373437",
"0.63020295",
"0.6226293",
"0.6087851",
"0.5910283",
"0.58224714",
"0.57705843",
"0.5737507",
"0.5713494",
"0.56827766",
"0.5535205",
"0.55348325",
"0.5490096",
"0.54388666",
"0.5369891",
"0.53277147",
"0.52944934",
"0.52798676",
"0.5266541",
"0.5252111",
"0.524... | 0.71051025 | 0 |
View for employee development plan details | def development_plan_details(request, development_plan_id): #, employee_id ):
# employee = Employee.objects.get(user__pk=request.user.pk)
# employee = Employee.objects.filter(pk=int(employee_id)).first()
development_plan = DevelopmentPlan.objects.get(pk=int(development_plan_id))
current_employee = Employee.objects.filter(user__pk=request.user.pk).first()
all_employees = development_plan.employee_relation.all()
try:
development_plan = DevelopmentPlan.objects.get(pk=int(development_plan_id))
data={}
development_plan_object_list=[]
dev_plan={}
dev_plan["id"] = development_plan.id
dev_plan["deleted"] = development_plan.deleted
if development_plan.type:
dev_plan["type"] = development_plan.type.name
# dev_plan["finished_at"] = DevelopmentPlanToEmployeeRelation.objects.get(development_plan = development_plan)\
# .finished_at
dev_plan["created_at"] = development_plan.created_at
dev_plan["created_by"] = development_plan.created_by.username
development_plan_object_list.append({"dev_plan_details":dev_plan})
# manager_relation
manager_data={}
manager_data["manager_username"] = development_plan.manager_relation.user.username
manager_data["manager_first_name"] = development_plan.manager_relation.user.first_name
manager_data["manager_last_name"] = development_plan.manager_relation.user.last_name
development_plan_object_list.append({"manager_data":manager_data})
# employee_relation
employee_data={}
all_employees = development_plan.employee_relation.all()
if all_employees:
emp_list=[]
for emp in all_employees:
emp_data={}
emp_data["id"] = emp.user.id
emp_data["username"] = emp.user.username
emp_data["first_name"] = emp.user.first_name
emp_data["last_name"] = emp.user.last_name
emp_data["status_questions"] = emp.status_questions
emp_data["dev_plan_finished_at"] = DevelopmentPlanToEmployeeRelation\
.objects.get(employee=emp,
development_plan = development_plan)\
.finished_at
employee_role = EmployeeRole.objects.filter(employee=emp).all()
name_role_list = []
for obj in employee_role:
name_role_list.append(obj.role.name)
emp_data["roles"] = name_role_list
emp_list.append(emp_data)
employee_data={"all_employees":emp_list}
else:
return JsonResponse(data={"details":"Any employee has Development Plan with id={}"
.format(development_plan.id)}, status=404)
development_plan_object_list.append({"employee_data":employee_data})
# competence_parts
all_competence_parts = development_plan.competence_parts.all()
competence_list = []
questions_list = []
sliders_list = []
if all_competence_parts:
for comp_part in all_competence_parts:
comp_part_data={}
competence_d={"competence_parts": []}
comp_part_data["id"] = comp_part.id
comp_part_data["title"] = comp_part.title
comp_part_data["description"] = comp_part.description
comp_part_data["competence_status"] = comp_part.competence_status
all_questions = comp_part.question_set.all()
if all_questions:
for question in all_questions:
question_data = {}
question_data["question_id"] = question.id
question_data["title"] = question.title
question_data["competence_part"] = question.competence_part.id
answer = Answer.objects.filter(question__id = question.id).first() #employee=current_employee
if answer:
question_data["answer_id"] = answer.id
question_data["answer"] = answer.title
questions_list.append(question_data)
comp_part_data["questions"] = questions_list
all_sliders = comp_part.slider_set.all()
if all_sliders:
for slider in all_sliders:
slider_data = {}
slider_data["slider_id"] = slider.id
slider_data["scale"] = slider.scale
slider_data["competence_part"] = slider.competence_part.id
answer = Answer.objects.filter(slider__id = slider.id).first() #employee=current_employee
if slider:
slider_data["answer_id"] = answer.id
slider_data["answer"] = answer.slider.scale
sliders_list.append(slider_data)
comp_part_data["sliders"] = sliders_list
comp_part_data["created_at"] = comp_part.created_at
comp_part_data["created_by"] = comp_part.created_by.username
comp_part_data["updated_at"] = comp_part.updated_at
comp_part_data["updated_by"] = comp_part.updated_by.username
competence_keys_list = ['id', 'title', 'description',
'language_code', 'status']
if not competence_list:
get_competence_data(competence_keys_list, comp_part.competence, competence_d,
comp_part_data, competence_list)
else:
competence_found = False
for competence_dict in competence_list:
if competence_dict['id'] == comp_part.competence.id:
competence_dict['competence_parts'].append(comp_part_data)
competence_found = True
break
if not competence_found:
get_competence_data(competence_keys_list, comp_part.competence, competence_d,
comp_part_data, competence_list)
development_plan_object_list.append({"competences":competence_list})
else:
return JsonResponse(data={"details":"Development Plan with id={} doesn't have any Competence Part yet"
.format(development_plan.id)}, status=404)
data = {"dev_plan:": development_plan_object_list}
return JsonResponse(status=201, data=data)
except DevelopmentPlan.DoesNotExist:
return JsonResponse(data={"details":"Development Plan with this id doesn't exist"}, status=404) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_active_development_plan_for_user(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n current_development_plan = DevelopmentPlan.objects.filter(\n employee_relation=current_employee,\n employee_relation__developmentplantoemployeerelation__finished_at__isnull=T... | [
"0.6865501",
"0.6419754",
"0.64083785",
"0.6235996",
"0.6046271",
"0.60325843",
"0.60079944",
"0.5864522",
"0.5858046",
"0.58436126",
"0.5818266",
"0.57469684",
"0.5691461",
"0.5501445",
"0.54863083",
"0.5485314",
"0.5460011",
"0.54553616",
"0.54523605",
"0.5448519",
"0.54469... | 0.68573505 | 1 |
View a list of user's development plans for manager | def get_all_user_development_plans_for_manager(request, employee_id):
current_employee = Employee.objects.get(user__pk=request.user.pk)
user_development_plans = DevelopmentPlan.objects.filter(employee_relation=current_employee).all()
employee = Employee.objects.filter(pk=int(employee_id)).first()
if not current_employee:
raise PermissionDenied("You don't have any employee assigned to you.", 401)
if not current_employee.isEnsoUser() and current_employee.is_manager:
raise PermissionDenied()
actions = employee.action_set.all()
if not int(employee_id) in [obj.id for obj in Employee.objects.filter(manager=current_employee).all()]:
raise PermissionDenied("Employee with id={} is not assigned to you.".format(employee_id), 401)
if user_development_plans:
data={}
user_development_plans_list = []
for plan in user_development_plans:
development_plan_object_list=[]
dev_plan = {}
dev_plan["id"] = plan.id
dev_plan["deleted"] = plan.deleted
if plan.type:
dev_plan["type"] = plan.type.name
dev_plan["finished_at"] = DevelopmentPlanToEmployeeRelation.objects\
.get(employee=current_employee, development_plan = plan).finished_at
dev_plan["created_at"] = plan.created_at
dev_plan["created_by"] = plan.created_by.username
development_plan_object_list.append({"dev_plan_details":dev_plan})
manager_data = {}
manager_data["manager_username"] = plan.manager_relation.user.username
manager_data["id"] = plan.manager_relation.user.id
development_plan_object_list.append({"manager_data":manager_data})
user_development_plans_list.append(development_plan_object_list)
else:
return JsonResponse(data={"details":"Employee with id={} doesn't have any Development Plan"
.format(request.user.pk)}, status=404)
data = {"user_development_plans:": user_development_plans_list}
return JsonResponse(status=201, data=data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_all_development_plans_for_user(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n user_development_plans = DevelopmentPlan.objects.filter(employee_relation=current_employee).all()\n\n if not current_employee:\n raise PermissionDenied(\"You don't have any employe... | [
"0.7063593",
"0.682502",
"0.6374027",
"0.6217655",
"0.61145437",
"0.610118",
"0.6058615",
"0.59781164",
"0.5932294",
"0.5915161",
"0.59006524",
"0.587544",
"0.5792681",
"0.57351124",
"0.5670701",
"0.5654031",
"0.5596579",
"0.5595037",
"0.55796534",
"0.55656326",
"0.552241",
... | 0.6848171 | 1 |
View a list of development plans for active user | def get_all_development_plans_for_user(request):
current_employee = Employee.objects.get(user__pk=request.user.pk)
user_development_plans = DevelopmentPlan.objects.filter(employee_relation=current_employee).all()
if not current_employee:
raise PermissionDenied("You don't have any employee assigned to you.", 401)
if user_development_plans:
data={}
user_development_plans_list = []
for plan in user_development_plans:
development_plan_object_list=[]
dev_plan = {}
dev_plan["id"] = plan.id
dev_plan["deleted"] = plan.deleted
if plan.type:
dev_plan["type"] = plan.type.name
dev_plan["finished_at"] = DevelopmentPlanToEmployeeRelation.objects\
.get(employee=current_employee, development_plan = plan).finished_at
dev_plan["created_at"] = plan.created_at
dev_plan["created_by"] = plan.created_by.username
development_plan_object_list.append({"dev_plan_details":dev_plan})
manager_data = {}
manager_data["manager_username"] = plan.manager_relation.user.username
manager_data["id"] = plan.manager_relation.user.id
development_plan_object_list.append({"manager_data":manager_data})
user_development_plans_list.append(development_plan_object_list)
else:
return JsonResponse(data={"details":"Employee with id={} doesn't have any Development Plan"
.format(request.user.pk)}, status=404)
data = {"user_development_plans:": user_development_plans_list}
return JsonResponse(status=201, data=data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_active_development_plan_for_user(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n current_development_plan = DevelopmentPlan.objects.filter(\n employee_relation=current_employee,\n employee_relation__developmentplantoemployeerelation__finished_at__isnull=T... | [
"0.69843084",
"0.6867608",
"0.6754407",
"0.65818614",
"0.6510022",
"0.64818734",
"0.62669057",
"0.6181612",
"0.6179575",
"0.6045006",
"0.6030298",
"0.60151047",
"0.60096705",
"0.59797555",
"0.5899104",
"0.5885478",
"0.58685434",
"0.5831174",
"0.5770537",
"0.57329553",
"0.5725... | 0.7084471 | 0 |
View active development plan for active user | def get_active_development_plan_for_user(request):
current_employee = Employee.objects.get(user__pk=request.user.pk)
current_development_plan = DevelopmentPlan.objects.filter(
employee_relation=current_employee,
employee_relation__developmentplantoemployeerelation__finished_at__isnull=True).first() # is active !!!
if not current_employee:
raise PermissionDenied()
if current_development_plan:
data={}
development_plan_object_list=[]
dev_plan={}
dev_plan["id"] = current_development_plan.id
dev_plan["deleted"] = current_development_plan.deleted
if current_development_plan.type:
dev_plan["type"] = current_development_plan.type.name
dev_plan["finished_at"] = DevelopmentPlanToEmployeeRelation.objects\
.get(employee=current_employee, development_plan = current_development_plan)\
.finished_at
dev_plan["created_at"] = current_development_plan.created_at
dev_plan["created_by"] = current_development_plan.created_by.username
development_plan_object_list.append({"dev_plan_details":dev_plan})
# manager_relation
manager_data={}
manager_data["manager_username"] = current_development_plan.manager_relation.user.username
manager_data["manager_first_name"] = current_development_plan.manager_relation.user.first_name
manager_data["manager_last_name"] = current_development_plan.manager_relation.user.last_name
development_plan_object_list.append({"manager_data":manager_data})
# employee_relation
employee_data={}
all_employees = current_development_plan.employee_relation.all()
if all_employees:
emp_list=[]
for emp in all_employees:
emp_data={}
emp_data["id"] = emp.user.id
emp_data["username"] = emp.user.username
emp_data["first_name"] = emp.user.first_name
emp_data["last_name"] = emp.user.last_name
emp_data["status_questions"] = emp.status_questions
employee_role = EmployeeRole.objects.filter(employee=emp).all()
name_role_list = []
for obj in employee_role:
name_role_list.append(obj.role.name)
emp_data["roles"] = name_role_list
emp_list.append(emp_data)
employee_data={"all_employees":emp_list}
else:
return JsonResponse(data={"details":"Any employee has Development Plan with id={}"
.format(current_development_plan.id)}, status=404)
development_plan_object_list.append({"employee_data":employee_data})
# competence_parts
all_competence_parts = current_development_plan.competence_parts.all()
competence_list = []
questions_list = []
sliders_list = []
if all_competence_parts:
for comp_part in all_competence_parts:
comp_part_data={}
competence_d={"competence_parts": []}
comp_part_data["id"] = comp_part.id
comp_part_data["title"] = comp_part.title
comp_part_data["description"] = comp_part.description
comp_part_data["competence_status"] = comp_part.competence_status
all_questions = comp_part.question_set.all()
print all_questions
if all_questions:
for question in all_questions:
question_data = {}
question_data["question_id"] = question.id
question_data["title"] = question.title
question_data["competence_part"] = question.competence_part.id
answer = Answer.objects.filter(question__id = question.id,
employee=current_employee).first()
if answer:
question_data["answer_id"] = answer.id
question_data["answer"] = answer.title
questions_list.append(question_data)
comp_part_data["questions"] = questions_list
all_sliders = comp_part.slider_set.all()
if all_sliders:
for slider in all_sliders:
slider_data = {}
slider_data["slider_id"] = slider.id
slider_data["scale"] = slider.scale
slider_data["competence_part"] = slider.competence_part.id
answer = Answer.objects.filter(slider__id = slider.id,
employee=current_employee).first()
if slider:
slider_data["answer_id"] = answer.id
slider_data["answer"] = answer.slider.scale
sliders_list.append(slider_data)
comp_part_data["sliders"] = sliders_list
comp_part_data["created_at"] = comp_part.created_at
comp_part_data["created_by"] = comp_part.created_by.username
comp_part_data["updated_at"] = comp_part.updated_at
comp_part_data["updated_by"] = comp_part.updated_by.username
competence_keys_list = ['id', 'title', 'description',
'language_code', 'status']
if not competence_list:
get_competence_data(competence_keys_list, comp_part.competence, competence_d,
comp_part_data, competence_list)
else:
competence_found = False
for competence_dict in competence_list:
if competence_dict['id'] == comp_part.competence.id:
competence_dict['competence_parts'].append(comp_part_data)
competence_found = True
break
if not competence_found:
get_competence_data(competence_keys_list, comp_part.competence, competence_d,
comp_part_data, competence_list)
development_plan_object_list.append({"competences":competence_list})
else:
return JsonResponse(data={"details":"Development Plan with id={} doesn't have any Competence Part yet"
.format(current_development_plan.id)}, status=404)
data = {"dev_plan:": development_plan_object_list}
return JsonResponse(status=201, data=data)
else:
return JsonResponse(data={"details": "The user with id={} doesn't have an active Development Plan"
.format(current_employee.user.id)}, status=404) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plan_get(request):\n company = auth_api_key(request)\n plan = get_and_check_plan(request, company)\n return plan",
"def plans(self):\n title = self.context.Title()\n return self.portal_catalog(portal_type='Plan', Subject=title)",
"def get_all_development_plans_for_user(request):\n ... | [
"0.6693725",
"0.66212505",
"0.6312085",
"0.61751384",
"0.61356807",
"0.60612977",
"0.60564137",
"0.59662765",
"0.5901057",
"0.58823967",
"0.5879139",
"0.585081",
"0.58050555",
"0.57663673",
"0.5763864",
"0.57417154",
"0.5737406",
"0.57106596",
"0.57047117",
"0.56790864",
"0.5... | 0.6910045 | 0 |
Get or Update goal by id | def self_goal_by_id(request, goal_id):
current_user = request.user
fields_map = {
'goal_answers': lambda g: [
{
'id': answ.id,
'title': answ.title,
"created_by": answ.created_by.username,
"created_at": answ.created_at,
"file": answ.file.url
} for answ in g.goal_answers.all()
]
}
fields = ['title', 'goal_answers', 'id', 'is_achieved']
goal = Goal.objects.get(pk=goal_id)
if request.method == 'POST':
if goal.created_by != current_user:
raise PermissionDenied("You can edit only your own goals")
f = GoalForm(data=request.json_body)
if not f.is_valid():
return JsonResponse(data={"detail": json.loads(f.errors.as_json())}, status=400)
goal = f.save(current_user, goal)
return JsonResponse(
data={f: fields_map[f](goal) if f in fields_map else getattr(goal, f) for f in fields}, status=200
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def goal(self, goal_id):\r\n return goals.Goal(self, goal_id)",
"def goal(self, goal_id):\r\n return Goal(self, goal_id)",
"def getById(self, id_goals):\n lparam = [id_goals]\n rep = AbstractDAO._read(self, R_READBYID, lparam)\n return self.__fetch_to_object(rep, True)",
"d... | [
"0.68321764",
"0.6705152",
"0.6541664",
"0.6067706",
"0.6032877",
"0.6005164",
"0.59921056",
"0.5976328",
"0.59698373",
"0.5885591",
"0.58422995",
"0.58416253",
"0.5819017",
"0.5806884",
"0.58029574",
"0.5794082",
"0.57710695",
"0.57252264",
"0.57099134",
"0.56292725",
"0.562... | 0.67704624 | 1 |
This function takes a csv file as an argument deduplicates the file and writes the deduplicated dataset to a csv file if a path for the output file is provided as the second argument It returns the deduplicated dataframe Parameters , type, return values | def dataDedup_csv(infile, outfile=None):
if fpath.isfile(infile):
dataset = pd.read_csv(infile, sep=',', dtype='unicode')
dedup_dataset = dataset.drop_duplicates()
if outfile!=None:
dedup_dataset.to_csv(outfile,
encoding='utf-8', index=False,
header=False)
return dedup_dataset
else:
print("file \"%s\" does not exist... or is not a file..." %(infile)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def strip_duplicates(in_file, out_file, sep_type=\"\", header_rows=0):\n\n util.check_output_dir(out_file)\n\n if header_rows !=0: header=read_header(in_file, num_header_rows=header_rows, sep_type =\"\")\n\n if sep_type==\"\":\n data=pd.read_csv(in_file, skiprows=header_rows, header=None, delim_whi... | [
"0.7729395",
"0.6743207",
"0.6410341",
"0.6338948",
"0.6320352",
"0.6265429",
"0.611104",
"0.60951626",
"0.6093322",
"0.5940891",
"0.5861167",
"0.57967466",
"0.57279533",
"0.5659171",
"0.5623228",
"0.5598873",
"0.5594488",
"0.5593631",
"0.5475531",
"0.54566574",
"0.5449391",
... | 0.78255796 | 0 |
This function checks for the size of a dataframe and splits it into parts containing approximately 1 million records as the default number of records for each dataframe.It also provides the option of writing the split dataframes to the disk. Parameters , type, return values | def dataFrameSplit(df, norec=1000000, outfile= None):
# calculation of the no. of rows of the dataframe
df_rsz = len(df.index)
if df_rsz>norec:
no_splits = np.ceil(df_rsz/norec)
dfarr = np.array_split(df,no_splits)
return dfarr
else:
print("The dataframe doesn't have sufficient records")
# printing to disk when
if outfile!=None:
i=0
for arr in dfarr:
arr.to_csv("D:\\ddf"+str(i+1)+".csv",encoding='utf-8', index=False,
header=False)
i = i+1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def split_dataframe(df, size=10*1024*1024):\n \n # size of each row\n row_size = df.memory_usage().sum() / len(df)\n # maximum number of rows in each segment\n row_limit = int(size // row_size)\n # number of segments\n seg_num = (len(df)+row_limit-1)//row_limit\n # split df into segments\n ... | [
"0.73547655",
"0.67736286",
"0.6035461",
"0.59178704",
"0.5877275",
"0.58713686",
"0.584475",
"0.5791403",
"0.57895786",
"0.5737366",
"0.57224447",
"0.57164466",
"0.5714758",
"0.57108814",
"0.5700032",
"0.5689146",
"0.56854934",
"0.5663078",
"0.5655827",
"0.5648846",
"0.56323... | 0.70206773 | 1 |
Embed words in a sequence using GLoVE model | def __glove_embed__(sequence, model):
embedded = []
for word in sequence:
embedded.append(model[word])
return embedded | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def embed(self, sequence):\n words = sequence.split(' ')\n vecs = [self._E[self._w2i[i]] if i in self._w2i else self._E[self._w2i[\"UNK\"]]\n for i in words]\n return vecs",
"def embed(self, loader, model):\n print(\" ** Embedding words\")\n\n words = loader.w... | [
"0.6890587",
"0.6337464",
"0.6256114",
"0.6213781",
"0.61906195",
"0.6124841",
"0.6122943",
"0.5976476",
"0.5967347",
"0.59281605",
"0.5920156",
"0.59157413",
"0.591105",
"0.5867188",
"0.58647937",
"0.5861155",
"0.58420885",
"0.58300006",
"0.58148724",
"0.5812145",
"0.5800837... | 0.80186236 | 0 |
Get BERT embeddings from a dataloader generator. | def _get_bert_embeddings(data_generator, embedding_model: torch.nn.Module, metadata: False):
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
with torch.set_grad_enabled(False):
embeddings = {'ids': [],
'embeddings': [],
'labels': []
}
# get BERT training embeddings
if metadata:
for local_ids, local_data, local_meta, local_labels in data_generator:
local_data, local_meta, local_labels = local_data.to(device).long().squeeze(1), \
local_meta, \
local_labels.to(device).long()
#print(local_data[0].shape)
augmented_embeddings = embedding_model(local_data, local_meta)
embeddings['ids'].extend(np.array(local_ids))
embeddings['embeddings'].extend(np.array(augmented_embeddings.detach().cpu()))
embeddings['labels'].extend(np.array(local_labels.detach().cpu().tolist()))
else:
for local_ids, local_data, local_labels in data_generator:
local_data, local_labels = local_data.to(device).long().squeeze(1), \
local_labels.to(device).long()
#print(local_data[0].shape)
augmented_embeddings = embedding_model(local_data)
embeddings['ids'].extend(np.array(local_ids))
embeddings['embeddings'].extend(np.array(augmented_embeddings.detach().cpu()))
embeddings['labels'].extend(np.array(local_labels.detach().cpu().tolist()))
return embeddings | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_embeddings(model, loader, device=torch.device('cpu')):\n embeddings = []\n labels = []\n for item in loader:\n data, label = item\n data = data.view(-1, 1, data.shape[-1])\n data = data.to(device)\n label = label.to(device)\n output = model(data).squeeze(1)\n\n ... | [
"0.6092691",
"0.60436624",
"0.60151154",
"0.59234816",
"0.59059787",
"0.5782936",
"0.5690828",
"0.5666181",
"0.5597089",
"0.5583591",
"0.5570702",
"0.55484855",
"0.55165946",
"0.5495378",
"0.5463972",
"0.5451432",
"0.54469055",
"0.54453945",
"0.5405851",
"0.54010916",
"0.5367... | 0.73473763 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.