hexsha
stringlengths 40
40
| size
int64 7
1.04M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
247
| max_stars_repo_name
stringlengths 4
125
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
368k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
247
| max_issues_repo_name
stringlengths 4
125
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
116k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
247
| max_forks_repo_name
stringlengths 4
125
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.04M
| avg_line_length
float64 1.77
618k
| max_line_length
int64 1
1.02M
| alphanum_fraction
float64 0
1
| original_content
stringlengths 7
1.04M
| filtered:remove_function_no_docstring
int64 -102
942k
| filtered:remove_class_no_docstring
int64 -354
977k
| filtered:remove_delete_markers
int64 0
60.1k
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
68097eba15392e818a32df460c2104fd3ca64819
| 4,729
|
py
|
Python
|
examples/resources/aws/subnet.py
|
cfeenstra67/statey
|
6d127ed48265e2e072fbb26486458a4b28a333ec
|
[
"MIT"
] | 4
|
2021-02-16T19:34:38.000Z
|
2022-01-31T16:44:14.000Z
|
examples/resources/aws/subnet.py
|
cfeenstra67/statey
|
6d127ed48265e2e072fbb26486458a4b28a333ec
|
[
"MIT"
] | null | null | null |
examples/resources/aws/subnet.py
|
cfeenstra67/statey
|
6d127ed48265e2e072fbb26486458a4b28a333ec
|
[
"MIT"
] | null | null | null |
import asyncio
import contextlib
from typing import Dict, Any, Optional
import aioboto3
import botocore
import statey as st
SubnetConfigType = st.Struct[
"vpc_id" : st.String,
"cidr_block" : st.String,
# Optional args
"ipv6_cidr_block" : ~st.String,
"map_public_ip_on_launch" : st.Boolean(default=False),
"assign_ipv6_address_on_creation" : st.Boolean(default=False),
# Missing: tags
]
SubnetType = st.Struct[
"vpc_id" : st.String,
"cidr_block" : st.String,
"ipv6_association_id" : ~st.String,
"ipv6_cidr_block" : ~st.String,
"map_public_ip_on_launch" : st.Boolean,
"assign_ipv6_address_on_creation" : st.Boolean,
# Missing: tags
"id" : st.String,
"owner_id" : st.Integer,
]
class SubnetMachine(st.SimpleMachine):
"""
Maching representing an AWS subnet
"""
UP = st.State("UP", SubnetConfigType, SubnetType)
@contextlib.asynccontextmanager
@contextlib.asynccontextmanager
@staticmethod
async def create_task(self, config: SubnetConfigType) -> SubnetType:
"""
Create a new subnet
"""
async with self.resource_ctx() as ec2, self.client_ctx() as client:
kws = {"CidrBlock": config["cidr_block"], "VpcId": config["vpc_id"]}
if config["ipv6_cidr_block"] is not None:
kws["Ipv6CidrBlock"] = config["ipv6_cidr_block"]
subnet = await ec2.create_subnet(**kws)
yield await self.convert_instance(subnet)
map_public_ip_on_launch = await subnet.map_public_ip_on_launch
if map_public_ip_on_launch != config["map_public_ip_on_launch"]:
await client.modify_subnet_attribute(
MapPublicIpOnLaunch={"Value": config["map_public_ip_on_launch"]},
SubnetId=subnet.id,
)
await subnet.load()
yield await self.convert_instance(subnet)
assign_ipv6_address_on_creation = (
await subnet.assign_ipv6_address_on_creation
)
if (
assign_ipv6_address_on_creation
!= config["assign_ipv6_address_on_creation"]
):
await client.modify_subnet_attribute(
AssignIpv6AddressOnCreation={
"Value": config["assign_ipv6_address_on_creation"]
},
SubnetId=subnet.id,
)
await subnet.load()
yield await self.convert_instance(subnet)
async def delete_task(self, current: SubnetType) -> st.EmptyType:
"""
Delete the subnet
"""
async with self.resource_ctx() as ec2:
subnet = await ec2.Subnet(current["id"])
await subnet.delete()
subnet_resource = st.MachineResource("aws_subnet", SubnetMachine)
Subnet = subnet_resource.s
RESOURCES = [subnet_resource]
def register(registry: Optional["Registry"] = None) -> None:
"""
Register resources in this module
"""
if registry is None:
registry = st.registry
for resource in RESOURCES:
registry.register(resource)
| 31.317881
| 85
| 0.60055
|
import asyncio
import contextlib
from typing import Dict, Any, Optional
import aioboto3
import botocore
import statey as st
SubnetConfigType = st.Struct[
"vpc_id" : st.String,
"cidr_block" : st.String,
# Optional args
"ipv6_cidr_block" : ~st.String,
"map_public_ip_on_launch" : st.Boolean(default=False),
"assign_ipv6_address_on_creation" : st.Boolean(default=False),
# Missing: tags
]
SubnetType = st.Struct[
"vpc_id" : st.String,
"cidr_block" : st.String,
"ipv6_association_id" : ~st.String,
"ipv6_cidr_block" : ~st.String,
"map_public_ip_on_launch" : st.Boolean,
"assign_ipv6_address_on_creation" : st.Boolean,
# Missing: tags
"id" : st.String,
"owner_id" : st.Integer,
]
class SubnetMachine(st.SimpleMachine):
"""
Maching representing an AWS subnet
"""
UP = st.State("UP", SubnetConfigType, SubnetType)
@contextlib.asynccontextmanager
async def resource_ctx(self):
async with aioboto3.resource("ec2") as ec2:
yield ec2
@contextlib.asynccontextmanager
async def client_ctx(self):
async with aioboto3.client("ec2") as client:
yield client
@staticmethod
async def convert_instance(subnet: "Subnet") -> Dict[str, Any]:
out = {"id": subnet.id}
ipv6_associations = []
(
out["owner_id"],
out["cidr_block"],
# ipv6_associations,
out["map_public_ip_on_launch"],
out["assign_ipv6_address_on_creation"],
out["vpc_id"],
) = await asyncio.gather(
subnet.owner_id,
subnet.cidr_block,
# subnet.ipv6_cidr_block_assocation_set,
subnet.map_public_ip_on_launch,
subnet.assign_ipv6_address_on_creation,
subnet.vpc_id,
)
if ipv6_associations:
association = ipv6_associations[0]
out["ipv6_association_id"] = association["AssociationId"]
out["ipv6_cidr_block"] = association["Ipv6CidrBlock"]
else:
out["ipv6_association_id"] = None
out["ipv6_cidr_block"] = None
return out
async def refresh_state(self, data: Any) -> Optional[Any]:
async with self.resource_ctx() as ec2:
instance = await ec2.Subnet(data["id"])
try:
await instance.load()
except botocore.exceptions.ClientError:
return None
return await self.convert_instance(instance)
async def create_task(self, config: SubnetConfigType) -> SubnetType:
"""
Create a new subnet
"""
async with self.resource_ctx() as ec2, self.client_ctx() as client:
kws = {"CidrBlock": config["cidr_block"], "VpcId": config["vpc_id"]}
if config["ipv6_cidr_block"] is not None:
kws["Ipv6CidrBlock"] = config["ipv6_cidr_block"]
subnet = await ec2.create_subnet(**kws)
yield await self.convert_instance(subnet)
map_public_ip_on_launch = await subnet.map_public_ip_on_launch
if map_public_ip_on_launch != config["map_public_ip_on_launch"]:
await client.modify_subnet_attribute(
MapPublicIpOnLaunch={"Value": config["map_public_ip_on_launch"]},
SubnetId=subnet.id,
)
await subnet.load()
yield await self.convert_instance(subnet)
assign_ipv6_address_on_creation = (
await subnet.assign_ipv6_address_on_creation
)
if (
assign_ipv6_address_on_creation
!= config["assign_ipv6_address_on_creation"]
):
await client.modify_subnet_attribute(
AssignIpv6AddressOnCreation={
"Value": config["assign_ipv6_address_on_creation"]
},
SubnetId=subnet.id,
)
await subnet.load()
yield await self.convert_instance(subnet)
async def delete_task(self, current: SubnetType) -> st.EmptyType:
"""
Delete the subnet
"""
async with self.resource_ctx() as ec2:
subnet = await ec2.Subnet(current["id"])
await subnet.delete()
subnet_resource = st.MachineResource("aws_subnet", SubnetMachine)
Subnet = subnet_resource.s
RESOURCES = [subnet_resource]
def register(registry: Optional["Registry"] = None) -> None:
"""
Register resources in this module
"""
if registry is None:
registry = st.registry
for resource in RESOURCES:
registry.register(resource)
| 1,439
| 0
| 105
|
e00e30fcd11f60809f3d4358645861477fc96613
| 2,173
|
py
|
Python
|
constants.py
|
julzerinos/python-opencv-plant_detection
|
f7895d42cdf6c8d8a7fa43dd624024f185542207
|
[
"MIT"
] | 10
|
2020-08-29T08:30:24.000Z
|
2022-02-15T14:06:19.000Z
|
constants.py
|
julzerinos/python-opencv-plant_detection
|
f7895d42cdf6c8d8a7fa43dd624024f185542207
|
[
"MIT"
] | null | null | null |
constants.py
|
julzerinos/python-opencv-plant_detection
|
f7895d42cdf6c8d8a7fa43dd624024f185542207
|
[
"MIT"
] | 3
|
2020-08-29T08:30:31.000Z
|
2021-01-09T07:52:45.000Z
|
class constants:
"""Class of constants for each component of detector
"""
class bgsub:
"""Background subtraction/segmentation
mod [str] the segmentation model (MOG2, KNN, GMG)
"""
mod = 'MOG2'
class HSV:
"""HSV inRange filtering
maximum values and initial values
"""
max_value = 255
max_value_H = 360//2
low_H = 40
low_S = 30
low_V = 30
high_H = 75
high_S = 255
high_V = 255
low_H_name = 'Low H'
low_S_name = 'Low S'
low_V_name = 'Low V'
high_H_name = 'High H'
high_S_name = 'High S'
high_V_name = 'High V'
class window:
"""Window control
names of windows
"""
window1 = 'Altered'
window2 = 'Original'
class asth:
"""Aesthetics
font [enum int] font used for description
text [bool] should text be imprinted on image?
"""
font = 0
text = False
class cntr:
"""Controls for program
next_k - next image
prev_k - prev image
save - save single image (in mode)
save_all - save all images (in mode)
exit_k - exit the program
dice - calculate dice value
dice_more - show all dice values based on dataset
m1_k etc. - mode selection
modes [dict] dictionary with mode names
"""
next_k = ord('m')
prev_k = ord('n')
save = ord('s')
save_all = ord('z')
exit_k = 27
dice = ord('d')
dice_more = ord('f')
m1_k = ord('1')
m2_k = ord('2')
m3_k = ord('3')
m4_k = ord('4')
m5_k = ord('5')
modes = {
0: 'original',
1: 'hsv_filter',
2: 'ws_mask',
3: 'ws_mask_bg',
4: 'fgbg_segm',
5: 'ws_fgbg_segm'
}
class xtra:
"""Ends and odds
disco [bool] random colors for masks on each loop?
show_save_all [bool] run saving all in foreground?
"""
disco = False
show_save_all = True
| 21.949495
| 58
| 0.495628
|
class constants:
"""Class of constants for each component of detector
"""
class bgsub:
"""Background subtraction/segmentation
mod [str] the segmentation model (MOG2, KNN, GMG)
"""
mod = 'MOG2'
class HSV:
"""HSV inRange filtering
maximum values and initial values
"""
max_value = 255
max_value_H = 360//2
low_H = 40
low_S = 30
low_V = 30
high_H = 75
high_S = 255
high_V = 255
low_H_name = 'Low H'
low_S_name = 'Low S'
low_V_name = 'Low V'
high_H_name = 'High H'
high_S_name = 'High S'
high_V_name = 'High V'
class window:
"""Window control
names of windows
"""
window1 = 'Altered'
window2 = 'Original'
class asth:
"""Aesthetics
font [enum int] font used for description
text [bool] should text be imprinted on image?
"""
font = 0
text = False
class cntr:
"""Controls for program
next_k - next image
prev_k - prev image
save - save single image (in mode)
save_all - save all images (in mode)
exit_k - exit the program
dice - calculate dice value
dice_more - show all dice values based on dataset
m1_k etc. - mode selection
modes [dict] dictionary with mode names
"""
next_k = ord('m')
prev_k = ord('n')
save = ord('s')
save_all = ord('z')
exit_k = 27
dice = ord('d')
dice_more = ord('f')
m1_k = ord('1')
m2_k = ord('2')
m3_k = ord('3')
m4_k = ord('4')
m5_k = ord('5')
modes = {
0: 'original',
1: 'hsv_filter',
2: 'ws_mask',
3: 'ws_mask_bg',
4: 'fgbg_segm',
5: 'ws_fgbg_segm'
}
class xtra:
"""Ends and odds
disco [bool] random colors for masks on each loop?
show_save_all [bool] run saving all in foreground?
"""
disco = False
show_save_all = True
| 0
| 0
| 0
|
194ab0dc74cd18f13ee2868097d1372c6db981b3
| 7,181
|
py
|
Python
|
archive/_s3.py
|
zpz/upathlib
|
5bf7013be244c5f1b276e0b0ac1b9d7637666ceb
|
[
"MIT"
] | null | null | null |
archive/_s3.py
|
zpz/upathlib
|
5bf7013be244c5f1b276e0b0ac1b9d7637666ceb
|
[
"MIT"
] | 19
|
2021-07-08T06:42:31.000Z
|
2021-10-15T09:07:17.000Z
|
archive/_s3.py
|
zpz/upathlib
|
5bf7013be244c5f1b276e0b0ac1b9d7637666ceb
|
[
"MIT"
] | null | null | null |
import logging
from pathlib import Path
import time
import boto3
# This module requires a directory `.aws/` containing credentials in the home directory,
# or environment variables `AWS_ACCESS_KEY_ID`, and `AWS_SECRET_ACCESS_KEY`.
logger = logging.getLogger(__name__)
| 32.640909
| 98
| 0.569002
|
import logging
from pathlib import Path
import time
import boto3
# This module requires a directory `.aws/` containing credentials in the home directory,
# or environment variables `AWS_ACCESS_KEY_ID`, and `AWS_SECRET_ACCESS_KEY`.
logger = logging.getLogger(__name__)
def _get_client():
return boto3.session.Session().client('s3')
def _has_key(s3_client, bucket: str, key: str) -> bool:
response = s3_client.list_objects_v2(Bucket=bucket, Prefix=key)
for obj in response.get('Contents', []):
if obj['Key'] == key:
return True
return False
def _delete_key(s3_client, bucket: str, key: str) -> None:
s3_client.delete_object(Bucket=bucket, Key=key)
def has_key(bucket: str, key: str) -> bool:
return _has_key(_get_client(), bucket, key)
def delete_key(bucket: str, key: str) -> None:
return _delete_key(_get_client(), bucket, key)
class Bucket:
def __init__(self, bucket):
for header in ('s3://', 's3n://'):
if bucket.startswith(header):
bucket = bucket[len(header):]
break
if '/' in bucket:
bucket = bucket[: bucket.find('/')]
self._bucket = boto3.resource('s3').Bucket(bucket)
@property
def name(self):
return self._bucket.name
def _remove_bucket_key(self, key):
for header in ('s3://', 's3n://'):
if key.startswith(header):
assert key.startswith(header + self.name + '/')
key = key[(len(header) + len(self.name) + 1):]
return key
def upload(self, local_file: str, s3_key: str) -> None:
'''
Upload a single file to S3.
`local_file`: path to local file.
`s3_key`: S3 'key'.
Example: suppose current bucket is s3://my-org, with
local_file: /home/zepu/work/data/xyz/memo.txt
s3_key: mysurvey/memo
--> remote file: s3://my-org/mysurvey/memo
Existing file with the same name with be overwritten.
'''
local_file = Path(local_file)
if not local_file.is_file():
raise Exception('a file name is expected')
data = open(local_file, 'rb')
s3_key = self._remove_bucket_key(s3_key)
self._bucket.put_object(Key=s3_key, Body=data)
def upload_tree(self, local_path: str, s3_path: str,
pattern: str = '**/*') -> None:
'''
`local_path`: directory whose content will be uploaded.
If `local_path` contains a trailing `/`, then no part of this path name
becomes part of the remote name; otherwise, the final node in this path name
becomes the leading segment of the remote name.
`pattern`:
'*' (everything directly under `local_path`),
'**/*' (everything recursively under `local_path`),
'*.py' (every Python module directly under `local_path`),
'**/*.py' (every Python module recursively under `local_path`),
etc.
Example: suppose current bucket is s3://my-org, with
local_path: /home/me/work/data/xyz, containing
.../xyz/a.txt,
.../xyz/b.txt,
../xyz/zyx/aa.txt)
s3_path: dataset1
s3_name: '**/*'
--> remote files:
s3://my-org/dataset1/xyz/a.txt
s3://my-org/dataset1/xyz/b.txt
s3://my-org/dataset1/xyz/zyx/aa.txt
local_path: /home/me/work/data/xyz/ (note the trailing '/')
--> remote files:
s3://my-org/dataset1/a.txt
s3://my-org/dataset1/b.txt
s3://my-org/dataset1/zyx/aa.txt
'''
with_root = not local_path.endswith('/')
local_path = Path(local_path)
if not local_path.is_dir():
raise Exception('a directory name is expected')
nodes = [v for v in local_path.glob(pattern) if v.is_file()]
s3_path = self._remove_bucket_key(s3_path)
for node in nodes:
key = node.relative_to(local_path)
if with_root:
key = local_path.name / key
key = s3_path / key
self.upload(node, str(key))
def download(self, s3_key: str, local_file: str = None) -> None:
s3_key = self._remove_bucket_key(s3_key)
if local_file is None:
local_file = str(Path(s3_key).name)
self._bucket.download_file(s3_key, local_file)
def download_tree(self, s3_path: str, local_path: str = None) -> None:
s3_path = self._remove_bucket_key(s3_path)
raise NotImplementedError
def ls(self, key, recursive: bool = False):
# List object names directly or recursively named like `key*`.
# If `key` is `abc/def/`,
# then `abc/def/123/45` will return as `123/45`
#
# If `key` is `abc/def`,
# then `abc/defgh/45` will return as `defgh/45`;
# `abc/def/gh` will return as `/gh`.
#
# So if you know `key` is a `directory`, then it's a good idea to
# include the trailing `/` in `key`.
key = self._remove_bucket_key(key)
z = self._bucket.objects.filter(Prefix=key)
if key.endswith('/'):
key_len = len(key)
else:
key_len = key.rfind('/') + 1
if recursive:
return (v.key[key_len:] for v in z)
# this is a generator, b/c there can be many, many elements
else:
keys = set()
for v in z:
vv = v.key[key_len:]
idx = vv.find('/')
if idx >= 0:
vv = vv[: idx]
keys.add(vv)
return sorted(list(keys))
def has(self, key: str) -> bool:
key = self._remove_bucket_key(key)
if not hasattr(self, '_s3'):
self._s3 = _get_client()
return _has_key(self._s3, self._bucket.name, key)
def delete(self, key: str) -> None:
key = self._remove_bucket_key(key)
if not hasattr(self, '_s3'):
self._s3 = _get_client()
_delete_key(self._s3, self._bucket.name, key)
def delete_tree(self, s3_path: str) -> int:
s3_path = self._remove_bucket_key(s3_path)
n = 0
while True:
nn = self._delete_tree(s3_path)
if nn == 0:
break
n = max(n, nn)
time.sleep(0.5)
return n
def _delete_tree(self, s3_path: str) -> int:
'''
Return the number of objects deleted.
After this operation, the 'folder' `s3_path` is also gone.
TODO: this is not the fastest way to do it.
'''
assert s3_path.endswith('/')
n = 0
for k in self.ls(s3_path, recursive=True):
kk = s3_path + k
self.delete(kk)
n += 1
return n
def reduce_boto_logging():
import boto3.s3.transfer
assert boto3.s3.transfer # silence pyflakes
for name in logging.Logger.manager.loggerDict.keys():
if name.startswith('boto') or name.startswith('urllib3') or name.startswith('s3transfer'):
logging.getLogger(name).setLevel(logging.ERROR)
| 3,441
| 3,300
| 161
|
760e3cb5157583c0d49bb5b803c4560b4deaa6f3
| 3,105
|
py
|
Python
|
experiments/onmtf_cuda.py
|
lucasbrunialti/biclustering-experiments
|
30e51e23b0c3d91939bf7ec49c47d3035e6ecb57
|
[
"BSD-2-Clause"
] | 3
|
2017-11-21T08:21:32.000Z
|
2020-03-10T14:57:06.000Z
|
experiments/onmtf_cuda.py
|
lucasbrunialti/biclustering-experiments
|
30e51e23b0c3d91939bf7ec49c47d3035e6ecb57
|
[
"BSD-2-Clause"
] | null | null | null |
experiments/onmtf_cuda.py
|
lucasbrunialti/biclustering-experiments
|
30e51e23b0c3d91939bf7ec49c47d3035e6ecb57
|
[
"BSD-2-Clause"
] | 4
|
2017-01-18T18:10:37.000Z
|
2021-12-15T02:23:15.000Z
|
import numpy as np
import cudamat as cm
| 28.486239
| 76
| 0.58132
|
import numpy as np
import cudamat as cm
def matrix_factorization_clustering(X_aux, k, l, norm=False, num_iters=100):
cm.cublas_init()
m, n = X_aux.shape
U = cm.CUDAMatrix(np.random.rand(m, k))
S = cm.CUDAMatrix(np.random.rand(k, l))
V = cm.CUDAMatrix(np.random.rand(n, l))
X = cm.CUDAMatrix(X_aux)
# if norm:
# X = Normalizer().fit_transform(X)
XV = cm.CUDAMatrix(np.random.rand(m, l))
XVSt = cm.CUDAMatrix(np.random.rand(m, k))
US = cm.CUDAMatrix(np.random.rand(m, l))
USVt = cm.CUDAMatrix(np.random.rand(m, n))
USVtXt = cm.CUDAMatrix(np.random.rand(m, m))
USVtXtU = cm.CUDAMatrix(np.random.rand(m, k))
U_aux = cm.CUDAMatrix(np.random.rand(m, k))
XtUS = cm.CUDAMatrix(np.random.rand(m, l))
VSt = cm.CUDAMatrix(np.random.rand(n, k))
VStUt = cm.CUDAMatrix(np.random.rand(n, m))
UtX = cm.CUDAMatrix(np.random.rand(k, n))
VStUtXV = cm.CUDAMatrix(np.random.rand(n, l))
V_aux = cm.CUDAMatrix(np.random.rand(n, l))
UtXV = cm.CUDAMatrix(np.random.rand(k, l))
UtUS = cm.CUDAMatrix(np.random.rand(k, l))
UtUSVt = cm.CUDAMatrix(np.random.rand(k, n))
UtUSVtV = cm.CUDAMatrix(np.random.rand(k, l))
S_aux = cm.CUDAMatrix(np.random.rand(k, l))
error_best = np.inf
error = np.inf
for i in range(num_iters):
# compute U
cm.dot(X, V, target=XV)
cm.dot(XV, S.T, target=XVSt)
if i is 0:
cm.dot(U, S, target=US)
cm.dot(US, V.T, target=USVt)
cm.dot(USVt, X.T, target=USVtXt)
cm.dot(USVtXt, U, target=USVtXtU)
cm.divide(XVSt, USVtXtU, U_aux)
cm.mult(U, U_aux, U)
# compute V
cm.dot(U, S, target=US)
cm.dot(X.T, US, target=XtUS)
cm.dot(V, S.T, target=VSt)
cm.dot(VSt, U.T, target=VStUt)
cm.dot(VStUt, XV, target=VStUtXV)
cm.divide(XtUS, VStUtXV, target=V_aux)
cm.mult(V, V_aux, V)
# compute S
cm.dot(U.T, X, target=UtX)
cm.dot(UtX, V, target=UtXV)
cm.dot(U.T, US, target=UtUS)
cm.dot(UtUS, V.T, UtUSVt)
cm.dot(UtUSVt, V, target=UtUSVtV)
cm.divide(UtXV, UtUSVtV, target=S_aux)
cm.mult(S, S_aux, target=S)
error_ant = error
cm.dot(U, S, target=US)
cm.dot(US, V.T, target=USVt)
error = cm.sum(cm.pow(cm.subtract(X, USVt), 2), axis=0)
if error < error_best:
U_best_cm = U
S_best_cm = S
V_best_cm = V
error_best = error
if np.abs(error - error_ant) <= 0.000001:
break
U_best = U_best_cm.asarray()
S_best = S_best_cm.asarray()
V_best = V_best_cm.asarray()
Du = np.diag(np.ones(m).dot(U_best))
Dv = np.diag(np.ones(n).dot(V_best))
U_norm = U_best.dot( np.diag(S_best.dot(Dv).dot(np.ones(l))) )
V_norm = V_best.dot( np.diag(np.ones(k).dot(Du).dot(S_best)) )
rows_ind = np.argmax(U_best, axis=1)
cols_ind = np.argmax(V_best, axis=1)
cm.shutdown()
return U_norm, S_best, V_norm, rows_ind, cols_ind, error_best
| 3,040
| 0
| 23
|
75f81d84c2a063746a49d48076491405182c7fc8
| 10,799
|
py
|
Python
|
storitch/handlers/store.py
|
thomaserlang/storitch
|
dbcf97af547d9cb1ae5c3994654e8db03e43a253
|
[
"MIT"
] | null | null | null |
storitch/handlers/store.py
|
thomaserlang/storitch
|
dbcf97af547d9cb1ae5c3994654e8db03e43a253
|
[
"MIT"
] | 1
|
2022-03-03T00:35:08.000Z
|
2022-03-03T00:35:08.000Z
|
storitch/handlers/store.py
|
thomaserlang/storitch
|
dbcf97af547d9cb1ae5c3994654e8db03e43a253
|
[
"MIT"
] | null | null | null |
from typing import Union, Dict, List, Any, Tuple, Optional
import json, tempfile, os, logging, re, shutil, mimetypes, good
from tornado import httpclient, web, queues
from storitch import utils, config
from storitch.decorators import run_on_executor
from wand import image, exceptions
@web.stream_request_body
def thumbnail(path: str) -> bool:
'''
Specify the path and add a "@" followed by the arguments.
This allows us to easily get the original file, make the changes,
save the file with the full path, so the server never has to do
the operation again, as long as the arguments are precisely the same.
Arguments can be specified as followed:
SXx - Width, keeps aspect ratio
SYx - Height, keeps aspect ration.
Ignored if SX is specified.
ROTATEx - Number of degrees you wise to
rotate the image. Supports
negative numbers.
RESx - Resolution, used for PDF
files, the higher the number,
the better the quality.
PAGEx - Page index in the PDF document.
The file format can be specified by ending the path with
E.g. .jpg, .png, .tiff, etc.
The arguments can be separated with _ or just
don't separate them. Works either way.
Example:
/foo/14bc...@SX1024_ROTATE90.png
Resizes the image to a width of 1024, rotates it 90 degrees and converts
it to a PNG file.
:param path: str
'''
p = path.split('@')
if len(p) != 2:
return False
if os.path.exists(path):
return True
size_match, rotate_match, resolution_match, \
page_match, format_match = __parse_arguments(p[1])
# a specific page in a PDF document
if page_match and page_match.group(1) != None:
page = '[{}]'.format(page_match.group(1))
else:
# Prevent a dicom file or pdf file from extracting multiple images
page = '[0]'
o = {
'filename': p[0]+page
}
if resolution_match and resolution_match.group(1) != None:
o['resolution'] = int(resolution_match.group(1))
with image.Image(**o) as img:
if size_match:
# resize, keep aspect ratio
if size_match.group(1) != None:# width
img.transform(resize=size_match.group(1))
elif size_match.group(2) != None:# height
img.transform(resize='x'+size_match.group(2))
if rotate_match:
if rotate_match.group(1) != None:
img.rotate(int(rotate_match.group(1)))
if format_match:
img.format = format_match.group(1)
img.save(filename=path)
return True
| 32.429429
| 96
| 0.567738
|
from typing import Union, Dict, List, Any, Tuple, Optional
import json, tempfile, os, logging, re, shutil, mimetypes, good
from tornado import httpclient, web, queues
from storitch import utils, config
from storitch.decorators import run_on_executor
from wand import image, exceptions
class Base_handler(web.RequestHandler):
def write_object(self, data: Union[Dict, List]) -> None:
self.set_json_headers()
self.write(json.dumps(data))
def set_json_headers(self) -> None:
self.set_header('Cache-Control', 'no-cache, must-revalidate')
self.set_header('Expires', 'Sat, 26 Jul 1997 05:00:00 GMT')
self.set_header('Content-Type', 'application/json')
def write_error(self, status_code: int, **kwargs) -> None:
self.set_json_headers()
error = {'error': 'Unknown error'}
if 'exc_info' in kwargs:
error['error'] = str(kwargs['exc_info'][1])
self.set_status(status_code)
self.write_object(error)
@run_on_executor
def move_to_permanent_store(self, temp_path: str, filename: str) -> Dict[str, Any]:
return move_to_permanent_store(temp_path, filename)
def get_content_type(self, path: str) -> str:
# From: https://www.tornadoweb.org/en/stable/_modules/tornado/web.html#StaticFileHandler
mime_type, encoding = mimetypes.guess_type(path)
# per RFC 6713, use the appropriate type for a gzip compressed file
if encoding == "gzip":
return "application/gzip"
# As of 2015-07-21 there is no bzip2 encoding defined at
# http://www.iana.org/assignments/media-types/media-types.xhtml
# So for that (and any other encoding), use octet-stream.
elif encoding is not None:
return "application/octet-stream"
elif mime_type is not None:
return mime_type
# if mime_type not detected, use application/octet-stream
else:
return "application/octet-stream"
class Multipart_handler(Base_handler):
async def post(self) -> None:
if 'multipart/form-data' not in self.request.headers.get('Content-Type').lower():
raise web.HTTPError(400,
'Content-Type must be multipart/form-data, was: {}'.format(
self.request.headers.get('Content-Type')
)
)
if not self.request.files:
raise web.HTTPError(400, 'No files uploaded')
self.set_status(201)
results = []
for n in self.request.files:
for f in self.request.files[n]:
temp_path = await self.save_body(f['body'])
f['body'] = None
r = await self.move_to_permanent_store(temp_path, f['filename'])
results.append(r)
self.write_object(results)
@run_on_executor
def save_body(self, body: bytes) -> str:
with tempfile.NamedTemporaryFile(delete=False, prefix='storitch-') as t:
t.write(body)
return t.name
@web.stream_request_body
class Session_handler(Base_handler):
__schema__ = good.Schema({
'finished': good.Boolean(),
'filename': good.All(str, good.Length(min=1, max=255)),
good.Optional('session'): str,
})
def prepare(self) -> None:
if 'application/octet-stream' not in self.request.headers.get('Content-Type').lower():
raise web.HTTPError(400,
'Content-Type must be application/octet-stream, was: {}'.format(
self.request.headers.get('Content-Type')
)
)
j = self.request.headers.get('storitch-json', None)
if not j:
raise web.HTTPError(400, 'Header: storitch-json must be set')
data = json.loads(j)
self.h_finished = data['finished']
self.h_filename = data['filename']
self.h_session = data.get('session')
if not self.h_session:
self.h_session = self.new_session()
self.temp_path = os.path.join(
tempfile.gettempdir(),
self.h_session
)
if not os.path.isfile(self.temp_path):
raise web.HTTPError(400, 'Session unknown')
self.file = open(self.temp_path, 'ab')
def validate_json(self, data: Dict[str, Any]) -> Union[Dict[str, Any], List]:
try:
return self.__schema__(data)
except good.MultipleInvalid as ee:
data = []
for e in ee:
data.append(
'{}: {}'.format(
'.'.join(str(x) for x in e.path),
e.message,
)
)
raise web.HTTPError(400,' - '.join(d for d in data))
except good.Invalid as e:
raise web.HTTPError(400, '{}: {}'.format(
'.'.join(str(x) for x in e.path),
e.message,
))
async def data_received(self, chunk: bytes) -> None:
self.file.write(chunk)
async def put(self) -> None:
self.file.close()
if self.h_finished:
r = await self.move_to_permanent_store(self.temp_path, self.h_filename)
self.write_object(r)
else:
self.write_object({
'session': self.h_session,
})
def new_session(self) -> str:
with tempfile.NamedTemporaryFile(delete=False, prefix='storitch-') as t:
return os.path.basename(t.name)
class Thumbnail_handler(Base_handler):
async def get(self, hash_: Optional[str] = None) -> None:
if not hash_ or len(hash_) < 64:
raise web.HTTPError(404, 'Please specify a file hash')
path = os.path.abspath(os.path.join(
os.path.realpath(config['store_path']),
utils.path_from_hash(hash_),
hash_
))
if '@' in hash_:
path = await self.thumbnail(path)
if not path:
self.write('Failed to create the thumbnail')
self.set_header('Content-Type', self.get_content_type(path))
with open(path, 'rb') as f:
while True:
d = f.read(16384)
if not d:
break
self.write(d)
@run_on_executor
def thumbnail(self, path: str) -> str:
if thumbnail(path):
return path
def move_to_permanent_store(temp_path: str, filename: str) -> Dict[str, Any]:
hash_ = utils.file_sha256(temp_path)
path = os.path.abspath(os.path.join(
os.path.realpath(config['store_path']),
utils.path_from_hash(hash_),
))
if not os.path.exists(path):
os.makedirs(path, mode=0o755)
path = os.path.join(path, hash_)
if not os.path.exists(path):
shutil.move(temp_path, path)
os.chmod(path, 0o755)
else:
os.remove(temp_path)
extra = {
'type': 'file',
}
d = os.path.splitext(filename)
if len(d) == 2:
ext = d[1]
if ext.lower() in config['image_exts']:
wh = image_width_high(path)
if wh:
wh['type'] = 'image'
if wh:
extra.update(wh)
return {
'stored': True,
'filesize': os.stat(path).st_size,
'hash': hash_,
'filename': filename,
**extra
}
def image_width_high(path) -> Optional[Dict[str, int]]:
try:
with image.Image(filename=path) as img:
return {
'width': img.width,
'height': img.height,
}
except (ValueError, exceptions.MissingDelegateError):
return None
def thumbnail(path: str) -> bool:
'''
Specify the path and add a "@" followed by the arguments.
This allows us to easily get the original file, make the changes,
save the file with the full path, so the server never has to do
the operation again, as long as the arguments are precisely the same.
Arguments can be specified as followed:
SXx - Width, keeps aspect ratio
SYx - Height, keeps aspect ration.
Ignored if SX is specified.
ROTATEx - Number of degrees you wise to
rotate the image. Supports
negative numbers.
RESx - Resolution, used for PDF
files, the higher the number,
the better the quality.
PAGEx - Page index in the PDF document.
The file format can be specified by ending the path with
E.g. .jpg, .png, .tiff, etc.
The arguments can be separated with _ or just
don't separate them. Works either way.
Example:
/foo/14bc...@SX1024_ROTATE90.png
Resizes the image to a width of 1024, rotates it 90 degrees and converts
it to a PNG file.
:param path: str
'''
p = path.split('@')
if len(p) != 2:
return False
if os.path.exists(path):
return True
size_match, rotate_match, resolution_match, \
page_match, format_match = __parse_arguments(p[1])
# a specific page in a PDF document
if page_match and page_match.group(1) != None:
page = '[{}]'.format(page_match.group(1))
else:
# Prevent a dicom file or pdf file from extracting multiple images
page = '[0]'
o = {
'filename': p[0]+page
}
if resolution_match and resolution_match.group(1) != None:
o['resolution'] = int(resolution_match.group(1))
with image.Image(**o) as img:
if size_match:
# resize, keep aspect ratio
if size_match.group(1) != None:# width
img.transform(resize=size_match.group(1))
elif size_match.group(2) != None:# height
img.transform(resize='x'+size_match.group(2))
if rotate_match:
if rotate_match.group(1) != None:
img.rotate(int(rotate_match.group(1)))
if format_match:
img.format = format_match.group(1)
img.save(filename=path)
return True
def __parse_arguments(arguments: str) -> Tuple[str, str, str, str, str]:
size_match = re.search(
'SX(\d+)|SY(\d+)',
arguments,
re.I
)
rotate_match = re.search(
'ROTATE(-?\d+)',
arguments,
re.I
)
resolution_match = re.search(
'RES(\d+)',
arguments,
re.I
)
page_match = re.search(
'PAGE(\d+)',
arguments,
re.I
)
format_match = re.search(
'\.([a-z0-9]{2,5})',
arguments,
re.I
)
return (
size_match,
rotate_match,
resolution_match,
page_match,
format_match,
)
| 7,197
| 690
| 160
|
b0fbc439e2b9764f97c049f14ced20df3b6321a9
| 7,052
|
py
|
Python
|
ddf_library/functions/graph_lib/page_rank.py
|
eubr-bigsea/Compss-Python
|
09ab7c474c8badc9932de3e1148f62ffba16b0b2
|
[
"Apache-2.0"
] | 3
|
2017-08-22T11:32:02.000Z
|
2021-08-09T09:35:51.000Z
|
ddf_library/functions/graph_lib/page_rank.py
|
eubr-bigsea/Compss-Python
|
09ab7c474c8badc9932de3e1148f62ffba16b0b2
|
[
"Apache-2.0"
] | null | null | null |
ddf_library/functions/graph_lib/page_rank.py
|
eubr-bigsea/Compss-Python
|
09ab7c474c8badc9932de3e1148f62ffba16b0b2
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "Lucas Miguel S Ponce"
__email__ = "[email protected]"
from ddf_library.bases.metadata import Status, OPTGroup
from ddf_library.bases.context_base import ContextBase
from ddf_library.ddf import DDF
from ddf_library.bases.ddf_model import ModelDDF
from ddf_library.utils import generate_info, read_stage_file, \
create_stage_files, save_stage_file
from pycompss.api.api import compss_wait_on, compss_delete_object
from pycompss.api.task import task
from pycompss.functions.reduce import merge_reduce
from pycompss.api.parameter import FILE_IN, COLLECTION_IN
import pandas as pd
import numpy as np
__all__ = ['PageRank']
# TODO: this algorithm can be optimized
class PageRank(ModelDDF):
# noinspection PyUnresolvedReferences
"""
PageRank is one of the methods Google uses to determine a page's
relevance or importance. The idea that Page Rank brought up was that, the
importance of any web page can be judged by looking at the pages that link
to it.
PageRank can be utilized in others domains. For example, may also be used
as a methodology to measure the apparent impact of a community.
.. note: This parallel implementation assumes that the list of unique
vertex can be fit in memory.
:Example:
>>> pr = PageRank(damping_factor=0.85)
>>> ddf2 = pr.transform(ddf1, inlink_col='col1', outlink_col='col2')
"""
def __init__(self, damping_factor=0.85, max_iters=100):
"""
:param damping_factor: Default damping factor is 0.85;
:param max_iters: Maximum number of iterations (default is 100).
"""
super(PageRank, self).__init__()
self.inlink_col = None
self.outlink_col = None
self.max_iters = max_iters
self.damping_factor = damping_factor
def transform(self, data, outlink_col, inlink_col):
"""
Generates the PageRank's result.
:param data: DDF
:param outlink_col: Out-link vertex;
:param inlink_col: In-link vertex;
:return: DDF with Vertex and Rank columns
"""
df, nfrag, tmp = self._ddf_initial_setup(data)
self.inlink_col = inlink_col
self.outlink_col = outlink_col
col1 = 'Vertex'
col2 = 'Rank'
"""
Load all URL's from the data and initialize their neighbors.
Initialize each page’s rank to 1.0.
"""
adj_list = [{} for _ in range(nfrag)]
rank_list = [{} for _ in range(nfrag)]
counts_in = [{} for _ in range(nfrag)]
for i in range(nfrag):
adj_list[i], rank_list[i], counts_in[i] = \
_pr_create_adjlist(df[i], inlink_col, outlink_col)
counts_in = merge_reduce(_merge_counts, counts_in)
for i in range(nfrag):
adj_list[i] = _pr_update_adjlist(adj_list[i], counts_in)
compss_delete_object(counts_in)
for iteration in range(self.max_iters):
"""Calculate the partial contribution of each vertex."""
contributions = [_calc_contribuitions(adj_list[i], rank_list[i])
for i in range(nfrag)]
merged_c = merge_reduce(_merge_counts, contributions)
"""Update each vertex rank in the fragment."""
rank_list = [_update_rank(rank_list[i], merged_c,
self.damping_factor)
for i in range(nfrag)]
merged_table = merge_ranks(rank_list, col1, col2)
result, info = _pagerank_split(merged_table, nfrag)
new_state_uuid = ContextBase\
.ddf_add_task(self.name,
status=Status.STATUS_COMPLETED,
opt=OPTGroup.OPT_OTHER,
info_data=info,
parent=[tmp.last_uuid],
result=result,
function=self.transform,
parameters=data)
return DDF(last_uuid=new_state_uuid)
@task(returns=3, data=FILE_IN)
@task(returns=1)
def _merge_counts(counts1, counts2):
"""
Merge the frequency of each vertex.
.. note:: It assumes that the frequency list can be fitted in memory.
"""
for v_out in counts2:
if v_out in counts1:
counts1[v_out] += counts2[v_out]
else:
counts1[v_out] = counts2[v_out]
return counts1
@task(returns=1)
def _pr_update_adjlist(adj1, counts_in):
"""Update the frequency of vertex in each fragment."""
for key in adj1:
adj1[key][1] = counts_in[key]
return adj1
@task(returns=1)
def _calc_contribuitions(adj, ranks):
"""Calculate the partial contribution of each vertex."""
contrib = {}
for key in adj:
urls = adj[key][0]
num_neighbors = adj[key][1]
rank = ranks[key]
for url in urls:
if url not in contrib:
# out = contrib
contrib[url] = rank/num_neighbors
else:
contrib[url] += rank/num_neighbors
return contrib
@task(returns=1)
def _update_rank(ranks, contrib, factor):
"""Update the rank of each vertex in the fragment."""
bo = 1.0 - factor
for key in contrib:
if key in ranks:
ranks[key] = bo + factor*contrib[key]
return ranks
@task(returns=1, dfs=COLLECTION_IN)
def merge_ranks(dfs, c1, c2):
"""Create the final result. Merge and remove duplicates vertex."""
dfs = [pd.DataFrame(ranks.items(), columns=[c1, c2]) for ranks in dfs]
dfs = pd.concat(dfs, ignore_index=True)\
.drop_duplicates(ignore_index=True)\
.sort_values(['Rank'], ascending=False, ignore_index=True)
return dfs
def _pagerank_split(result, nfrag):
"""Split the list of vertex into nfrag parts.
Note: the list of unique vertex and their ranks must be fit in memory.
"""
result = compss_wait_on(result)
result = np.array_split(result, nfrag)
outfiles = create_stage_files(nfrag)
info = [0] * nfrag
for f, table in enumerate(result):
save_stage_file(outfiles[f], table)
info[f] = generate_info(table, f)
return outfiles, info
| 29.630252
| 78
| 0.620108
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "Lucas Miguel S Ponce"
__email__ = "[email protected]"
from ddf_library.bases.metadata import Status, OPTGroup
from ddf_library.bases.context_base import ContextBase
from ddf_library.ddf import DDF
from ddf_library.bases.ddf_model import ModelDDF
from ddf_library.utils import generate_info, read_stage_file, \
create_stage_files, save_stage_file
from pycompss.api.api import compss_wait_on, compss_delete_object
from pycompss.api.task import task
from pycompss.functions.reduce import merge_reduce
from pycompss.api.parameter import FILE_IN, COLLECTION_IN
import pandas as pd
import numpy as np
__all__ = ['PageRank']
# TODO: this algorithm can be optimized
class PageRank(ModelDDF):
# noinspection PyUnresolvedReferences
"""
PageRank is one of the methods Google uses to determine a page's
relevance or importance. The idea that Page Rank brought up was that, the
importance of any web page can be judged by looking at the pages that link
to it.
PageRank can be utilized in others domains. For example, may also be used
as a methodology to measure the apparent impact of a community.
.. note: This parallel implementation assumes that the list of unique
vertex can be fit in memory.
:Example:
>>> pr = PageRank(damping_factor=0.85)
>>> ddf2 = pr.transform(ddf1, inlink_col='col1', outlink_col='col2')
"""
def __init__(self, damping_factor=0.85, max_iters=100):
"""
:param damping_factor: Default damping factor is 0.85;
:param max_iters: Maximum number of iterations (default is 100).
"""
super(PageRank, self).__init__()
self.inlink_col = None
self.outlink_col = None
self.max_iters = max_iters
self.damping_factor = damping_factor
def transform(self, data, outlink_col, inlink_col):
"""
Generates the PageRank's result.
:param data: DDF
:param outlink_col: Out-link vertex;
:param inlink_col: In-link vertex;
:return: DDF with Vertex and Rank columns
"""
df, nfrag, tmp = self._ddf_initial_setup(data)
self.inlink_col = inlink_col
self.outlink_col = outlink_col
col1 = 'Vertex'
col2 = 'Rank'
"""
Load all URL's from the data and initialize their neighbors.
Initialize each page’s rank to 1.0.
"""
adj_list = [{} for _ in range(nfrag)]
rank_list = [{} for _ in range(nfrag)]
counts_in = [{} for _ in range(nfrag)]
for i in range(nfrag):
adj_list[i], rank_list[i], counts_in[i] = \
_pr_create_adjlist(df[i], inlink_col, outlink_col)
counts_in = merge_reduce(_merge_counts, counts_in)
for i in range(nfrag):
adj_list[i] = _pr_update_adjlist(adj_list[i], counts_in)
compss_delete_object(counts_in)
for iteration in range(self.max_iters):
"""Calculate the partial contribution of each vertex."""
contributions = [_calc_contribuitions(adj_list[i], rank_list[i])
for i in range(nfrag)]
merged_c = merge_reduce(_merge_counts, contributions)
"""Update each vertex rank in the fragment."""
rank_list = [_update_rank(rank_list[i], merged_c,
self.damping_factor)
for i in range(nfrag)]
merged_table = merge_ranks(rank_list, col1, col2)
result, info = _pagerank_split(merged_table, nfrag)
new_state_uuid = ContextBase\
.ddf_add_task(self.name,
status=Status.STATUS_COMPLETED,
opt=OPTGroup.OPT_OTHER,
info_data=info,
parent=[tmp.last_uuid],
result=result,
function=self.transform,
parameters=data)
return DDF(last_uuid=new_state_uuid)
@task(returns=3, data=FILE_IN)
def _pr_create_adjlist(data, inlink, outlink):
cols = [outlink, inlink]
adj = {}
ranks = {}
data = read_stage_file(data, cols=cols)
for link in data[cols].to_numpy():
v_out, v_in = link
# Generate a partial adjacency list.
if v_out in adj:
adj[v_out][0].append(v_in)
adj[v_out][1] += 1
else:
adj[v_out] = [[v_in], 1]
# Generate a partial rank list of each vertex.
if v_out not in ranks:
ranks[v_out] = 1.0 # Rank, contributions, main
if v_in not in ranks:
ranks[v_in] = 1.0
# Generate a partial list of frequency of each vertex.
counts_in = {}
for v_out in adj:
counts_in[v_out] = adj[v_out][1]
return adj, ranks, counts_in
@task(returns=1)
def _merge_counts(counts1, counts2):
"""
Merge the frequency of each vertex.
.. note:: It assumes that the frequency list can be fitted in memory.
"""
for v_out in counts2:
if v_out in counts1:
counts1[v_out] += counts2[v_out]
else:
counts1[v_out] = counts2[v_out]
return counts1
@task(returns=1)
def _pr_update_adjlist(adj1, counts_in):
"""Update the frequency of vertex in each fragment."""
for key in adj1:
adj1[key][1] = counts_in[key]
return adj1
@task(returns=1)
def _calc_contribuitions(adj, ranks):
"""Calculate the partial contribution of each vertex."""
contrib = {}
for key in adj:
urls = adj[key][0]
num_neighbors = adj[key][1]
rank = ranks[key]
for url in urls:
if url not in contrib:
# out = contrib
contrib[url] = rank/num_neighbors
else:
contrib[url] += rank/num_neighbors
return contrib
@task(returns=1)
def _update_rank(ranks, contrib, factor):
"""Update the rank of each vertex in the fragment."""
bo = 1.0 - factor
for key in contrib:
if key in ranks:
ranks[key] = bo + factor*contrib[key]
return ranks
@task(returns=1, dfs=COLLECTION_IN)
def merge_ranks(dfs, c1, c2):
"""Create the final result. Merge and remove duplicates vertex."""
dfs = [pd.DataFrame(ranks.items(), columns=[c1, c2]) for ranks in dfs]
dfs = pd.concat(dfs, ignore_index=True)\
.drop_duplicates(ignore_index=True)\
.sort_values(['Rank'], ascending=False, ignore_index=True)
return dfs
def _pagerank_split(result, nfrag):
"""Split the list of vertex into nfrag parts.
Note: the list of unique vertex and their ranks must be fit in memory.
"""
result = compss_wait_on(result)
result = np.array_split(result, nfrag)
outfiles = create_stage_files(nfrag)
info = [0] * nfrag
for f, table in enumerate(result):
save_stage_file(outfiles[f], table)
info[f] = generate_info(table, f)
return outfiles, info
| 768
| 0
| 22
|
faeba7f3ca3382662de94211ec44a097d3c7ac9f
| 1,390
|
py
|
Python
|
UART_py/tela_serial.py
|
Rodrigo98Matos/Interface-Grafca-Serial
|
b996655e7376229856116ec3d150f3210a82cb4d
|
[
"MIT"
] | null | null | null |
UART_py/tela_serial.py
|
Rodrigo98Matos/Interface-Grafca-Serial
|
b996655e7376229856116ec3d150f3210a82cb4d
|
[
"MIT"
] | null | null | null |
UART_py/tela_serial.py
|
Rodrigo98Matos/Interface-Grafca-Serial
|
b996655e7376229856116ec3d150f3210a82cb4d
|
[
"MIT"
] | null | null | null |
import PySimpleGUI as sg
from uart_serial import uart
arduino = uart()
| 37.567568
| 117
| 0.538129
|
import PySimpleGUI as sg
from uart_serial import uart
arduino = uart()
class tela:
def __init__(self, portas):
#Layout
sg.theme('Black')
layout = [
[sg.Text('Porta:',size=(7,0)),sg.Combo(values=(portas),key='porta')],
[sg.Text('Baudrate:',size=(7,0)),sg.Combo(values=([9600,115200]),key='baudrate')],
[sg.Checkbox('Dados da Missão',key='dados_missao'),sg.Checkbox('Beacon',key='beacon')],
[sg.Button('Continuar')],
]
"""layout = [
[sg.Text('Nome',size=(5,0)),sg.Input(size=(15,0),key='nome')],
[sg.Text('Idade',size=(5,0)),sg.Input(size=(5,0),key='idade')],
[sg.Text('Email:')],
[sg.Checkbox('Gmail',key='gmail'),sg.Checkbox('Hotmail',key='hotmail'),sg.Checkbox('Yahoo',key='yahoo')],
[sg.Radio('Sim','email',key='email_sim'),sg.Radio('Não','email',key='email_nao')],
[sg.Button('Enviar dados')],
[sg.Output(size=(30,20))]
]"""
#Janela
self.janela = sg.Window("dados do Usuário").layout(layout)
#Extrair os dados da tela
self.button, self.values = self.janela.Read()
def iniciar(self):
while True:
self.button, self.values = self.janela.Read()
if self.button == sg.WIN_CLOSED:
break
print(self.values)
| 1,250
| -10
| 80
|
a747752e784483f13e0672fa7ef44261d743dd9f
| 403
|
py
|
Python
|
babybuddy/migrations/0017_promocode_max_usage_per_account.py
|
amcquistan/babyasst
|
310a7948f06b71ae0d62593a3b5932abfd4eb444
|
[
"BSD-2-Clause"
] | null | null | null |
babybuddy/migrations/0017_promocode_max_usage_per_account.py
|
amcquistan/babyasst
|
310a7948f06b71ae0d62593a3b5932abfd4eb444
|
[
"BSD-2-Clause"
] | null | null | null |
babybuddy/migrations/0017_promocode_max_usage_per_account.py
|
amcquistan/babyasst
|
310a7948f06b71ae0d62593a3b5932abfd4eb444
|
[
"BSD-2-Clause"
] | null | null | null |
# Generated by Django 2.2.6 on 2019-11-27 20:28
from django.db import migrations, models
| 21.210526
| 49
| 0.615385
|
# Generated by Django 2.2.6 on 2019-11-27 20:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('babybuddy', '0016_auto_20191127_1424'),
]
operations = [
migrations.AddField(
model_name='promocode',
name='max_usage_per_account',
field=models.IntegerField(default=1),
),
]
| 0
| 289
| 23
|
838a777e045278ea92893c031457352439926ec4
| 1,897
|
py
|
Python
|
transact/models.py
|
BrilliantGrant/Transaction-application
|
988fdbd6ed8a1fea9ca8366eeb2b30275b727836
|
[
"MIT",
"Unlicense"
] | null | null | null |
transact/models.py
|
BrilliantGrant/Transaction-application
|
988fdbd6ed8a1fea9ca8366eeb2b30275b727836
|
[
"MIT",
"Unlicense"
] | null | null | null |
transact/models.py
|
BrilliantGrant/Transaction-application
|
988fdbd6ed8a1fea9ca8366eeb2b30275b727836
|
[
"MIT",
"Unlicense"
] | null | null | null |
from django.contrib.auth.models import User
from tinymce.models import HTMLField
from django.db import models
# Create your models here.
| 25.293333
| 72
| 0.710596
|
from django.contrib.auth.models import User
from tinymce.models import HTMLField
from django.db import models
# Create your models here.
class Pic(models.Model):
pic = models.ImageField(upload_to = "pics/",null = True)
user = models.ForeignKey(User,null=True)
pic_name = models.CharField(max_length = 30,null = True)
likes = models.IntegerField(default=0)
pic_caption = models.TextField(null = True)
pub_date = models.DateTimeField(auto_now_add=True,null=True)
# profile = models.ForeignKey(Profile, null=True)
comments = models.IntegerField(default=0)
def __str__(self):
return self.pic_name
def delete_pic(self):
self.delete()
def save_pic(self):
self.save()
def update_caption(self,new_caption):
self.pic_caption = new_caption
self.save()
@classmethod
def get_pics_by_user(cls,id):
sent_pics = Pic.objects.filter(user_id=id)
return sent_pics
@classmethod
def get_pics_by_id(cls,id):
fetched_pic = Pic.objects.get(id = id)
return fetched_pic
class Meta:
ordering = ['-pub_date']
def __str__(self):
return self.user.username
def save_profile(self):
self.save()
class Profile(models.Model):
username = models.CharField(default='User',max_length=30)
email = models.CharField(default='email',max_length=30)
profile_pic = models.ImageField(upload_to = "profile/",null=True)
Phone_number = models.CharField(max_length =30)
Amount = models.CharField(max_length =30)
withdraw = models.CharField(max_length =30)
Balance = models.CharField(max_length =30)
def __str__(self):
return self.username
def delete_profile(self):
self.delete()
def save_profile(self):
self.save()
@classmethod
def search_profile(cls,search_term):
got_profiles = cls.objects.filter(first_name__icontains = search_term)
return got_profiles
| 504
| 1,209
| 45
|
4a30bc154d6f294fba0d9fd2e54096f76bfb7a5f
| 553
|
py
|
Python
|
migrations/versions/6564c80d1598_.py
|
realtimclemans/SafetyHealthDotCloud
|
c7eca52f3e6519de34b05ba573a5778423c2dae2
|
[
"MIT"
] | null | null | null |
migrations/versions/6564c80d1598_.py
|
realtimclemans/SafetyHealthDotCloud
|
c7eca52f3e6519de34b05ba573a5778423c2dae2
|
[
"MIT"
] | 1
|
2021-02-15T15:58:54.000Z
|
2021-02-15T15:58:54.000Z
|
migrations/versions/6564c80d1598_.py
|
realtimclemans/SafetyHealthDotCloud
|
c7eca52f3e6519de34b05ba573a5778423c2dae2
|
[
"MIT"
] | null | null | null |
"""empty message
Revision ID: 6564c80d1598
Revises: c3c2dc9000d3
Create Date: 2021-06-19 17:03:45.811885
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '6564c80d1598'
down_revision = 'c3c2dc9000d3'
branch_labels = None
depends_on = None
| 19.068966
| 65
| 0.687161
|
"""empty message
Revision ID: 6564c80d1598
Revises: c3c2dc9000d3
Create Date: 2021-06-19 17:03:45.811885
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '6564c80d1598'
down_revision = 'c3c2dc9000d3'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
| 208
| 0
| 46
|
5eb6460889a29c993a99192a3b46f1a9dae54de9
| 1,181
|
py
|
Python
|
setup.py
|
tizz98/xl
|
4534a1792f878964fedd87432c438ab6364ece49
|
[
"MIT"
] | 1
|
2018-03-30T17:36:41.000Z
|
2018-03-30T17:36:41.000Z
|
setup.py
|
tizz98/xl
|
4534a1792f878964fedd87432c438ab6364ece49
|
[
"MIT"
] | null | null | null |
setup.py
|
tizz98/xl
|
4534a1792f878964fedd87432c438ab6364ece49
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except (IOError, ImportError):
with open('README.md') as f:
long_description = f.read()
from xl import __str_version__, __author__
setup(
name='xl',
version=__str_version__,
description='A nice way of generating excel formulas in python.',
long_description=long_description,
url='https://github.com/tizz98/xl',
download_url='https://github.com/tizz98/xl/tarball/%s' % (
__str_version__
),
author=__author__,
author_email='[email protected]',
license='MIT',
packages=['xl'],
keywords='xl excel formulas formula formulae',
zip_safe=True,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
)
| 27.465116
| 69
| 0.647756
|
#!/usr/bin/env python
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except (IOError, ImportError):
with open('README.md') as f:
long_description = f.read()
from xl import __str_version__, __author__
setup(
name='xl',
version=__str_version__,
description='A nice way of generating excel formulas in python.',
long_description=long_description,
url='https://github.com/tizz98/xl',
download_url='https://github.com/tizz98/xl/tarball/%s' % (
__str_version__
),
author=__author__,
author_email='[email protected]',
license='MIT',
packages=['xl'],
keywords='xl excel formulas formula formulae',
zip_safe=True,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
)
| 0
| 0
| 0
|
91c554ac6c77dd73935b5f3788cc38b6a16bd729
| 420
|
py
|
Python
|
GroupCondition.py
|
simplymanas/python-learning
|
75bc99c0dce211fd1bce5f6ce1155e0f4c71d7d0
|
[
"Apache-2.0"
] | 4
|
2020-08-18T05:29:38.000Z
|
2021-03-13T19:01:10.000Z
|
GroupCondition.py
|
simplymanas/python-learning
|
75bc99c0dce211fd1bce5f6ce1155e0f4c71d7d0
|
[
"Apache-2.0"
] | null | null | null |
GroupCondition.py
|
simplymanas/python-learning
|
75bc99c0dce211fd1bce5f6ce1155e0f4c71d7d0
|
[
"Apache-2.0"
] | 1
|
2020-08-29T12:57:17.000Z
|
2020-08-29T12:57:17.000Z
|
# all, any for group condition check
# Manas Dash
# 22th July 2020
# think of any (or) and all (and) as series of logical or and and operators
healthy_percentage = 100
have_money = 0
no_of_friends = 5
mental_happiness = [
healthy_percentage > 50,
have_money > 0,
no_of_friends >= 1
]
if all(mental_happiness):
print('happiness inside')
if any(mental_happiness):
print('happiness outside')
# happiness outside
| 17.5
| 75
| 0.738095
|
# all, any for group condition check
# Manas Dash
# 22th July 2020
# think of any (or) and all (and) as series of logical or and and operators
healthy_percentage = 100
have_money = 0
no_of_friends = 5
mental_happiness = [
healthy_percentage > 50,
have_money > 0,
no_of_friends >= 1
]
if all(mental_happiness):
print('happiness inside')
if any(mental_happiness):
print('happiness outside')
# happiness outside
| 0
| 0
| 0
|
41559822f3cd5754bbcded35318328ac3c23e9ab
| 12,012
|
py
|
Python
|
zx64c/ast.py
|
khrynczenko/zx64c
|
5a95bef1dff281266ea3f0d0bfd63d27ab5e9965
|
[
"Apache-2.0"
] | null | null | null |
zx64c/ast.py
|
khrynczenko/zx64c
|
5a95bef1dff281266ea3f0d0bfd63d27ab5e9965
|
[
"Apache-2.0"
] | null | null | null |
zx64c/ast.py
|
khrynczenko/zx64c
|
5a95bef1dff281266ea3f0d0bfd63d27ab5e9965
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import annotations
import abc
from typing import List, Text, TypeVar, Generic
from abc import ABC
from functools import singledispatchmethod
from dataclasses import dataclass
from zx64c.types import Type, Callable
T = TypeVar("T")
@SjasmplusSnapshotProgram.__eq__.register
@Program.__eq__.register
@dataclass
@Function.__eq__.register
@Block.__eq__.register
@If.__eq__.register
@Print.__eq__.register
@Let.__eq__.register
@Assignment.__eq__.register
@Return.__eq__.register
@Equal.__eq__.register
@NotEqual.__eq__.register
@Addition.__eq__.register
@Subtraction.__eq__.register
@Negation.__eq__.register
@FunctionCall.__eq__.register
@Identifier.__eq__.register
@Unsignedint.__eq__.register
@Bool.__eq__.register
| 24.217742
| 88
| 0.635614
|
from __future__ import annotations
import abc
from typing import List, Text, TypeVar, Generic
from abc import ABC
from functools import singledispatchmethod
from dataclasses import dataclass
from zx64c.types import Type, Callable
T = TypeVar("T")
class AstVisitor(ABC, Generic[T]):
@abc.abstractmethod
def visit_program(self, node: Program) -> T:
pass
@abc.abstractmethod
def visit_function(self, node: Function) -> T:
pass
@abc.abstractmethod
def visit_block(self, node: Block) -> T:
pass
@abc.abstractmethod
def visit_if(self, node: Block) -> T:
pass
@abc.abstractmethod
def visit_print(self, node: Print) -> T:
pass
@abc.abstractmethod
def visit_let(self, node: Assignment) -> T:
pass
@abc.abstractmethod
def visit_return(self, node: Return) -> T:
pass
@abc.abstractmethod
def visit_assignment(self, node: Assignment) -> T:
pass
@abc.abstractmethod
def visit_equal(self, node: Equal) -> T:
pass
@abc.abstractmethod
def visit_not_equal(self, node: NotEqual) -> T:
pass
@abc.abstractmethod
def visit_addition(self, node: Addition) -> T:
pass
@abc.abstractmethod
def visit_subtraction(self, node: Subtraction) -> T:
pass
@abc.abstractmethod
def visit_negation(self, node: Negation) -> T:
pass
@abc.abstractmethod
def visit_function_call(self, node: FunctionCall) -> T:
pass
@abc.abstractmethod
def visit_identifier(self, node: Identifier) -> T:
pass
@abc.abstractmethod
def visit_unsignedint(self, node: Unsignedint) -> T:
pass
@abc.abstractmethod
def visit_bool(self, node: Bool) -> T:
pass
class SourceContext:
def __init__(self, line: int, column: int):
self._line = line
self._column = column
def __eq__(self, rhs: SourceContext) -> bool:
return self._line == rhs._line and self._column == rhs._column
@property
def line(self) -> int:
return self._line
@property
def column(self) -> int:
return self._column
class Ast(ABC):
def __init__(self, context: SourceContext):
self._context = context
@property
def context(self) -> SourceContext:
return self._context
@abc.abstractmethod
def visit(self, v: AstVisitor[T]) -> T:
pass
@abc.abstractmethod
def __eq__(self, rhs: Ast) -> bool:
pass
class SjasmplusSnapshotProgram(Ast):
def __init__(self, program: Program, source_name: Text):
super().__init__(program.context)
self.program = program
self.source_name = source_name
@singledispatchmethod
def __eq__(self, rhs: Ast) -> bool:
return False
def visit(self, v: AstVisitor[T]) -> T:
return v.visit_program(self)
@SjasmplusSnapshotProgram.__eq__.register
def __eq__(self, rhs: SjasmplusSnapshotProgram) -> bool:
return self.program == rhs.program and self.source_name == rhs._source_name
class Program(Ast):
def __init__(self, functions: List[Function], context: SourceContext):
super().__init__(context)
self.functions = functions
@singledispatchmethod
def __eq__(self, rhs: Ast) -> bool:
return False
def visit(self, v: AstVisitor[T]) -> T:
return v.visit_program(self)
@Program.__eq__.register
def _(self, rhs: Program) -> bool:
return self.functions == rhs.functions and self.context == rhs.context
@dataclass
class Parameter:
name: str
type_id: Type
class Function(Ast):
def __init__(
self,
name: str,
parameters: List[Parameter],
return_type: Type,
code_block: Block,
context: SourceContext,
):
super().__init__(context)
self.name = name
self.parameters = parameters
self.return_type = return_type
self.code_block = code_block
self.type = Callable(return_type, [p.type_id for p in parameters])
@singledispatchmethod
def __eq__(self, rhs: Ast) -> bool:
return False
def visit(self, v: AstVisitor[T]) -> T:
return v.visit_function(self)
@Function.__eq__.register
def _(self, rhs: Function) -> bool:
return (
self.name == rhs.name
and self.parameters == rhs.parameters
and self.return_type == rhs.return_type
and self.code_block == rhs.code_block
)
class Block(Ast):
def __init__(self, statements: [Ast], context: SourceContext):
super().__init__(context)
self.statements = statements
@singledispatchmethod
def __eq__(self, rhs: Ast) -> bool:
return False
def visit(self, v: AstVisitor[T]) -> T:
return v.visit_block(self)
@Block.__eq__.register
def _(self, rhs: Block) -> bool:
return self.statements == rhs.statements
class If(Ast):
def __init__(self, condition: Ast, consequence: Ast, context: SourceContext):
super().__init__(context)
self.condition = condition
self.consequence = consequence
@singledispatchmethod
def __eq__(self, rhs: Ast) -> bool:
return False
def visit(self, v: AstVisitor[T]) -> T:
return v.visit_if(self)
@If.__eq__.register
def _(self, rhs: If) -> bool:
return self.condition == rhs.condition and self.consequence == rhs.consequence
class Print(Ast):
def __init__(self, expression: Ast, context: SourceContext):
super().__init__(context)
self.expression = expression
@singledispatchmethod
def __eq__(self, rhs: Ast) -> bool:
return False
def visit(self, v: AstVisitor[T]) -> T:
return v.visit_print(self)
@Print.__eq__.register
def _(self, rhs: Print) -> bool:
return self.expression == rhs.expression and self.context == rhs.context
class Let(Ast):
def __init__(self, name: str, var_type: Type, rhs: Ast, context: SourceContext):
super().__init__(context)
self.name = name
self.var_type = var_type
self.rhs = rhs
@singledispatchmethod
def __eq__(self, rhs: Ast) -> bool:
return False
def visit(self, v: AstVisitor[T]) -> T:
return v.visit_let(self)
@Let.__eq__.register
def _(self, rhs: Let) -> bool:
return (
self.name == rhs.name
and self.var_type == rhs.var_type
and self.rhs == rhs.rhs
and self.context == rhs.context
)
class Assignment(Ast):
def __init__(self, name: str, rhs: Ast, context: SourceContext):
super().__init__(context)
self.name = name
self.rhs = rhs
@singledispatchmethod
def __eq__(self, rhs: Ast) -> bool:
return False
def visit(self, v: AstVisitor[T]) -> T:
return v.visit_assignment(self)
@Assignment.__eq__.register
def _(self, rhs: Assignment) -> bool:
return self.name == rhs.name and self.rhs == rhs.rhs and self.context == rhs.context
class Return(Ast):
def __init__(self, expr: Ast, context: SourceContext):
super().__init__(context)
self.expr = expr
@singledispatchmethod
def __eq__(self, rhs: Ast) -> bool:
return False
def visit(self, v: AstVisitor[T]) -> T:
return v.visit_return(self)
@Return.__eq__.register
def _(self, rhs: Return) -> bool:
return self.expr == rhs.expr
class Equal(Ast):
def __init__(self, lhs: Ast, rhs: Ast, context: SourceContext):
super().__init__(context)
self.lhs = lhs
self.rhs = rhs
@singledispatchmethod
def __eq__(self, rhs: Ast) -> bool:
return False
def visit(self, v: AstVisitor[T]) -> T:
return v.visit_equal(self)
@Equal.__eq__.register
def _(self, rhs: Equal) -> bool:
return self.lhs == rhs.lhs and self.rhs == rhs.rhs and self.context == rhs.context
class NotEqual(Ast):
def __init__(self, lhs: Ast, rhs: Ast, context: SourceContext):
super().__init__(context)
self.lhs = lhs
self.rhs = rhs
@singledispatchmethod
def __eq__(self, rhs: Ast) -> bool:
return False
def visit(self, v: AstVisitor[T]) -> T:
return v.visit_not_equal(self)
@NotEqual.__eq__.register
def _(self, rhs: NotEqual) -> bool:
return self.lhs == rhs.lhs and self.rhs == rhs.rhs and self.context == rhs.context
class Addition(Ast):
def __init__(self, lhs: Ast, rhs: Ast, context: SourceContext):
super().__init__(context)
self.lhs = lhs
self.rhs = rhs
@singledispatchmethod
def __eq__(self, rhs: Ast) -> bool:
return False
def visit(self, v: AstVisitor[T]) -> T:
return v.visit_addition(self)
@Addition.__eq__.register
def _(self, rhs: Addition) -> bool:
return self.lhs == rhs.lhs and self.rhs == rhs.rhs and self.context == rhs.context
class Subtraction(Ast):
def __init__(self, lhs: Ast, rhs: Ast, context: SourceContext):
super().__init__(context)
self.lhs = lhs
self.rhs = rhs
@singledispatchmethod
def __eq__(self, rhs: Ast) -> bool:
return False
def visit(self, v: AstVisitor[T]) -> T:
return v.visit_subtraction(self)
@Subtraction.__eq__.register
def _(self, rhs: Subtraction) -> bool:
return self.lhs == rhs.lhs and self.rhs == rhs.rhs and self.context == rhs.context
class Negation(Ast):
def __init__(self, expression: Ast, context: SourceContext):
super().__init__(context)
self.expression = expression
@singledispatchmethod
def __eq__(self, rhs: Ast) -> bool:
return False
def visit(self, v: AstVisitor[T]) -> T:
return v.visit_negation(self)
@Negation.__eq__.register
def _(self, rhs: Negation) -> bool:
return self.expression == rhs.expression and self.context == rhs.context
class FunctionCall(Ast):
def __init__(
self, function_name: str, arguments: List[Ast], context: SourceContext
):
super().__init__(context)
self.function_name = function_name
self.arguments = arguments
@singledispatchmethod
def __eq__(self, rhs: Ast) -> bool:
return False
def visit(self, v: AstVisitor[T]) -> T:
return v.visit_function_call(self)
@FunctionCall.__eq__.register
def _(self, rhs: FunctionCall) -> bool:
return (
self.function_name == rhs.function_name
and self.arguments == rhs.arguments
and self.context == rhs.context
)
class Identifier(Ast):
def __init__(self, value: int, context: SourceContext):
super().__init__(context)
self.value = value
@singledispatchmethod
def __eq__(self, rhs: Ast) -> bool:
return False
def visit(self, v: AstVisitor[T]) -> T:
return v.visit_identifier(self)
@Identifier.__eq__.register
def _(self, rhs: Identifier) -> bool:
return (
isinstance(rhs, Identifier)
and self.value == rhs.value
and self.context == self.context
)
class Unsignedint(Ast):
def __init__(self, value: int, context: SourceContext):
super().__init__(context)
self.value = value
@singledispatchmethod
def __eq__(self, rhs: Ast) -> bool:
return False
def visit(self, v: AstVisitor[T]) -> T:
return v.visit_unsignedint(self)
@Unsignedint.__eq__.register
def _(self, rhs: Unsignedint) -> bool:
return (
isinstance(rhs, Unsignedint)
and self.value == rhs.value
and self.context == self.context
)
class Bool(Ast):
def __init__(self, value: bool, context: SourceContext):
super().__init__(context)
self.value = value
@singledispatchmethod
def __eq__(self, rhs: Ast) -> bool:
return False
def visit(self, v: AstVisitor[T]) -> T:
return v.visit_bool(self)
@Bool.__eq__.register
def _(self, rhs: Bool) -> bool:
return (
isinstance(rhs, Bool)
and self.value == rhs.value
and self.context == self.context
)
| 7,223
| 3,096
| 901
|
12d6ccc9bc22866f30ca1c766583f034776a1025
| 4,946
|
py
|
Python
|
src/lib/recorder.py
|
l-maia/viseron
|
d762be93db74f780db13ac332bf8673c41592aa9
|
[
"MIT"
] | null | null | null |
src/lib/recorder.py
|
l-maia/viseron
|
d762be93db74f780db13ac332bf8673c41592aa9
|
[
"MIT"
] | null | null | null |
src/lib/recorder.py
|
l-maia/viseron
|
d762be93db74f780db13ac332bf8673c41592aa9
|
[
"MIT"
] | null | null | null |
import datetime
import logging
import os
from threading import Thread
import cv2
from lib.cleanup import SegmentCleanup
from lib.helpers import draw_objects
from lib.mqtt.camera import MQTTCamera
from lib.segments import Segments
LOGGER = logging.getLogger(__name__)
| 35.84058
| 88
| 0.641326
|
import datetime
import logging
import os
from threading import Thread
import cv2
from lib.cleanup import SegmentCleanup
from lib.helpers import draw_objects
from lib.mqtt.camera import MQTTCamera
from lib.segments import Segments
LOGGER = logging.getLogger(__name__)
class FFMPEGRecorder:
def __init__(self, config, detection_lock, mqtt_queue):
self._logger = logging.getLogger(__name__ + "." + config.camera.name_slug)
if getattr(config.recorder.logging, "level", None):
self._logger.setLevel(config.recorder.logging.level)
elif getattr(config.camera.logging, "level", None):
self._logger.setLevel(config.camera.logging.level)
self._logger.debug("Initializing ffmpeg recorder")
self.config = config
self._mqtt_queue = mqtt_queue
self.is_recording = False
self.last_recording_start = None
self.last_recording_end = None
self._event_start = None
self._event_end = None
self._recording_name = None
segments_folder = os.path.join(
config.recorder.segments_folder, config.camera.name
)
self.create_directory(segments_folder)
self._segmenter = Segments(
self._logger, config, segments_folder, detection_lock
)
self._segment_cleanup = SegmentCleanup(config)
self._mqtt_devices = {}
if self.config.recorder.thumbnail.send_to_mqtt:
self._mqtt_devices["latest_thumbnail"] = MQTTCamera(
config, mqtt_queue, object_id="latest_thumbnail"
)
def on_connect(self, client):
for device in self._mqtt_devices.values():
device.on_connect(client)
def subfolder_name(self, today):
return (
f"{today.year:04}-{today.month:02}-{today.day:02}/{self.config.camera.name}"
)
def create_thumbnail(self, file_name, frame, objects, resolution):
draw_objects(
frame.decoded_frame_umat_rgb, objects, resolution,
)
cv2.imwrite(file_name, frame.decoded_frame_umat_rgb)
if self.config.recorder.thumbnail.save_to_disk:
thumbnail_folder = os.path.join(
self.config.recorder.folder, "thumbnails", self.config.camera.name
)
self.create_directory(thumbnail_folder)
self._logger.debug(f"Saving thumbnail in {thumbnail_folder}")
if not cv2.imwrite(
os.path.join(thumbnail_folder, "latest_thumbnail.jpg"),
frame.decoded_frame_umat_rgb,
):
self._logger.error("Failed saving thumbnail to disk")
if self.config.recorder.thumbnail.send_to_mqtt and self._mqtt_devices:
ret, jpg = cv2.imencode(".jpg", frame.decoded_frame_umat_rgb)
if ret:
self._mqtt_devices["latest_thumbnail"].publish(jpg.tobytes())
def create_directory(self, path):
try:
if not os.path.isdir(path):
self._logger.debug(f"Creating folder {path}")
os.makedirs(path)
except FileExistsError:
pass
def start_recording(self, frame, objects, resolution):
self._logger.info("Starting recorder")
self.is_recording = True
self._segment_cleanup.pause()
now = datetime.datetime.now()
self.last_recording_start = now.isoformat()
self.last_recording_end = None
self._event_start = int(now.timestamp())
if self.config.recorder.folder is None:
self._logger.error("Output directory is not specified")
return
# Create filename
now = datetime.datetime.now()
video_name = f"{now.strftime('%H:%M:%S')}.{self.config.recorder.extension}"
thumbnail_name = f"{now.strftime('%H:%M:%S')}.jpg"
# Create foldername
subfolder = self.subfolder_name(now)
full_path = os.path.join(self.config.recorder.folder, subfolder)
self.create_directory(full_path)
if frame:
self.create_thumbnail(
os.path.join(full_path, thumbnail_name), frame, objects, resolution
)
self._recording_name = os.path.join(full_path, video_name)
def concat_segments(self):
self._segmenter.concat_segments(
self._event_start - self.config.recorder.lookback,
self._event_end,
self._recording_name,
)
# Dont resume cleanup if new recording started during encoding
if not self.is_recording:
self._segment_cleanup.resume()
def stop_recording(self):
self._logger.info("Stopping recorder")
self.is_recording = False
now = datetime.datetime.now()
self.last_recording_end = now.isoformat()
self._event_end = int(now.timestamp())
concat_thread = Thread(target=self.concat_segments)
concat_thread.start()
| 4,437
| 0
| 238
|
6ad5c56d611d041bd4e20428bfb9dda30e760ae2
| 810
|
py
|
Python
|
src/USEFUL/basic_examples/example_setdefault.py
|
binxiangni/Python-and-Algorithms-and-Data-Structures
|
d2c082d261a68b06f533703867ae8a90ac7f4df1
|
[
"MIT"
] | 5
|
2017-08-03T06:33:49.000Z
|
2021-08-06T13:20:57.000Z
|
src/USEFUL/basic_examples/example_setdefault.py
|
ritahu/Python-and-Algorithms-and-Data-Structures
|
d2c082d261a68b06f533703867ae8a90ac7f4df1
|
[
"MIT"
] | null | null | null |
src/USEFUL/basic_examples/example_setdefault.py
|
ritahu/Python-and-Algorithms-and-Data-Structures
|
d2c082d261a68b06f533703867ae8a90ac7f4df1
|
[
"MIT"
] | 6
|
2017-04-27T13:30:49.000Z
|
2020-11-01T20:28:55.000Z
|
#!/usr/bin/env python
__author__ = "bt3"
if __name__ == '__main__':
test_setdef()
| 20.769231
| 51
| 0.549383
|
#!/usr/bin/env python
__author__ = "bt3"
def usual_dict(dict_data):
newdata = {}
for k, v in dict_data:
if k in newdata:
newdata[k].append(v)
else:
newdata[k] = [v]
return newdata
def setdefault_dict(dict_data):
newdata = {}
for k, v in dict_data:
newdata.setdefault(k, []).append(v)
return newdata
def test_setdef(module_name='this module'):
dict_data = (('key1', 'value1'),
('key1', 'value2'),
('key2', 'value3'),
('key2', 'value4'),
('key2', 'value5'),)
print(usual_dict(dict_data))
print(setdefault_dict(dict_data))
s = 'Tests in {name} have {con}!'
print(s.format(name=module_name, con='passed'))
if __name__ == '__main__':
test_setdef()
| 648
| 0
| 69
|
aa3156cdaa41c2efd05f58391fef51ddc65a1c89
| 423
|
py
|
Python
|
examples/PyObjC/pbplugin/PyTestPlugin.py
|
flupke/py2app
|
8eb6c618f9c63d6ac970fb145a7f7782b71bcb4d
|
[
"MIT"
] | 81
|
2015-11-29T12:17:39.000Z
|
2021-08-02T07:06:51.000Z
|
examples/PyObjC/pbplugin/PyTestPlugin.py
|
flupke/py2app
|
8eb6c618f9c63d6ac970fb145a7f7782b71bcb4d
|
[
"MIT"
] | 11
|
2016-10-23T16:34:10.000Z
|
2022-01-30T05:45:54.000Z
|
examples/PyObjC/pbplugin/PyTestPlugin.py
|
flupke/py2app
|
8eb6c618f9c63d6ac970fb145a7f7782b71bcb4d
|
[
"MIT"
] | 21
|
2016-01-25T18:46:31.000Z
|
2021-01-08T17:38:03.000Z
|
from Foundation import *
import objc
import sys
print "PyTestPlugin", __name__
print u"[inside] currentBundle %r" % (objc.currentBundle(),)
| 23.5
| 60
| 0.664303
|
from Foundation import *
import objc
import sys
class PyTestPlugin(NSObject):
def init(self):
self = super(PyTestPlugin, self).init()
print 'class load!!'
print "Hello from py2app"
print "frozen", repr(getattr(sys, "frozen", None))
return self
class PyTestPlugin2(NSObject):
pass
print "PyTestPlugin", __name__
print u"[inside] currentBundle %r" % (objc.currentBundle(),)
| 184
| 26
| 72
|
4daa46c2152e35f2d6fed9c1e7f117f7a7694955
| 439
|
py
|
Python
|
tests/grammar/grammars/simple.py
|
AlexandreH/securify2
|
2d2ba0e1c20cdda550120ecdc1a7164db9b90e3c
|
[
"Apache-2.0"
] | 258
|
2020-01-23T16:58:38.000Z
|
2022-03-31T17:29:25.000Z
|
tests/grammar/grammars/simple.py
|
sirhashalot/securify2
|
6852707449577add14bafce8e304946b3490a977
|
[
"Apache-2.0"
] | 34
|
2020-01-30T06:11:58.000Z
|
2022-02-27T07:53:17.000Z
|
tests/grammar/grammars/simple.py
|
sirhashalot/securify2
|
6852707449577add14bafce8e304946b3490a977
|
[
"Apache-2.0"
] | 66
|
2020-01-28T09:23:05.000Z
|
2022-03-22T09:01:43.000Z
|
from __future__ import annotations
from typing import Sequence, Union, Optional
from securify.grammar import abstract_production, production
@abstract_production
@abstract_production
@production
@production
@production
@production
| 12.911765
| 60
| 0.738041
|
from __future__ import annotations
from typing import Sequence, Union, Optional
from securify.grammar import abstract_production, production
@abstract_production
class Base:
pass
@abstract_production
class AOrC(Base):
pass
@production
class A(AOrC, Base):
optional: Optional[Base]
@production
class B(Base):
seq: Sequence[AOrC]
@production
class C(AOrC, Base):
single: B
@production
class E(Base):
pass
| 0
| 64
| 132
|
ab7e4c8ae6107856ac778d397edffb130f2bed1a
| 2,490
|
py
|
Python
|
includes/vars.py
|
jerseyshawn/cf-vcap-vars
|
26effac112b500271e2f5ed298f0e6ab50bd7c4e
|
[
"MIT"
] | null | null | null |
includes/vars.py
|
jerseyshawn/cf-vcap-vars
|
26effac112b500271e2f5ed298f0e6ab50bd7c4e
|
[
"MIT"
] | null | null | null |
includes/vars.py
|
jerseyshawn/cf-vcap-vars
|
26effac112b500271e2f5ed298f0e6ab50bd7c4e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from os import environ
from json import dumps
| 35.571429
| 83
| 0.485944
|
#!/usr/bin/env python3
from os import environ
from json import dumps
class CloudFoundry:
def __init__(self, **kwargs):
self.__CF_VARIABLES__ = {'CF_INSTANCE_ADDR': '',
'CF_INSTANCE_GUID': '',
'CF_INSTANCE_INDEX': '',
'CF_INSTANCE_INTERNAL_IP': '',
'CF_INSTANCE_IP': '',
'CF_INSTANCE_PORT': '',
'CF_INSTANCE_PORTS': [{}],
'CF_STACK': '',
'DATABASE_URL': '',
'HOME': '',
'INSTANCE_GUID': '',
'INSTANCE_INDEX': '',
'LANG': '',
'MEMORY_LIMIT': '',
'PATH': '',
'PORT': '',
'PWD': '',
'TMPDIR': '',
'USER': '',
'VCAP_APP_HOST': '',
'VCAP_APP_PORT': '',
'VCAP_APPLICATION': {},
'VCAP_SERVICES': {}}
if kwargs.get('testing'):
self.load_testing_data(**kwargs)
else:
self.set_cf_variables(**kwargs)
def load_testing_data(self, **kwargs):
pass
def set_cf_variables(self, **kwargs):
variables = kwargs.get('variables', None)
if isinstance(variables, str):
cf_variables = [variable.upper() for variable in variables.split(',')]
elif isinstance(variables, list):
cf_variables = [variable.upper() for variable in variables]
else:
cf_variables = self.__CF_VARIABLES__
for cf_variable in cf_variables:
# found in env
if cf_variable in environ:
setattr(self,str(cf_variable).lower(),environ[cf_variable])
# not in env, but a known cf var
elif cf_variable in self.__CF_VARIABLES__:
setattr(self, str(cf_variable).lower(), self.__CF_VARIABLES__[cf_variable])
# not in env and not defaulted
else:
setattr(self, str(cf_variable).lower(), '')
def get_cf_variables(self, **kwargs):
variables = {}
for variable in sorted(self.__CF_VARIABLES__):
variable = variable.lower()
if hasattr(self, variable):
variables[variable] = getattr(self,variable)
print(dumps(variables, indent=4))
return(variables)
| 2,297
| -2
| 121
|
5e95808d29aa13d8d01042969663da9c93db20f1
| 212
|
py
|
Python
|
virtual_box_tools/custom_argument_parser.py
|
FunTimeCoding/virtualbox-tools
|
bc6b4ad90d711fda64f76275f65e6c01ae3caa3f
|
[
"MIT"
] | 4
|
2017-08-05T10:51:34.000Z
|
2019-06-24T02:56:31.000Z
|
virtual_box_tools/custom_argument_parser.py
|
FunTimeCoding/virtualbox-tools
|
bc6b4ad90d711fda64f76275f65e6c01ae3caa3f
|
[
"MIT"
] | null | null | null |
virtual_box_tools/custom_argument_parser.py
|
FunTimeCoding/virtualbox-tools
|
bc6b4ad90d711fda64f76275f65e6c01ae3caa3f
|
[
"MIT"
] | 1
|
2017-08-05T09:23:51.000Z
|
2017-08-05T09:23:51.000Z
|
from argparse import ArgumentParser
from sys import exit, stderr
| 21.2
| 45
| 0.693396
|
from argparse import ArgumentParser
from sys import exit, stderr
class CustomArgumentParser(ArgumentParser):
def error(self, message) -> None:
stderr.write('Error: %s\n' % message)
exit(1)
| 75
| 22
| 49
|
f6bbb564e37b6b680c5c92655011416fe930bcbf
| 2,043
|
py
|
Python
|
vega/__init__.py
|
zjzh/vega
|
aa6e7b8c69024262fc483ee06113b4d1bd5156d8
|
[
"Apache-2.0"
] | null | null | null |
vega/__init__.py
|
zjzh/vega
|
aa6e7b8c69024262fc483ee06113b4d1bd5156d8
|
[
"Apache-2.0"
] | null | null | null |
vega/__init__.py
|
zjzh/vega
|
aa6e7b8c69024262fc483ee06113b4d1bd5156d8
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Vega's methods."""
__all__ = [
"set_backend",
"is_cpu_device", "is_gpu_device", "is_npu_device",
"is_ms_backend", "is_tf_backend", "is_torch_backend",
"get_devices",
"ClassFactory", "ClassType",
"FileOps",
"run",
"init_cluster_args",
"module_existed",
"TrialAgent",
"get_network",
"get_dataset",
"get_trainer",
"get_quota",
]
__version__ = "1.8.0"
import sys
if sys.version_info < (3, 6):
sys.exit('Sorry, Python < 3.6 is not supported.')
from .common.backend_register import set_backend, is_cpu_device, is_gpu_device, is_npu_device, \
is_ms_backend, is_tf_backend, is_torch_backend, get_devices
from .common.class_factory import ClassFactory, ClassType
from .common.file_ops import FileOps
from .core import run, init_cluster_args, module_existed
from .trainer.trial_agent import TrialAgent
from . import quota
def get_network(name, **kwargs):
"""Return network."""
return ClassFactory.get_cls(ClassType.NETWORK, name)(**kwargs)
def get_dataset(name, **kwargs):
"""Return dataset."""
return ClassFactory.get_cls(ClassType.DATASET, name)(**kwargs)
def get_trainer(name="Trainer", **kwargs):
"""Return trainer."""
return ClassFactory.get_cls(ClassType.TRAINER, name)(**kwargs)
def get_quota(**kwargs):
"""Return quota."""
return ClassFactory.get_cls(ClassType.QUOTA, "Quota")(**kwargs)
| 28.375
| 96
| 0.714146
|
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Vega's methods."""
__all__ = [
"set_backend",
"is_cpu_device", "is_gpu_device", "is_npu_device",
"is_ms_backend", "is_tf_backend", "is_torch_backend",
"get_devices",
"ClassFactory", "ClassType",
"FileOps",
"run",
"init_cluster_args",
"module_existed",
"TrialAgent",
"get_network",
"get_dataset",
"get_trainer",
"get_quota",
]
__version__ = "1.8.0"
import sys
if sys.version_info < (3, 6):
sys.exit('Sorry, Python < 3.6 is not supported.')
from .common.backend_register import set_backend, is_cpu_device, is_gpu_device, is_npu_device, \
is_ms_backend, is_tf_backend, is_torch_backend, get_devices
from .common.class_factory import ClassFactory, ClassType
from .common.file_ops import FileOps
from .core import run, init_cluster_args, module_existed
from .trainer.trial_agent import TrialAgent
from . import quota
def get_network(name, **kwargs):
"""Return network."""
return ClassFactory.get_cls(ClassType.NETWORK, name)(**kwargs)
def get_dataset(name, **kwargs):
"""Return dataset."""
return ClassFactory.get_cls(ClassType.DATASET, name)(**kwargs)
def get_trainer(name="Trainer", **kwargs):
"""Return trainer."""
return ClassFactory.get_cls(ClassType.TRAINER, name)(**kwargs)
def get_quota(**kwargs):
"""Return quota."""
return ClassFactory.get_cls(ClassType.QUOTA, "Quota")(**kwargs)
| 0
| 0
| 0
|
a3fd214bd3ac94e9556d5163c8b69ad52bfd9956
| 13,033
|
py
|
Python
|
colour/models/tests/test_cie_lab.py
|
MaxSchambach/colour
|
3f3685d616fda4be58cec20bc1e16194805d7e2d
|
[
"BSD-3-Clause"
] | null | null | null |
colour/models/tests/test_cie_lab.py
|
MaxSchambach/colour
|
3f3685d616fda4be58cec20bc1e16194805d7e2d
|
[
"BSD-3-Clause"
] | null | null | null |
colour/models/tests/test_cie_lab.py
|
MaxSchambach/colour
|
3f3685d616fda4be58cec20bc1e16194805d7e2d
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Defines unit tests for :mod:`colour.models.cie_lab` module.
"""
from __future__ import division, unicode_literals
import numpy as np
import unittest
from itertools import permutations
from colour.models import XYZ_to_Lab, Lab_to_XYZ, Lab_to_LCHab, LCHab_to_Lab
from colour.utilities import domain_range_scale, ignore_numpy_errors
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '[email protected]'
__status__ = 'Production'
__all__ = [
'TestXYZ_to_Lab', 'TestLab_to_XYZ', 'TestLab_to_LCHab', 'TestLCHab_to_Lab'
]
class TestXYZ_to_Lab(unittest.TestCase):
"""
Defines :func:`colour.models.cie_lab.XYZ_to_Lab` definition unit tests
methods.
"""
def test_XYZ_to_Lab(self):
"""
Tests :func:`colour.models.cie_lab.XYZ_to_Lab` definition.
"""
np.testing.assert_almost_equal(
XYZ_to_Lab(np.array([0.20654008, 0.12197225, 0.05136952])),
np.array([41.52787529, 52.63858304, 26.92317922]),
decimal=7)
np.testing.assert_almost_equal(
XYZ_to_Lab(np.array([0.14222010, 0.23042768, 0.10495772])),
np.array([55.11636304, -41.08791787, 30.91825778]),
decimal=7)
np.testing.assert_almost_equal(
XYZ_to_Lab(np.array([0.07818780, 0.06157201, 0.28099326])),
np.array([29.80565520, 20.01830466, -48.34913874]),
decimal=7)
np.testing.assert_almost_equal(
XYZ_to_Lab(
np.array([0.20654008, 0.12197225, 0.05136952]),
np.array([0.44757, 0.40745])),
np.array([41.52787529, 38.48089305, -5.73295122]),
decimal=7)
np.testing.assert_almost_equal(
XYZ_to_Lab(
np.array([0.20654008, 0.12197225, 0.05136952]),
np.array([0.34570, 0.35850])),
np.array([41.52787529, 51.19354174, 19.91843098]),
decimal=7)
np.testing.assert_almost_equal(
XYZ_to_Lab(
np.array([0.20654008, 0.12197225, 0.05136952]),
np.array([0.34570, 0.35850, 1.00000])),
np.array([41.52787529, 51.19354174, 19.91843098]),
decimal=7)
def test_n_dimensional_XYZ_to_Lab(self):
"""
Tests :func:`colour.models.cie_lab.XYZ_to_Lab` definition n-dimensional
support.
"""
XYZ = np.array([0.20654008, 0.12197225, 0.05136952])
illuminant = np.array([0.31270, 0.32900])
Lab = XYZ_to_Lab(XYZ, illuminant)
XYZ = np.tile(XYZ, (6, 1))
Lab = np.tile(Lab, (6, 1))
np.testing.assert_almost_equal(
XYZ_to_Lab(XYZ, illuminant), Lab, decimal=7)
illuminant = np.tile(illuminant, (6, 1))
np.testing.assert_almost_equal(
XYZ_to_Lab(XYZ, illuminant), Lab, decimal=7)
XYZ = np.reshape(XYZ, (2, 3, 3))
illuminant = np.reshape(illuminant, (2, 3, 2))
Lab = np.reshape(Lab, (2, 3, 3))
np.testing.assert_almost_equal(
XYZ_to_Lab(XYZ, illuminant), Lab, decimal=7)
def test_domain_range_scale_XYZ_to_Lab(self):
"""
Tests :func:`colour.models.cie_lab.XYZ_to_Lab` definition
domain and range scale support.
"""
XYZ = np.array([0.20654008, 0.12197225, 0.05136952])
illuminant = np.array([0.31270, 0.32900])
Lab = XYZ_to_Lab(XYZ, illuminant)
d_r = (('reference', 1, 1), (1, 1, 0.01), (100, 100, 1))
for scale, factor_a, factor_b in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
XYZ_to_Lab(XYZ * factor_a, illuminant),
Lab * factor_b,
decimal=7)
@ignore_numpy_errors
def test_nan_XYZ_to_Lab(self):
"""
Tests :func:`colour.models.cie_lab.XYZ_to_Lab` definition nan support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
XYZ = np.array(case)
illuminant = np.array(case[0:2])
XYZ_to_Lab(XYZ, illuminant)
class TestLab_to_XYZ(unittest.TestCase):
"""
Defines :func:`colour.models.cie_lab.Lab_to_XYZ` definition unit tests
methods.
"""
def test_Lab_to_XYZ(self):
"""
Tests :func:`colour.models.cie_lab.Lab_to_XYZ` definition.
"""
np.testing.assert_almost_equal(
Lab_to_XYZ(np.array([41.52787529, 52.63858304, 26.92317922])),
np.array([0.20654008, 0.12197225, 0.05136952]),
decimal=7)
np.testing.assert_almost_equal(
Lab_to_XYZ(np.array([55.11636304, -41.08791787, 30.91825778])),
np.array([0.14222010, 0.23042768, 0.10495772]),
decimal=7)
np.testing.assert_almost_equal(
Lab_to_XYZ(np.array([29.80565520, 20.01830466, -48.34913874])),
np.array([0.07818780, 0.06157201, 0.28099326]),
decimal=7)
np.testing.assert_almost_equal(
Lab_to_XYZ(
np.array([41.52787529, 38.48089305, -5.73295122]),
np.array([0.44757, 0.40745])),
np.array([0.20654008, 0.12197225, 0.05136952]),
decimal=7)
np.testing.assert_almost_equal(
Lab_to_XYZ(
np.array([41.52787529, 51.19354174, 19.91843098]),
np.array([0.34570, 0.35850])),
np.array([0.20654008, 0.12197225, 0.05136952]),
decimal=7)
np.testing.assert_almost_equal(
Lab_to_XYZ(
np.array([41.52787529, 51.19354174, 19.91843098]),
np.array([0.34570, 0.35850, 1.00000])),
np.array([0.20654008, 0.12197225, 0.05136952]),
decimal=7)
def test_n_dimensional_Lab_to_XYZ(self):
"""
Tests :func:`colour.models.cie_lab.Lab_to_XYZ` definition n-dimensional
support.
"""
Lab = np.array([41.52787529, 52.63858304, 26.92317922])
illuminant = np.array([0.31270, 0.32900])
XYZ = Lab_to_XYZ(Lab, illuminant)
Lab = np.tile(Lab, (6, 1))
XYZ = np.tile(XYZ, (6, 1))
np.testing.assert_almost_equal(
Lab_to_XYZ(Lab, illuminant), XYZ, decimal=7)
illuminant = np.tile(illuminant, (6, 1))
np.testing.assert_almost_equal(
Lab_to_XYZ(Lab, illuminant), XYZ, decimal=7)
Lab = np.reshape(Lab, (2, 3, 3))
illuminant = np.reshape(illuminant, (2, 3, 2))
XYZ = np.reshape(XYZ, (2, 3, 3))
np.testing.assert_almost_equal(
Lab_to_XYZ(Lab, illuminant), XYZ, decimal=7)
def test_domain_range_scale_Lab_to_XYZ(self):
"""
Tests :func:`colour.models.cie_lab.Lab_to_XYZ` definition
domain and range scale support.
"""
Lab = np.array([41.52787529, 52.63858304, 26.92317922])
illuminant = np.array([0.31270, 0.32900])
XYZ = Lab_to_XYZ(Lab, illuminant)
d_r = (('reference', 1, 1), (1, 0.01, 1), (100, 1, 100))
for scale, factor_a, factor_b in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
Lab_to_XYZ(Lab * factor_a, illuminant),
XYZ * factor_b,
decimal=7)
@ignore_numpy_errors
def test_nan_Lab_to_XYZ(self):
"""
Tests :func:`colour.models.cie_lab.Lab_to_XYZ` definition nan support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
Lab = np.array(case)
illuminant = np.array(case[0:2])
Lab_to_XYZ(Lab, illuminant)
class TestLab_to_LCHab(unittest.TestCase):
"""
Defines :func:`colour.models.cie_lab.Lab_to_LCHab` definition unit tests
methods.
"""
def test_Lab_to_LCHab(self):
"""
Tests :func:`colour.models.cie_lab.Lab_to_LCHab` definition.
"""
np.testing.assert_almost_equal(
Lab_to_LCHab(np.array([41.52787529, 52.63858304, 26.92317922])),
np.array([41.52787529, 59.12425901, 27.08848784]),
decimal=7)
np.testing.assert_almost_equal(
Lab_to_LCHab(np.array([55.11636304, -41.08791787, 30.91825778])),
np.array([55.11636304, 51.42135412, 143.03889556]),
decimal=7)
np.testing.assert_almost_equal(
Lab_to_LCHab(np.array([29.80565520, 20.01830466, -48.34913874])),
np.array([29.80565520, 52.32945383, 292.49133666]),
decimal=7)
def test_n_dimensional_Lab_to_LCHab(self):
"""
Tests :func:`colour.models.cie_lab.Lab_to_LCHab` definition
n-dimensional arrays support.
"""
Lab = np.array([41.52787529, 52.63858304, 26.92317922])
LCHab = Lab_to_LCHab(Lab)
Lab = np.tile(Lab, (6, 1))
LCHab = np.tile(LCHab, (6, 1))
np.testing.assert_almost_equal(Lab_to_LCHab(Lab), LCHab, decimal=7)
Lab = np.reshape(Lab, (2, 3, 3))
LCHab = np.reshape(LCHab, (2, 3, 3))
np.testing.assert_almost_equal(Lab_to_LCHab(Lab), LCHab, decimal=7)
def test_domain_range_scale_Lab_to_LCHab(self):
"""
Tests :func:`colour.models.cie_lab.Lab_to_LCHab` definition domain and
range scale support.
"""
Lab = np.array([41.52787529, 52.63858304, 26.92317922])
LCHab = Lab_to_LCHab(Lab)
d_r = (('reference', 1, 1), (1, 0.01, np.array([0.01, 0.01, 1 / 360])),
(100, 1, np.array([1, 1, 1 / 3.6])))
for scale, factor_a, factor_b in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
Lab_to_LCHab(Lab * factor_a), LCHab * factor_b, decimal=7)
@ignore_numpy_errors
def test_nan_Lab_to_LCHab(self):
"""
Tests :func:`colour.models.cie_lab.Lab_to_LCHab` definition nan
support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
Lab = np.array(case)
Lab_to_LCHab(Lab)
class TestLCHab_to_Lab(unittest.TestCase):
"""
Defines :func:`colour.models.cie_lab.LCHab_to_Lab` definition unit tests
methods.
"""
def test_LCHab_to_Lab(self):
"""
Tests :func:`colour.models.cie_lab.LCHab_to_Lab` definition.
"""
np.testing.assert_almost_equal(
LCHab_to_Lab(np.array([41.52787529, 59.12425901, 27.08848784])),
np.array([41.52787529, 52.63858304, 26.92317922]),
decimal=7)
np.testing.assert_almost_equal(
LCHab_to_Lab(np.array([55.11636304, 51.42135412, 143.03889556])),
np.array([55.11636304, -41.08791787, 30.91825778]),
decimal=7)
np.testing.assert_almost_equal(
LCHab_to_Lab(np.array([29.80565520, 52.32945383, 292.49133666])),
np.array([29.80565520, 20.01830466, -48.34913874]),
decimal=7)
def test_n_dimensional_LCHab_to_Lab(self):
"""
Tests :func:`colour.models.cie_lab.LCHab_to_Lab` definition
n-dimensional arrays support.
"""
LCHab = np.array([41.52787529, 59.12425901, 27.08848784])
Lab = LCHab_to_Lab(LCHab)
LCHab = np.tile(LCHab, (6, 1))
Lab = np.tile(Lab, (6, 1))
np.testing.assert_almost_equal(LCHab_to_Lab(LCHab), Lab, decimal=7)
LCHab = np.reshape(LCHab, (2, 3, 3))
Lab = np.reshape(Lab, (2, 3, 3))
np.testing.assert_almost_equal(LCHab_to_Lab(LCHab), Lab, decimal=7)
def test_domain_range_scale_LCHab_to_Lab(self):
"""
Tests :func:`colour.models.cie_lab.LCHab_to_Lab` definition domain and
range scale support.
"""
LCHab = np.array([41.52787529, 59.12425901, 27.08848784])
Lab = LCHab_to_Lab(LCHab)
d_r = (('reference', 1, 1), (1, np.array([0.01, 0.01, 1 / 360]), 0.01),
(100, np.array([1, 1, 1 / 3.6]), 1))
for scale, factor_a, factor_b in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
LCHab_to_Lab(LCHab * factor_a), Lab * factor_b, decimal=7)
@ignore_numpy_errors
def test_nan_LCHab_to_Lab(self):
"""
Tests :func:`colour.models.cie_lab.LCHab_to_Lab` definition nan
support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
LCHab = np.array(case)
LCHab_to_Lab(LCHab)
if __name__ == '__main__':
unittest.main()
| 34.028721
| 79
| 0.58559
|
# -*- coding: utf-8 -*-
"""
Defines unit tests for :mod:`colour.models.cie_lab` module.
"""
from __future__ import division, unicode_literals
import numpy as np
import unittest
from itertools import permutations
from colour.models import XYZ_to_Lab, Lab_to_XYZ, Lab_to_LCHab, LCHab_to_Lab
from colour.utilities import domain_range_scale, ignore_numpy_errors
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '[email protected]'
__status__ = 'Production'
__all__ = [
'TestXYZ_to_Lab', 'TestLab_to_XYZ', 'TestLab_to_LCHab', 'TestLCHab_to_Lab'
]
class TestXYZ_to_Lab(unittest.TestCase):
"""
Defines :func:`colour.models.cie_lab.XYZ_to_Lab` definition unit tests
methods.
"""
def test_XYZ_to_Lab(self):
"""
Tests :func:`colour.models.cie_lab.XYZ_to_Lab` definition.
"""
np.testing.assert_almost_equal(
XYZ_to_Lab(np.array([0.20654008, 0.12197225, 0.05136952])),
np.array([41.52787529, 52.63858304, 26.92317922]),
decimal=7)
np.testing.assert_almost_equal(
XYZ_to_Lab(np.array([0.14222010, 0.23042768, 0.10495772])),
np.array([55.11636304, -41.08791787, 30.91825778]),
decimal=7)
np.testing.assert_almost_equal(
XYZ_to_Lab(np.array([0.07818780, 0.06157201, 0.28099326])),
np.array([29.80565520, 20.01830466, -48.34913874]),
decimal=7)
np.testing.assert_almost_equal(
XYZ_to_Lab(
np.array([0.20654008, 0.12197225, 0.05136952]),
np.array([0.44757, 0.40745])),
np.array([41.52787529, 38.48089305, -5.73295122]),
decimal=7)
np.testing.assert_almost_equal(
XYZ_to_Lab(
np.array([0.20654008, 0.12197225, 0.05136952]),
np.array([0.34570, 0.35850])),
np.array([41.52787529, 51.19354174, 19.91843098]),
decimal=7)
np.testing.assert_almost_equal(
XYZ_to_Lab(
np.array([0.20654008, 0.12197225, 0.05136952]),
np.array([0.34570, 0.35850, 1.00000])),
np.array([41.52787529, 51.19354174, 19.91843098]),
decimal=7)
def test_n_dimensional_XYZ_to_Lab(self):
"""
Tests :func:`colour.models.cie_lab.XYZ_to_Lab` definition n-dimensional
support.
"""
XYZ = np.array([0.20654008, 0.12197225, 0.05136952])
illuminant = np.array([0.31270, 0.32900])
Lab = XYZ_to_Lab(XYZ, illuminant)
XYZ = np.tile(XYZ, (6, 1))
Lab = np.tile(Lab, (6, 1))
np.testing.assert_almost_equal(
XYZ_to_Lab(XYZ, illuminant), Lab, decimal=7)
illuminant = np.tile(illuminant, (6, 1))
np.testing.assert_almost_equal(
XYZ_to_Lab(XYZ, illuminant), Lab, decimal=7)
XYZ = np.reshape(XYZ, (2, 3, 3))
illuminant = np.reshape(illuminant, (2, 3, 2))
Lab = np.reshape(Lab, (2, 3, 3))
np.testing.assert_almost_equal(
XYZ_to_Lab(XYZ, illuminant), Lab, decimal=7)
def test_domain_range_scale_XYZ_to_Lab(self):
"""
Tests :func:`colour.models.cie_lab.XYZ_to_Lab` definition
domain and range scale support.
"""
XYZ = np.array([0.20654008, 0.12197225, 0.05136952])
illuminant = np.array([0.31270, 0.32900])
Lab = XYZ_to_Lab(XYZ, illuminant)
d_r = (('reference', 1, 1), (1, 1, 0.01), (100, 100, 1))
for scale, factor_a, factor_b in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
XYZ_to_Lab(XYZ * factor_a, illuminant),
Lab * factor_b,
decimal=7)
@ignore_numpy_errors
def test_nan_XYZ_to_Lab(self):
"""
Tests :func:`colour.models.cie_lab.XYZ_to_Lab` definition nan support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
XYZ = np.array(case)
illuminant = np.array(case[0:2])
XYZ_to_Lab(XYZ, illuminant)
class TestLab_to_XYZ(unittest.TestCase):
"""
Defines :func:`colour.models.cie_lab.Lab_to_XYZ` definition unit tests
methods.
"""
def test_Lab_to_XYZ(self):
"""
Tests :func:`colour.models.cie_lab.Lab_to_XYZ` definition.
"""
np.testing.assert_almost_equal(
Lab_to_XYZ(np.array([41.52787529, 52.63858304, 26.92317922])),
np.array([0.20654008, 0.12197225, 0.05136952]),
decimal=7)
np.testing.assert_almost_equal(
Lab_to_XYZ(np.array([55.11636304, -41.08791787, 30.91825778])),
np.array([0.14222010, 0.23042768, 0.10495772]),
decimal=7)
np.testing.assert_almost_equal(
Lab_to_XYZ(np.array([29.80565520, 20.01830466, -48.34913874])),
np.array([0.07818780, 0.06157201, 0.28099326]),
decimal=7)
np.testing.assert_almost_equal(
Lab_to_XYZ(
np.array([41.52787529, 38.48089305, -5.73295122]),
np.array([0.44757, 0.40745])),
np.array([0.20654008, 0.12197225, 0.05136952]),
decimal=7)
np.testing.assert_almost_equal(
Lab_to_XYZ(
np.array([41.52787529, 51.19354174, 19.91843098]),
np.array([0.34570, 0.35850])),
np.array([0.20654008, 0.12197225, 0.05136952]),
decimal=7)
np.testing.assert_almost_equal(
Lab_to_XYZ(
np.array([41.52787529, 51.19354174, 19.91843098]),
np.array([0.34570, 0.35850, 1.00000])),
np.array([0.20654008, 0.12197225, 0.05136952]),
decimal=7)
def test_n_dimensional_Lab_to_XYZ(self):
"""
Tests :func:`colour.models.cie_lab.Lab_to_XYZ` definition n-dimensional
support.
"""
Lab = np.array([41.52787529, 52.63858304, 26.92317922])
illuminant = np.array([0.31270, 0.32900])
XYZ = Lab_to_XYZ(Lab, illuminant)
Lab = np.tile(Lab, (6, 1))
XYZ = np.tile(XYZ, (6, 1))
np.testing.assert_almost_equal(
Lab_to_XYZ(Lab, illuminant), XYZ, decimal=7)
illuminant = np.tile(illuminant, (6, 1))
np.testing.assert_almost_equal(
Lab_to_XYZ(Lab, illuminant), XYZ, decimal=7)
Lab = np.reshape(Lab, (2, 3, 3))
illuminant = np.reshape(illuminant, (2, 3, 2))
XYZ = np.reshape(XYZ, (2, 3, 3))
np.testing.assert_almost_equal(
Lab_to_XYZ(Lab, illuminant), XYZ, decimal=7)
def test_domain_range_scale_Lab_to_XYZ(self):
"""
Tests :func:`colour.models.cie_lab.Lab_to_XYZ` definition
domain and range scale support.
"""
Lab = np.array([41.52787529, 52.63858304, 26.92317922])
illuminant = np.array([0.31270, 0.32900])
XYZ = Lab_to_XYZ(Lab, illuminant)
d_r = (('reference', 1, 1), (1, 0.01, 1), (100, 1, 100))
for scale, factor_a, factor_b in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
Lab_to_XYZ(Lab * factor_a, illuminant),
XYZ * factor_b,
decimal=7)
@ignore_numpy_errors
def test_nan_Lab_to_XYZ(self):
"""
Tests :func:`colour.models.cie_lab.Lab_to_XYZ` definition nan support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
Lab = np.array(case)
illuminant = np.array(case[0:2])
Lab_to_XYZ(Lab, illuminant)
class TestLab_to_LCHab(unittest.TestCase):
"""
Defines :func:`colour.models.cie_lab.Lab_to_LCHab` definition unit tests
methods.
"""
def test_Lab_to_LCHab(self):
"""
Tests :func:`colour.models.cie_lab.Lab_to_LCHab` definition.
"""
np.testing.assert_almost_equal(
Lab_to_LCHab(np.array([41.52787529, 52.63858304, 26.92317922])),
np.array([41.52787529, 59.12425901, 27.08848784]),
decimal=7)
np.testing.assert_almost_equal(
Lab_to_LCHab(np.array([55.11636304, -41.08791787, 30.91825778])),
np.array([55.11636304, 51.42135412, 143.03889556]),
decimal=7)
np.testing.assert_almost_equal(
Lab_to_LCHab(np.array([29.80565520, 20.01830466, -48.34913874])),
np.array([29.80565520, 52.32945383, 292.49133666]),
decimal=7)
def test_n_dimensional_Lab_to_LCHab(self):
"""
Tests :func:`colour.models.cie_lab.Lab_to_LCHab` definition
n-dimensional arrays support.
"""
Lab = np.array([41.52787529, 52.63858304, 26.92317922])
LCHab = Lab_to_LCHab(Lab)
Lab = np.tile(Lab, (6, 1))
LCHab = np.tile(LCHab, (6, 1))
np.testing.assert_almost_equal(Lab_to_LCHab(Lab), LCHab, decimal=7)
Lab = np.reshape(Lab, (2, 3, 3))
LCHab = np.reshape(LCHab, (2, 3, 3))
np.testing.assert_almost_equal(Lab_to_LCHab(Lab), LCHab, decimal=7)
def test_domain_range_scale_Lab_to_LCHab(self):
"""
Tests :func:`colour.models.cie_lab.Lab_to_LCHab` definition domain and
range scale support.
"""
Lab = np.array([41.52787529, 52.63858304, 26.92317922])
LCHab = Lab_to_LCHab(Lab)
d_r = (('reference', 1, 1), (1, 0.01, np.array([0.01, 0.01, 1 / 360])),
(100, 1, np.array([1, 1, 1 / 3.6])))
for scale, factor_a, factor_b in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
Lab_to_LCHab(Lab * factor_a), LCHab * factor_b, decimal=7)
@ignore_numpy_errors
def test_nan_Lab_to_LCHab(self):
"""
Tests :func:`colour.models.cie_lab.Lab_to_LCHab` definition nan
support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
Lab = np.array(case)
Lab_to_LCHab(Lab)
class TestLCHab_to_Lab(unittest.TestCase):
"""
Defines :func:`colour.models.cie_lab.LCHab_to_Lab` definition unit tests
methods.
"""
def test_LCHab_to_Lab(self):
"""
Tests :func:`colour.models.cie_lab.LCHab_to_Lab` definition.
"""
np.testing.assert_almost_equal(
LCHab_to_Lab(np.array([41.52787529, 59.12425901, 27.08848784])),
np.array([41.52787529, 52.63858304, 26.92317922]),
decimal=7)
np.testing.assert_almost_equal(
LCHab_to_Lab(np.array([55.11636304, 51.42135412, 143.03889556])),
np.array([55.11636304, -41.08791787, 30.91825778]),
decimal=7)
np.testing.assert_almost_equal(
LCHab_to_Lab(np.array([29.80565520, 52.32945383, 292.49133666])),
np.array([29.80565520, 20.01830466, -48.34913874]),
decimal=7)
def test_n_dimensional_LCHab_to_Lab(self):
"""
Tests :func:`colour.models.cie_lab.LCHab_to_Lab` definition
n-dimensional arrays support.
"""
LCHab = np.array([41.52787529, 59.12425901, 27.08848784])
Lab = LCHab_to_Lab(LCHab)
LCHab = np.tile(LCHab, (6, 1))
Lab = np.tile(Lab, (6, 1))
np.testing.assert_almost_equal(LCHab_to_Lab(LCHab), Lab, decimal=7)
LCHab = np.reshape(LCHab, (2, 3, 3))
Lab = np.reshape(Lab, (2, 3, 3))
np.testing.assert_almost_equal(LCHab_to_Lab(LCHab), Lab, decimal=7)
def test_domain_range_scale_LCHab_to_Lab(self):
"""
Tests :func:`colour.models.cie_lab.LCHab_to_Lab` definition domain and
range scale support.
"""
LCHab = np.array([41.52787529, 59.12425901, 27.08848784])
Lab = LCHab_to_Lab(LCHab)
d_r = (('reference', 1, 1), (1, np.array([0.01, 0.01, 1 / 360]), 0.01),
(100, np.array([1, 1, 1 / 3.6]), 1))
for scale, factor_a, factor_b in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
LCHab_to_Lab(LCHab * factor_a), Lab * factor_b, decimal=7)
@ignore_numpy_errors
def test_nan_LCHab_to_Lab(self):
"""
Tests :func:`colour.models.cie_lab.LCHab_to_Lab` definition nan
support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
LCHab = np.array(case)
LCHab_to_Lab(LCHab)
if __name__ == '__main__':
unittest.main()
| 0
| 0
| 0
|
84c76c41a480dae4b33646b4f0e6c9ccbdced4c9
| 17,510
|
py
|
Python
|
letype_extractor.py
|
olzama/neural-supertagging
|
340a9b3eaf6427e5ec475cd03bc6f4b3d4891ba4
|
[
"MIT"
] | null | null | null |
letype_extractor.py
|
olzama/neural-supertagging
|
340a9b3eaf6427e5ec475cd03bc6f4b3d4891ba4
|
[
"MIT"
] | null | null | null |
letype_extractor.py
|
olzama/neural-supertagging
|
340a9b3eaf6427e5ec475cd03bc6f4b3d4891ba4
|
[
"MIT"
] | null | null | null |
from delphin import tdl, itsdb
from delphin.tokens import YYTokenLattice
import glob, sys, pathlib
import json, pickle
import numpy as np
from collections import OrderedDict
import pos_map
from datetime import datetime
CONTEXT_WINDOW = 2
DEV = ['ws212', 'ecpa']
TEST = ['cb', 'ecpr', 'jhk', 'jhu', 'tgk', 'tgu', 'psk', 'psu', #'rondane',
'vm32', 'ws213', 'ws214', 'petet', 'wsj23']
IGNORE = ['ntucle', 'omw', 'wlb03', 'wnb03']
NONTRAIN = DEV + TEST + IGNORE
class LexTypeExtractor:
'''
Assume a numpy table coming in. Get e.g. tokens 2 through 5 in sentences 4 and 5,
for the test suite #20 in the data.
'''
if __name__ == "__main__":
args = sys.argv[1:]
dt_str = '-'.join(str(datetime.now()).split()).replace(':','.')
run_id = sys.argv[3] + dt_str
if len(sys.argv) > 3:
autoreg = sys.argv[4] == 'autoreg'
out_dir = './output/' + run_id
pathlib.Path(out_dir).mkdir(parents=True, exist_ok=False)
le = LexTypeExtractor()
le.parse_lexicons(args[0])
le.stats['total lextypes'] = len(le.lextypes)
if autoreg:
le.process_testsuites_autoreg(args[1],le.lextypes,out_dir)
else:
le.process_testsuites_nonautoreg(args[1],le.lextypes,out_dir)
with open(out_dir + '/lextypes','wb') as f:
lextypes = set([str(v) for v in list(le.lextypes.values())])
pickle.dump(lextypes,f)
| 46.693333
| 116
| 0.525871
|
from delphin import tdl, itsdb
from delphin.tokens import YYTokenLattice
import glob, sys, pathlib
import json, pickle
import numpy as np
from collections import OrderedDict
import pos_map
from datetime import datetime
CONTEXT_WINDOW = 2
DEV = ['ws212', 'ecpa']
TEST = ['cb', 'ecpr', 'jhk', 'jhu', 'tgk', 'tgu', 'psk', 'psu', #'rondane',
'vm32', 'ws213', 'ws214', 'petet', 'wsj23']
IGNORE = ['ntucle', 'omw', 'wlb03', 'wnb03']
NONTRAIN = DEV + TEST + IGNORE
class LexTypeExtractor:
def __init__(self):
self.stats = {'corpora': [], 'failed corpora': [], 'tokens': {}, 'total lextypes': 0}
def parse_lexicons(self,lexicons):
lextypes = {} # mapping of lexical entry IDs to types
for lexicon in glob.iglob(lexicons+'**'):
for event, obj, lineno in tdl.iterparse(lexicon):
if event == 'TypeDefinition':
lextypes[obj.identifier] = obj.supertypes[0] # assume exactly 1
self.lextypes = lextypes
def read_testsuites(self,path):
max_sen_length = 0
corpus_size = 0
data = {'train':{'by corpus':[], 'by length': {}},
'test':{'by corpus':[], 'by length': {}},
'dev':{'by corpus':[], 'by length': {}}}
print('Reading test suite files into pydelphin objects...')
n = 0
for idx in ['train','dev','test']:
t = 0
for i, tsuite in enumerate(sorted(glob.iglob(path + idx + '/**'))):
n += 1
ts = itsdb.TestSuite(tsuite)
if idx == 'train':
message = "A nontrain dataset {} is being added as training data!".format(ts.path.stem)
assert ts.path.stem not in NONTRAIN, message
data[idx]['by corpus'].append({'name':ts.path.stem})
items = list(ts.processed_items())
data[idx]['by corpus'][i]['sentences'] = {}
data[idx]['by corpus'][i]['tokens-tags'] = []
corpus_size += len(items)
for response in items:
if len(response['results']) > 0:
deriv = response.result(0).derivation()
terminals = deriv.terminals()
t += len(terminals)
p_input = response['p-input']
p_tokens = response['p-tokens']
terminals_tok_tags = self.map_lattice_to_input(p_input, p_tokens, deriv)
if len(terminals) not in data[idx]['by corpus'][i]['sentences']:
data[idx]['by corpus'][i]['sentences'][len(terminals)] = []
data[idx]['by corpus'][i]['sentences'][len(terminals)].append(terminals_tok_tags)
data[idx]['by corpus'][i]['tokens-tags'].append(terminals_tok_tags)
if len(terminals) > max_sen_length:
max_sen_length = len(terminals)
print('All raw {} tokens: {}'.format(idx,t))
t1 = 0
t2 = 0
if idx == 'train':
all_sentences = {}
for ts in data[idx]['by corpus']:
t1 += self.org_sen_by_length(all_sentences, ts)
for l in all_sentences:
for s in all_sentences[l]:
t2 += len(s)
data[idx]['by length'] = OrderedDict(sorted(all_sentences.items()))
else:
for ts in data[idx]['by corpus']:
all_sentences = {}
t1 += self.org_sen_by_length(all_sentences, ts)
data[idx]['by length'][ts['name']] = OrderedDict(sorted(all_sentences.items()))
for ts in data[idx]['by length']:
for l in data[idx]['by length'][ts]:
for s in data[idx]['by length'][ts][l]:
t2 += len(s)
print('Added {} {} tokens to the by-corpus table'.format(t1,idx))
print('Added {} {} tokens to the by-length table'.format(t2,idx))
return max_sen_length, corpus_size, n+1, data
def org_sen_by_length(self, all_sentences, ts):
n = 0
for l in ts['sentences']:
for s in ts['sentences'][l]:
n += len(s)
if l not in all_sentences:
all_sentences[l] = []
all_sentences[l] += ts['sentences'][l]
return n
def process_testsuites_autoreg(self,testsuites,lextypes, out_dir):
max_sen_length, corpus_size, num_ts, data = self.read_testsuites(testsuites)
tables_by_len = {'train':{},'dev':{},'test':{}}
for k in ['train','dev','test']:
pathlib.Path(out_dir + '/labeled-data/' + k).mkdir(parents=True, exist_ok=False)
all_tokens = 0
test = k in ['dev','test']
if test:
for corpus in data[k]['by length']:
all_tokens += self.process_table(data, k, lextypes, tables_by_len, test, corpus)
else:
all_tokens += self.process_table(data, k, lextypes, tables_by_len, test)
print('Total PROCESSED {} tokens: {}'.format(k, all_tokens))
def process_testsuites_nonautoreg(self,testsuites,lextypes, out_dir):
pos_mapper = pos_map.Pos_mapper('./pos-map.txt')
max_sen_length, corpus_size, num_ts, data = self.read_testsuites(testsuites)
for k in ['train','dev','test']:
is_devtest_data = k in ['dev','test']
pathlib.Path(out_dir + '/labeled-data/' + k).mkdir(parents=True, exist_ok=False)
if is_devtest_data:
for corpus in data[k]['by corpus']:
x,y = self.process_corpus(lextypes,corpus,pos_mapper)
data_table['ft'] = x
data_table['lt'] = y
with open(out_dir + '/labeled-data/' + k + '/' + corpus['name'], 'wb') as f:
pickle.dump(data_table, f)
else:
data_table = {'ft':[],'lt':[]}
for corpus in data[k]['by corpus']:
x, y = self.process_corpus(lextypes,corpus,pos_mapper)
data_table['ft'] += x
data_table['lt'] += y
with open(out_dir + '/labeled-data/train/train' , 'wb') as f:
pickle.dump(data_table, f)
def process_corpus(self, lextypes, corpus,pos_mapper):
data = []
y = []
for sen in corpus['tokens-tags']:
tokens, labels, pos_tags, autoregress_labels = \
self.get_tokens_labels(sen, CONTEXT_WINDOW, lextypes, pos_mapper, False)
for k, t in enumerate(tokens):
if k < CONTEXT_WINDOW or k >= len(tokens) - CONTEXT_WINDOW:
continue
y.append(labels[k])
data.append(self.get_context(t, tokens, pos_tags, k, CONTEXT_WINDOW))
return data, y
def process_table(self, data, k, lextypes, tables_by_len, test, corpus=None):
n = 0
table = data[k]['by length'] if not test else data[k]['by length'][corpus]
for sen_len in table:
tables_by_len[k][sen_len] = {}
autoregress_table = np.array([[{}] * len(table[sen_len])
for i in range(sen_len)])
labels_table = np.array([[{}] * len(table[sen_len]) for i in range(sen_len)])
# print("Processing sentences of length {}".format(sen_len))
n += self.process_length(lextypes, table[sen_len],
autoregress_table, labels_table, test=test)
tables_by_len[k][sen_len]['ft'] = autoregress_table
tables_by_len[k][sen_len]['lt'] = labels_table
if test:
with open(out_dir + '/labeled-data/' + k + '/' + corpus, 'wb') as f:
pickle.dump(tables_by_len[k], f)
else:
with open(out_dir + '/labeled-data/train/train' , 'wb') as f:
pickle.dump(tables_by_len[k], f)
return n
'''
Assume a numpy table coming in. Get e.g. tokens 2 through 5 in sentences 4 and 5,
for the test suite #20 in the data.
'''
def get_table_portion(self, ts_info, table, ts_num, token_range, sentence_range):
ts_column = ts_info[ts_num]['column']
tokens = sum(ts_info[ts_num]['sentences'][sentence_range[0]:sentence_range[1]])
return table[token_range[0]:token_range[1],ts_column:ts_column+tokens]
def process_testsuite(self, lextypes, logf, tsuite, autoregress_table, labels_table, start):
print("Processing " + tsuite['name'])
logf.write("Processing " + tsuite['name'] + '\n')
pairs = []
contexts = []
y = []
ys = []
pos_mapper = pos_map.Pos_mapper('./pos-map.txt') # do this for every test suite to count unknowns in each
for sentence_len in tsuite['sentences']:
items = tsuite['sentences'][sentence_len]
for j, lst_of_terminals in enumerate(items):
contexts.append([])
#if j % 100 == 0:
# print("Processing item {} out of {}...".format(j, len(items)))
tokens,labels,pos_tags,autoregress_labels = \
self.get_tokens_labels(tsuite['tokens-tags'][j],CONTEXT_WINDOW, lextypes,pos_mapper,test=False)
ys.append(labels[CONTEXT_WINDOW:CONTEXT_WINDOW*-1])
for k, t in enumerate(tokens):
if k < CONTEXT_WINDOW or k >= len(tokens) - CONTEXT_WINDOW:
continue
pairs.append((t, labels[k]))
y.append(labels[k])
contexts[j].append(self.get_context(t, tokens, pos_tags, k, CONTEXT_WINDOW))
autoregress_table[k-CONTEXT_WINDOW][start+j] = \
self.get_autoregress_context(tokens,pos_tags,autoregress_labels, k,CONTEXT_WINDOW)
labels_table[k-CONTEXT_WINDOW][start+j] = labels[k]
pairs.append(('--EOS--','--EOS--')) # sentence separator
y.append('\n') # sentence separator
self.write_output(contexts, pairs, tsuite['name'])
return ys
def process_length(self, lextypes, items, autoregress_table, labels_table,test):
y = []
ys = []
all_tokens = 0
pos_mapper = pos_map.Pos_mapper('./pos-map.txt') # do this for every test suite to count unknowns in each
for j, lst_of_terminals in enumerate(items):
#if j % 100 == 0:
# print("Processing item {} out of {}...".format(j, len(items)))
tokens,labels,pos_tags,autoregress_labels = \
self.get_tokens_labels(lst_of_terminals,CONTEXT_WINDOW, lextypes,pos_mapper,test)
ys.append(labels[CONTEXT_WINDOW:CONTEXT_WINDOW*-1])
for k, t in enumerate(tokens):
if k < CONTEXT_WINDOW or k >= len(tokens) - CONTEXT_WINDOW:
continue
y.append(labels[k])
autoregress_table[k-CONTEXT_WINDOW][j] = \
self.get_autoregress_context(tokens,pos_tags,autoregress_labels, k,CONTEXT_WINDOW)
labels_table[k-CONTEXT_WINDOW][j] = labels[k]
all_tokens += 1
y.append('\n') # sentence separator
return all_tokens
def map_lattice_to_input(self, p_input, p_tokens, deriv):
yy_lattice = YYTokenLattice.from_string(p_tokens)
yy_input = YYTokenLattice.from_string(p_input)
terminals_toks_postags = []
for t in deriv.terminals():
toks_pos_tags = []
for ttok in t.tokens:
span = None
pos_probs = {}
for lat_tok in yy_lattice.tokens:
if lat_tok.id == ttok.id:
span = lat_tok.lnk.data
break
for i,in_tok in enumerate(yy_input.tokens):
if in_tok.lnk.data[0] == span[0]:
for pos, p in in_tok.pos:
if pos not in pos_probs:
pos_probs[pos] = []
pos_probs[pos].append(float(p))
if in_tok.lnk.data[1] != span[1]:
cur_tok = in_tok
while cur_tok.lnk.data[1] != span[1]:
next_tok = yy_input.tokens[i+1]
i += 1
for pos, p in next_tok.pos:
if pos not in pos_probs:
pos_probs[pos] = []
pos_probs[pos].append(float(p))
cur_tok = next_tok
else:
break
toks_pos_tags.append((ttok, pos_probs))
terminals_toks_postags.append((t,toks_pos_tags))
return terminals_toks_postags
def write_output(self, contexts, pairs, ts_name):
for d in ['train/','test/','dev/', 'ignore/']:
for pd in ['simple/','by-corpus/contexts/','by-corpus/true_labels/']:
pathlib.Path('./output/' + pd + d).mkdir(parents=True, exist_ok=True)
true_labels = []
suf = 'train/'
if ts_name in IGNORE:
suf = 'ignore/'
if ts_name in TEST:
suf = 'test/'
elif ts_name in DEV:
suf = 'dev/'
with open('./output/simple/' + suf + ts_name, 'w') as f:
for form, letype in pairs:
if not letype=='--EOS--':
true_labels.append(str(letype))
str_pair = f'{form}\t{letype}'
f.write(str_pair + '\n')
else:
f.write('\n') # sentence separator
true_labels.append('\n') # sentence separator
with open('./output/by-corpus/true_labels/' + suf + ts_name, 'w') as f:
for tl in true_labels:
f.write(tl)
if tl != '\n':
f.write('\n')
with open('./output/by-corpus/contexts/' + suf + ts_name, 'w') as f:
f.write(json.dumps(contexts))
def get_context(self, t, tokens, pos_tags, i, window):
context = {'w': t, 'pos': pos_tags[i]}
for j in range(1,window+1):
prev_tok = tokens[i-j]
prev_pos = pos_tags[i-j]
next_tok = tokens[i+j]
next_pos = pos_tags[i+j]
context['w-' + str(j)] = prev_tok
context['w+' + str(j)] = next_tok
context['pos-' + str(j)] = prev_pos
context['pos+' + str(j)] = next_pos
return context
def get_autoregress_context(self,tokens,pos_tags,predicted_labels, k,window):
context = {'w':tokens[k],'pos':pos_tags[k]}
for i in range(1,window+1):
context['w-' + str(i)] = tokens[k-i]
context['w+' + str(i)] = tokens[k+i]
context['pos-' + str(i)] = pos_tags[k-i]
context['pos+' + str(i)] = pos_tags[k+i]
context['tag-' + str(i)] = predicted_labels[k-i] # Will be None or FAKE in test mode
return context
def get_tokens_labels(self, terms_and_tokens_tags, context_window, lextypes,pos_mapper, test):
tokens = []
labels = []
pos_tags = []
previous_tags = []
for i,(terminal, toks_tags) in enumerate(terms_and_tokens_tags):
letype = str(lextypes.get(terminal.parent.entity, "<UNK>"))
tokens.append(terminal.form)
labels.append(letype)
pos_tags.append(self.get_pos_tag(toks_tags, pos_mapper))
if test:
previous_tags.append(None)
else:
previous_tags.append(letype)
for i in range(1,1+context_window):
tokens.insert(0, 'FAKE-' + str(i))
labels.insert(0, 'FAKE-' + str(i))
pos_tags.insert(0,'FAKE-' + str(i))
previous_tags.insert(0, 'FAKE-' + str(i))
tokens.append('FAKE+' + str(i))
labels.append('FAKE+' + str(i))
pos_tags.append('FAKE+' + str(i))
return tokens, labels, pos_tags, previous_tags
def get_pos_tag(self,tokens_tags, pos_mapper):
tag = ''
for tt in tokens_tags:
pos_probs = tt[1]
for pos in pos_probs:
tag = tag + '+' + pos
tag = tag.strip('+')
if '+' in tag:
tag = pos_mapper.map_tag(tag)
return tag
if __name__ == "__main__":
args = sys.argv[1:]
dt_str = '-'.join(str(datetime.now()).split()).replace(':','.')
run_id = sys.argv[3] + dt_str
if len(sys.argv) > 3:
autoreg = sys.argv[4] == 'autoreg'
out_dir = './output/' + run_id
pathlib.Path(out_dir).mkdir(parents=True, exist_ok=False)
le = LexTypeExtractor()
le.parse_lexicons(args[0])
le.stats['total lextypes'] = len(le.lextypes)
if autoreg:
le.process_testsuites_autoreg(args[1],le.lextypes,out_dir)
else:
le.process_testsuites_nonautoreg(args[1],le.lextypes,out_dir)
with open(out_dir + '/lextypes','wb') as f:
lextypes = set([str(v) for v in list(le.lextypes.values())])
pickle.dump(lextypes,f)
| 15,668
| 0
| 457
|
e415bab977b01817df0d4c4b2e45aacf11aa8fbf
| 2,294
|
py
|
Python
|
homeassistant/components/environment_canada/camera.py
|
mtarjoianu/core
|
44e9146463ac505eb3d1c0651ad126cb25c28a54
|
[
"Apache-2.0"
] | 30,023
|
2016-04-13T10:17:53.000Z
|
2020-03-02T12:56:31.000Z
|
homeassistant/components/environment_canada/camera.py
|
mtarjoianu/core
|
44e9146463ac505eb3d1c0651ad126cb25c28a54
|
[
"Apache-2.0"
] | 24,710
|
2016-04-13T08:27:26.000Z
|
2020-03-02T12:59:13.000Z
|
homeassistant/components/environment_canada/camera.py
|
mtarjoianu/core
|
44e9146463ac505eb3d1c0651ad126cb25c28a54
|
[
"Apache-2.0"
] | 11,956
|
2016-04-13T18:42:31.000Z
|
2020-03-02T09:32:12.000Z
|
"""Support for the Environment Canada radar imagery."""
from __future__ import annotations
import voluptuous as vol
from homeassistant.components.camera import Camera
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import (
AddEntitiesCallback,
async_get_current_platform,
)
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import ATTR_OBSERVATION_TIME, DOMAIN
SERVICE_SET_RADAR_TYPE = "set_radar_type"
SET_RADAR_TYPE_SCHEMA = {
vol.Required("radar_type"): vol.In(["Auto", "Rain", "Snow"]),
}
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Add a weather entity from a config_entry."""
coordinator = hass.data[DOMAIN][config_entry.entry_id]["radar_coordinator"]
async_add_entities([ECCamera(coordinator)])
platform = async_get_current_platform()
platform.async_register_entity_service(
SERVICE_SET_RADAR_TYPE,
SET_RADAR_TYPE_SCHEMA,
"async_set_radar_type",
)
class ECCamera(CoordinatorEntity, Camera):
"""Implementation of an Environment Canada radar camera."""
def __init__(self, coordinator):
"""Initialize the camera."""
super().__init__(coordinator)
Camera.__init__(self)
self.radar_object = coordinator.ec_data
self._attr_name = f"{coordinator.config_entry.title} Radar"
self._attr_unique_id = f"{coordinator.config_entry.unique_id}-radar"
self._attr_attribution = self.radar_object.metadata["attribution"]
self._attr_entity_registry_enabled_default = False
self.content_type = "image/gif"
def camera_image(
self, width: int | None = None, height: int | None = None
) -> bytes | None:
"""Return bytes of camera image."""
self._attr_extra_state_attributes = {
ATTR_OBSERVATION_TIME: self.radar_object.timestamp,
}
return self.radar_object.image
async def async_set_radar_type(self, radar_type: str):
"""Set the type of radar to retrieve."""
self.radar_object.precip_type = radar_type.lower()
await self.radar_object.update()
| 33.246377
| 79
| 0.722319
|
"""Support for the Environment Canada radar imagery."""
from __future__ import annotations
import voluptuous as vol
from homeassistant.components.camera import Camera
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import (
AddEntitiesCallback,
async_get_current_platform,
)
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import ATTR_OBSERVATION_TIME, DOMAIN
SERVICE_SET_RADAR_TYPE = "set_radar_type"
SET_RADAR_TYPE_SCHEMA = {
vol.Required("radar_type"): vol.In(["Auto", "Rain", "Snow"]),
}
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Add a weather entity from a config_entry."""
coordinator = hass.data[DOMAIN][config_entry.entry_id]["radar_coordinator"]
async_add_entities([ECCamera(coordinator)])
platform = async_get_current_platform()
platform.async_register_entity_service(
SERVICE_SET_RADAR_TYPE,
SET_RADAR_TYPE_SCHEMA,
"async_set_radar_type",
)
class ECCamera(CoordinatorEntity, Camera):
"""Implementation of an Environment Canada radar camera."""
def __init__(self, coordinator):
"""Initialize the camera."""
super().__init__(coordinator)
Camera.__init__(self)
self.radar_object = coordinator.ec_data
self._attr_name = f"{coordinator.config_entry.title} Radar"
self._attr_unique_id = f"{coordinator.config_entry.unique_id}-radar"
self._attr_attribution = self.radar_object.metadata["attribution"]
self._attr_entity_registry_enabled_default = False
self.content_type = "image/gif"
def camera_image(
self, width: int | None = None, height: int | None = None
) -> bytes | None:
"""Return bytes of camera image."""
self._attr_extra_state_attributes = {
ATTR_OBSERVATION_TIME: self.radar_object.timestamp,
}
return self.radar_object.image
async def async_set_radar_type(self, radar_type: str):
"""Set the type of radar to retrieve."""
self.radar_object.precip_type = radar_type.lower()
await self.radar_object.update()
| 0
| 0
| 0
|
dfd8028393ae8ae7d4bfcfe8f9c74276b8f956f7
| 308
|
py
|
Python
|
v1/chapter6/4-readingCsvDict.py
|
QTYResources/python-scraping
|
d7afe25a012fb5d079ee42372c7fce94b9494b9f
|
[
"MIT"
] | null | null | null |
v1/chapter6/4-readingCsvDict.py
|
QTYResources/python-scraping
|
d7afe25a012fb5d079ee42372c7fce94b9494b9f
|
[
"MIT"
] | null | null | null |
v1/chapter6/4-readingCsvDict.py
|
QTYResources/python-scraping
|
d7afe25a012fb5d079ee42372c7fce94b9494b9f
|
[
"MIT"
] | null | null | null |
from urllib.request import urlopen
from io import StringIO
import csv
data = urlopen("http://pythonscraping.com/files/MontyPythonAlbums.csv").read().decode('ascii', 'ignore')
dataFile = StringIO(data)
dictReader = csv.DictReader(dataFile)
print(dictReader.fieldnames)
for row in dictReader:
print(row)
| 25.666667
| 104
| 0.775974
|
from urllib.request import urlopen
from io import StringIO
import csv
data = urlopen("http://pythonscraping.com/files/MontyPythonAlbums.csv").read().decode('ascii', 'ignore')
dataFile = StringIO(data)
dictReader = csv.DictReader(dataFile)
print(dictReader.fieldnames)
for row in dictReader:
print(row)
| 0
| 0
| 0
|
02ae809d3645a6053bab6f39633b7e2d90bf2e2e
| 741
|
py
|
Python
|
hash-array-string/1.8-zero-matrix/main.py
|
digoreis/code-interview
|
e2250c39b0fc9b6a8f0bc151b4f796d17cdce3e3
|
[
"MIT"
] | null | null | null |
hash-array-string/1.8-zero-matrix/main.py
|
digoreis/code-interview
|
e2250c39b0fc9b6a8f0bc151b4f796d17cdce3e3
|
[
"MIT"
] | null | null | null |
hash-array-string/1.8-zero-matrix/main.py
|
digoreis/code-interview
|
e2250c39b0fc9b6a8f0bc151b4f796d17cdce3e3
|
[
"MIT"
] | null | null | null |
# Write an algorithm such that if an element in MxN matrix is 0, it's entire row and column are set to 0.
matrix = [[1,1,1,1],[1,1,1,1],[1,1,0,1],[1,1,1,1],[1,1,1,0]]
matrixZero(matrix)
print('\n'.join(['\t'.join([str(cell) for cell in row]) for row in matrix]))
| 26.464286
| 105
| 0.562753
|
# Write an algorithm such that if an element in MxN matrix is 0, it's entire row and column are set to 0.
def rowZero(matrix, row):
for i in range(len(matrix[row])):
matrix[row][i] = 0
def columnZero(matrix, column):
for i in range(len(matrix)):
matrix[i][column] = 0
def matrixZero(matrix):
points = []
for i in range(len(matrix)):
for j in range(len(matrix[0])):
if matrix[i][j] == 0:
points.append((i,j))
for p in points:
rowZero(matrix, p[0])
columnZero(matrix, p[1])
matrix = [[1,1,1,1],[1,1,1,1],[1,1,0,1],[1,1,1,1],[1,1,1,0]]
matrixZero(matrix)
print('\n'.join(['\t'.join([str(cell) for cell in row]) for row in matrix]))
| 393
| 0
| 68
|
d805c677ed9537d580479c240741257bc4c84e5c
| 4,725
|
py
|
Python
|
src/Class/shadowedrice.py
|
Jonathan-Browning/Shadowed-Rician-Fading-Python
|
c1faa061c4d2a253bd1fe7098edc0e21740cb3ea
|
[
"MIT"
] | 2
|
2021-02-23T15:49:47.000Z
|
2021-04-24T01:32:42.000Z
|
src/Class/shadowedrice.py
|
Jonathan-Browning/Shadowed-Rician-Fading-Python
|
c1faa061c4d2a253bd1fe7098edc0e21740cb3ea
|
[
"MIT"
] | null | null | null |
src/Class/shadowedrice.py
|
Jonathan-Browning/Shadowed-Rician-Fading-Python
|
c1faa061c4d2a253bd1fe7098edc0e21740cb3ea
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 4 18:55:46 2020
@author: Jonathan Browning
"""
import numpy as np
from scipy.stats import gaussian_kde as kdf
from scipy import special as sp
| 36.346154
| 151
| 0.572275
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 4 18:55:46 2020
@author: Jonathan Browning
"""
import numpy as np
from scipy.stats import gaussian_kde as kdf
from scipy import special as sp
class ShadowedRice:
numSamples = 2*(10**6) # the number of samples used in the simulation
r = np.linspace(0, 6, 6000) # theoretical envelope PDF x axes
theta = np.linspace(-np.pi, np.pi, 6000) # theoretical phase PDF x axes
def __init__(self, K, m, r_hat, phi):
# user input checks and assigns value
self.K = self.input_Check(K, "K", 0.001, 50)
self.m = self.input_Check(m, "m", 0.001, 50)
self.r_hat = self.input_Check(r_hat, "\hat{r}", 0.5, 2.5)
self.phi = self.input_Check(phi, "\phi", -np.pi, np.pi)
# simulating and theri densities
self.multipathFading = self.complex_Multipath_Fading()
self.xdataEnv, self.ydataEnv = self.envelope_Density()
self.xdataPh, self.ydataPh = self.phase_Density()
# theoretical PDFs calculated
self.envelopeProbability = self.envelope_PDF()
self.phaseProbability = self.phase_PDF()
def input_Check(self, data, inputName, lower, upper):
# input_Check checks the user inputs
# has a value been entered
if data == "":
raise ValueError(" ".join((inputName, "must have a numeric value")))
# incase of an non-numeric input
try:
data = float(data)
except:
raise ValueError(" ".join((inputName, "must have a numeric value")))
# data must be within the range
if data < lower or data > upper:
raise ValueError(" ".join((inputName, f"must be in the range [{lower:.2f}, {upper:.2f}]")))
return data
def calculate_Means(self):
# calculate_means calculates the means of the complex Gaussians representing the
# in-phase and quadrature components
p = np.sqrt(self.K / (1+self.K)) * self.r_hat * np.cos(self.phi)
q = np.sqrt(self.K / (1+self.K)) * self.r_hat * np.sin(self.phi)
return p, q
def scattered_Component(self):
# scattered_Component calculates the power of the scattered signal component
sigma = self.r_hat / np.sqrt( 2 * (1+self.K) )
return sigma
def generate_Gaussians(self, mean, sigma):
# generate_Gaussians generates the Gaussian random variables
gaussians = np.random.default_rng().normal(mean, sigma, self.numSamples)
return gaussians
def complex_Multipath_Fading(self):
# complex_Multipath_Fading generates the complex fading random variables
p, q = self.calculate_Means()
sigma = self.scattered_Component()
xi = np.sqrt(np.random.gamma(self.m, 1/self.m, self.numSamples))
multipathFading = self.generate_Gaussians(xi*p, sigma) + (1j*self.generate_Gaussians(xi*q, sigma))
return multipathFading
def envelope_PDF(self):
# envelope_PDF calculates the theoretical envelope PDF
PDF = 2 * (1+self.K) * self.r *(self.m**(self.m)) / (self.r_hat**(2)*(self.m+self.K)**(self.m)) \
* np.exp(- ((1+self.K) * self.r**(2)) / self.r_hat**(2)) \
* sp.hyp1f1(self.m, 1, self.r**(2)*self.K*(self.K+1)/(self.r_hat**(2)*(self.K+self.m)))
return PDF
def phase_PDF(self):
# phase_PDF calculates the theoretical phase PDF
PDF = (self.m**self.m * np.sqrt(self.K)/(2 * np.sqrt(np.pi) * (self.K + self.m)**(self.m +1/2))) \
* ( np.sqrt((self.K +self.m)/(np.pi*self.K)) * sp.hyp2f1(self.m, 1, 1/2, (self.K*(np.cos(self.theta - self.phi))**(2))/(self.K +self.m)) \
+ ((sp.gamma(self.m+1/2) / sp.gamma(self.m))*np.cos(self.theta-self.phi) \
* (1- (self.K*(np.cos(self.theta - self.phi))**(2)) / (self.K +self.m))**(-self.m-1/2)))
return PDF
def envelope_Density(self):
# envelope_Density finds the envelope PDF of the simulated random variables
R = np.sqrt((np.real(self.multipathFading))**2 + (np.imag(self.multipathFading))**2)
kde = kdf(R)
x = np.linspace(R.min(), R.max(), 100)
p = kde(x)
return x, p
def phase_Density(self):
# phase_Density finds the phase PDF of the simulated random variables
R = np.angle(self.multipathFading)
kde = kdf(R)
x = np.linspace(R.min(), R.max(), 100)
p = kde(x)
return x, p
| 3,978
| 525
| 23
|
c00b03a6c58efa0a53f7586ea8d163bb92f588f1
| 1,063
|
py
|
Python
|
src/ggrc_workflows/migrations/versions/20170925135632_3ebe14ae9547_set_empty_next_cycle_start_date.py
|
HLD/ggrc-core
|
9bdc0fc6ca9e252f4919db682d80e360d5581eb4
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/ggrc_workflows/migrations/versions/20170925135632_3ebe14ae9547_set_empty_next_cycle_start_date.py
|
HLD/ggrc-core
|
9bdc0fc6ca9e252f4919db682d80e360d5581eb4
|
[
"ECL-2.0",
"Apache-2.0"
] | 10
|
2018-07-06T00:04:23.000Z
|
2021-02-26T21:13:20.000Z
|
src/ggrc_workflows/migrations/versions/20170925135632_3ebe14ae9547_set_empty_next_cycle_start_date.py
|
HLD/ggrc-core
|
9bdc0fc6ca9e252f4919db682d80e360d5581eb4
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2017-11-11T22:16:56.000Z
|
2017-11-11T22:16:56.000Z
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Set empty next_cycle_start_date
Create Date: 2017-09-25 13:56:32.087965
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from alembic import op
# revision identifiers, used by Alembic.
revision = '3ebe14ae9547'
down_revision = '4991c5731711'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.execute("UPDATE workflows, ( "
"SELECT w.id "
"FROM workflows AS w "
"LEFT JOIN task_groups AS tg ON tg.workflow_id = w.id "
"LEFT JOIN task_group_tasks AS t ON t.task_group_id = tg.id "
"WHERE t.id IS NULL AND w.next_cycle_start_date IS NOT NULL "
") AS t "
"SET workflows.next_cycle_start_date = NULL "
"WHERE workflows.id = t.id;")
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
pass
| 29.527778
| 79
| 0.664158
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Set empty next_cycle_start_date
Create Date: 2017-09-25 13:56:32.087965
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from alembic import op
# revision identifiers, used by Alembic.
revision = '3ebe14ae9547'
down_revision = '4991c5731711'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.execute("UPDATE workflows, ( "
"SELECT w.id "
"FROM workflows AS w "
"LEFT JOIN task_groups AS tg ON tg.workflow_id = w.id "
"LEFT JOIN task_group_tasks AS t ON t.task_group_id = tg.id "
"WHERE t.id IS NULL AND w.next_cycle_start_date IS NOT NULL "
") AS t "
"SET workflows.next_cycle_start_date = NULL "
"WHERE workflows.id = t.id;")
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
pass
| 0
| 0
| 0
|
a9dd6bbf97a596cde44dca1d194056908053fcb0
| 29,435
|
py
|
Python
|
test/test_madx.py
|
odidev/cpymad
|
7b58d013a669d0973c233743e05fa205257233dd
|
[
"ECL-2.0",
"Apache-2.0"
] | 22
|
2015-05-27T13:45:55.000Z
|
2022-03-03T15:43:47.000Z
|
test/test_madx.py
|
odidev/cpymad
|
7b58d013a669d0973c233743e05fa205257233dd
|
[
"ECL-2.0",
"Apache-2.0"
] | 102
|
2015-01-23T18:21:29.000Z
|
2022-02-28T17:07:26.000Z
|
test/test_madx.py
|
odidev/cpymad
|
7b58d013a669d0973c233743e05fa205257233dd
|
[
"ECL-2.0",
"Apache-2.0"
] | 18
|
2015-01-24T12:43:57.000Z
|
2021-11-23T08:29:57.000Z
|
"""
Tests for the :class:`cpymad.madx.Madx` API.
"""
import os
import sys
import numpy as np
from numpy.testing import assert_allclose, assert_equal
from pytest import approx, fixture, mark, raises
import cpymad
from cpymad.madx import Madx, Sequence, metadata
@fixture
@fixture
SEQU = """
! constants
QP_K1 = 2;
! elements
qp: quadrupole, k1:=QP_K1, l=1;
sb: sbend, l=2, angle=3.14/4;
dr: drift, l=1;
! sequences
s1: sequence, l=8, refer=center;
dr, at=0.5; ! dr[1] ~ betx_full1[1]
qp, at=1.5;
dr, at=2.5; ! dr[2] ~ betx_full1[3] ~ betx_range[0]
qp, at=3.5; ! ~ betx_full1[4] ~ betx_range[1]
dr, at=4.5;
sb, at=6.0; ! ~ betx_range[3]
dr, at=7.5;
endsequence;
s2: sequence, l=3, refer=entry;
qp1: qp, at=0, k1=3;
qp2: qp, at=1, l=2;
endsequence;
"""
def normalize(path):
"""Normalize path name to eliminate different spellings of the same path.
This is needed for path comparisons in tests, especially on windows where
pathes are case insensitive and allow a multitude of spellings."""
return os.path.normcase(os.path.normpath(path))
def test_version(mad):
"""Check that the Madx.version attribute can be used as expected."""
version = mad.version
# check format:
major, minor, micro = map(int, version.release.split('.'))
# We need at least MAD-X 5.05.00:
assert (major, minor, micro) >= (5, 5, 0)
# check format:
year, month, day = map(int, version.date.split('.'))
assert (year, month, day) >= (2019, 5, 10)
assert 1 <= month <= 12
assert 1 <= day <= 31
assert str(version).startswith(
'MAD-X {}'.format(version.release))
# TODO: We need to fix this on windows, but for now, I just need it to
# pass so that the CI builds the release...
@mark.xfail(
sys.platform != 'linux',
reason='Output is sometimes garbled on MacOS and windows.',
)
@mark.xfail(
sys.platform != 'linux',
reason='Output is sometimes garbled on MacOS and windows.',
)
def test_command_log():
"""Check that the command log contains all input commands."""
# create a new Madx instance that uses the history feature:
history_filename = '_test_madx.madx.tmp'
try:
# feed some input lines and compare with history file:
lines = """
l = 5;
f = 200;
fodo: sequence, refer=entry, l=100;
QF: quadrupole, l=5, at= 0, k1= 1/(f*l);
QD: quadrupole, l=5, at=50, k1=-1/(f*l);
endsequence;
beam, particle=proton, energy=2;
use, sequence=fodo;
""".splitlines()
lines = [line.strip() for line in lines if line.strip()]
with Madx(command_log=history_filename) as mad:
for line in lines:
mad.input(line)
with open(history_filename) as history_file:
history = history_file.read()
assert history.strip() == '\n'.join(lines).strip()
finally:
# remove history file
os.remove(history_filename)
def test_append_semicolon():
"""Check that semicolon is automatically appended to input() text."""
# Regression test for #73
log = []
with Madx(command_log=log.append) as mad:
mad.input('a = 0')
mad.input('b = 1')
assert log == ['a = 0;', 'b = 1;']
assert mad.globals.a == 0
assert mad.globals.b == 1
# def test_sequence_get_expanded_elements():
def test_crash(mad):
"""Check that a RuntimeError is raised in case MAD-X crashes."""
assert bool(mad)
# a.t.m. MAD-X crashes on this input, because the L (length)
# parametere is missing:
raises(RuntimeError, mad.input, 'XXX: sequence;')
assert not bool(mad)
| 29.114738
| 77
| 0.592526
|
"""
Tests for the :class:`cpymad.madx.Madx` API.
"""
import os
import sys
import numpy as np
from numpy.testing import assert_allclose, assert_equal
from pytest import approx, fixture, mark, raises
import cpymad
from cpymad.madx import Madx, Sequence, metadata
@fixture
def mad():
with Madx(prompt='X:> ') as mad:
yield mad
@fixture
def lib(mad):
return mad._libmadx
SEQU = """
! constants
QP_K1 = 2;
! elements
qp: quadrupole, k1:=QP_K1, l=1;
sb: sbend, l=2, angle=3.14/4;
dr: drift, l=1;
! sequences
s1: sequence, l=8, refer=center;
dr, at=0.5; ! dr[1] ~ betx_full1[1]
qp, at=1.5;
dr, at=2.5; ! dr[2] ~ betx_full1[3] ~ betx_range[0]
qp, at=3.5; ! ~ betx_full1[4] ~ betx_range[1]
dr, at=4.5;
sb, at=6.0; ! ~ betx_range[3]
dr, at=7.5;
endsequence;
s2: sequence, l=3, refer=entry;
qp1: qp, at=0, k1=3;
qp2: qp, at=1, l=2;
endsequence;
"""
def normalize(path):
"""Normalize path name to eliminate different spellings of the same path.
This is needed for path comparisons in tests, especially on windows where
pathes are case insensitive and allow a multitude of spellings."""
return os.path.normcase(os.path.normpath(path))
def test_copyright():
notice = cpymad.get_copyright_notice()
assert isinstance(notice, type(u""))
def test_version(mad):
"""Check that the Madx.version attribute can be used as expected."""
version = mad.version
# check format:
major, minor, micro = map(int, version.release.split('.'))
# We need at least MAD-X 5.05.00:
assert (major, minor, micro) >= (5, 5, 0)
# check format:
year, month, day = map(int, version.date.split('.'))
assert (year, month, day) >= (2019, 5, 10)
assert 1 <= month <= 12
assert 1 <= day <= 31
assert str(version).startswith(
'MAD-X {}'.format(version.release))
def test_metadata(mad):
version = mad.version
assert metadata.__version__ == version.release
assert isinstance(metadata.get_copyright_notice(), type(u""))
def test_independent_instances():
# Check independence by defining a variable differently in each
# instance:
with Madx(prompt='X1:> ') as mad1, Madx(prompt='X2:> ') as mad2:
mad1.input('ANSWER=42;')
mad2.input('ANSWER=43;')
assert mad1.eval('ANSWER') == 42
assert mad2.eval('ANSWER') == 43
# TODO: We need to fix this on windows, but for now, I just need it to
# pass so that the CI builds the release...
@mark.xfail(
sys.platform != 'linux',
reason='Output is sometimes garbled on MacOS and windows.',
)
def test_streamreader():
output = []
with Madx(stdout=output.append) as m:
assert len(output) == 1
assert b'+++++++++++++++++++++++++++++++++' in output[0]
assert b'+ Support: [email protected],' in output[0]
assert b'+ Release date: ' in output[0]
assert b'+ Execution date: ' in output[0]
# assert b'+ Support: [email protected], ', output[1]
m.input('foo = 3;')
assert len(output) == 1
m.input('foo = 3;')
assert len(output) == 2
assert output[1] == b'++++++ info: foo redefined\n'
assert len(output) == 3
assert b'+ MAD-X finished normally ' in output[2]
def test_quit(mad):
mad.quit()
assert mad._process.returncode is not None
assert not bool(mad)
with raises(RuntimeError):
mad.input(';')
@mark.xfail(
sys.platform != 'linux',
reason='Output is sometimes garbled on MacOS and windows.',
)
def test_context_manager():
output = []
with Madx(stdout=output.append) as m:
m.input('foo = 3;')
assert m.globals.foo == 3
assert b'+ MAD-X finished normally ' in output[-1]
assert not bool(m)
with raises(RuntimeError):
m.input(';')
def test_command_log():
"""Check that the command log contains all input commands."""
# create a new Madx instance that uses the history feature:
history_filename = '_test_madx.madx.tmp'
try:
# feed some input lines and compare with history file:
lines = """
l = 5;
f = 200;
fodo: sequence, refer=entry, l=100;
QF: quadrupole, l=5, at= 0, k1= 1/(f*l);
QD: quadrupole, l=5, at=50, k1=-1/(f*l);
endsequence;
beam, particle=proton, energy=2;
use, sequence=fodo;
""".splitlines()
lines = [line.strip() for line in lines if line.strip()]
with Madx(command_log=history_filename) as mad:
for line in lines:
mad.input(line)
with open(history_filename) as history_file:
history = history_file.read()
assert history.strip() == '\n'.join(lines).strip()
finally:
# remove history file
os.remove(history_filename)
def test_append_semicolon():
"""Check that semicolon is automatically appended to input() text."""
# Regression test for #73
log = []
with Madx(command_log=log.append) as mad:
mad.input('a = 0')
mad.input('b = 1')
assert log == ['a = 0;', 'b = 1;']
assert mad.globals.a == 0
assert mad.globals.b == 1
def test_call_and_chdir(mad):
folder = os.path.abspath(os.path.dirname(__file__))
parent = os.path.dirname(folder)
getcwd = mad._libmadx.getcwd
g = mad.globals
mad.chdir(folder)
assert normalize(getcwd()) == normalize(folder)
mad.call('answer_42.madx')
assert g.answer == 42
with mad.chdir('..'):
assert normalize(getcwd()) == normalize(parent)
mad.call('test/answer_43.madx')
assert g.answer == 43
mad.call('test/answer_call42.madx', True)
assert g.answer == 42
assert normalize(getcwd()) == normalize(folder)
mad.call('answer_43.madx')
assert g.answer == 43
mad.chdir('..')
assert normalize(getcwd()) == normalize(parent)
def _check_twiss(mad, seq_name):
beam = 'ex=1, ey=2, particle=electron, sequence={0};'.format(seq_name)
mad.command.beam(beam)
mad.use(seq_name)
initial = dict(alfx=0.5, alfy=1.5,
betx=2.5, bety=3.5)
twiss = mad.twiss(sequence=seq_name, **initial)
# Check initial values:
assert twiss['alfx'][0] == approx(initial['alfx'])
assert twiss['alfy'][0] == approx(initial['alfy'])
assert twiss['betx'][0] == approx(initial['betx'])
assert twiss['bety'][0] == approx(initial['bety'])
assert twiss.summary['ex'] == approx(1)
assert twiss.summary['ey'] == approx(2)
# Check that keys are all lowercase:
for k in twiss:
assert k == k.lower()
for k in twiss.summary:
assert k == k.lower()
def test_error(mad):
mad.input("""
seq: sequence, l=1;
endsequence;
beam;
use, sequence=seq;
""")
# Errors in MAD-X must not crash, but return False instead:
assert not mad.input('twiss;')
assert mad.input('twiss, betx=1, bety=1;')
def test_twiss_1(mad):
mad.input(SEQU)
_check_twiss(mad, 's1') # s1 can be computed at start
_check_twiss(mad, 's1') # s1 can be computed multiple times
_check_twiss(mad, 's2') # s2 can be computed after s1
def test_twiss_2(mad):
mad.input(SEQU)
_check_twiss(mad, 's2') # s2 can be computed at start
_check_twiss(mad, 's1') # s1 can be computed after s2
def test_twiss_with_range(mad):
beam = 'ex=1, ey=2, particle=electron, sequence=s1;'
mad.input(SEQU)
mad.command.beam(beam)
mad.use('s1')
params = dict(alfx=0.5, alfy=1.5,
betx=2.5, bety=3.5,
sequence='s1')
# Compute TWISS on full sequence, then on a sub-range, then again on
# the full sequence. This checks that none of the range selections
# have side-effects on each other:
betx_full1 = mad.twiss(**params)['betx']
betx_range = mad.twiss(range=('dr[2]', 'sb'), **params)['betx']
betx_full2 = mad.twiss(**params)['betx']
# Check that the results have the expected lengths:
assert len(betx_full1) == 9
assert len(betx_range) == 4
assert len(betx_full2) == 9
# Check numeric results. Since the first 3 elements of range and full
# sequence are identical, equal results are expected. And non-equal
# results afterwards.
assert betx_range[0] == approx(betx_full1[1]) # dr:2, dr:1
assert betx_range[1] == approx(betx_full1[2]) # qp:2, qp:1
assert betx_range[2] == approx(betx_full1[3]) # dr:3, dr:2
assert betx_range[3] != approx(betx_full1[4]) # sb, qp:2
def test_range_row_api(mad):
beam = 'ex=1, ey=2, particle=electron, sequence=s1;'
mad.input(SEQU)
mad.command.beam(beam)
mad.use('s1')
params = dict(alfx=0.5, alfy=1.5,
betx=2.5, bety=3.5,
sequence='s1')
tab = mad.twiss(range=('dr[2]', 'sb'), **params)
assert tab.range == ('dr[2]', 'sb')
assert 'betx' in tab
def test_survey(mad):
mad.input(SEQU)
mad.beam()
mad.use('s1')
tab = mad.survey()
assert tab._name == 'survey'
assert 'x' in tab
assert 'y' in tab
assert 'z' in tab
assert 'theta' in tab
assert 'phi' in tab
assert 'psi' in tab
assert tab.x[-1] < -1
assert tab.y == approx(0)
assert tab.z[-1] > 7
def test_match(mad):
beam = 'ex=1, ey=2, particle=electron, sequence=s2;'
mad.input(SEQU)
mad.command.beam(beam)
mad.use('s2')
params = dict(alfx=0.5, alfy=1.5,
betx=2.5, bety=3.5,
sequence='s2')
mad.match(constraints=[dict(range='s1$end', betx=2.0)],
weight={'betx': 2},
vary=['qp2->k1'],
**params)
twiss = mad.twiss(**params)
val = twiss.betx[-1]
assert val == approx(2.0, rel=1e-2)
def test_verbose(mad):
mad.verbose(False)
assert mad.options.echo is False
assert mad.options.info is False
mad.verbose(True)
assert mad.options.echo is True
assert mad.options.info is True
def test_active_sequence(mad):
mad.input(SEQU)
mad.command.beam('ex=1, ey=2, particle=electron, sequence=s1;')
mad.use('s1')
assert mad.sequence() == 's1'
mad.beam()
mad.use('s2')
assert mad.sequence().name == 's2'
def test_get_sequence(mad):
mad.input(SEQU)
with raises(KeyError):
mad.sequence['sN']
s1 = mad.sequence['s1']
assert s1.name == 's1'
seqs = mad.sequence
assert set(seqs) == {'s1', 's2'}
def test_eval(mad):
mad.input(SEQU)
assert mad.eval(True) is True
assert mad.eval(13) == 13
assert mad.eval(1.3) == 1.3
assert mad.eval([2, True, 'QP_K1']) == [2, True, 2.0]
assert mad.eval("1/QP_K1") == approx(0.5)
def test_eval_functions(mad):
assert mad.eval("sin(1.0)") == approx(np.sin(1.0))
assert mad.eval("cos(1.0)") == approx(np.cos(1.0))
mad.input("""
mqf.k1 = 0.3037241107;
mqd.k1 = -0.3037241107;
fodo: sequence, l=10, refer=entry;
mqf: quadrupole, at=0, l=1, k1:=mqf.k1;
dff: drift, at=1, l=4;
mqd: quadrupole, at=5, l=1, k1:=mqd.k1;
dfd: drift, at=6, l=4;
endsequence;
beam;
use, sequence=fodo;
twiss, sequence=fodo, x=0.1;
""")
elems = mad.sequence.fodo.expanded_elements
twiss = mad.table.twiss
mad.input("mqf_x = table(twiss, mqf, x);")
assert mad.eval("table(twiss, mqf, x)") \
== twiss.row(elems.index('mqf')).x \
== mad.globals.mqf_x
def test_globals(mad):
g = mad.globals
# Membership:
assert 'FOO' not in g
# Setting values:
g['FOO'] = 2
assert 'FOO' in g
assert g['FOO'] == 2
assert mad.eval('FOO') == 2
# Re-setting values:
g['FOO'] = 3
assert mad.eval('FOO') == 3
# Setting expressions:
g['BAR'] = '3*foo'
assert mad.eval('BAR') == 9
g['FOO'] = 4
assert mad.eval('BAR') == 12
assert g.defs.bar == "3*foo"
assert g.cmdpar.bar.definition == "3*foo"
# attribute access:
g.bar = 42
assert g.defs.bar == 42
assert g.cmdpar.bar.definition == 42
assert g.BAR == 42
# repr
assert "'bar': 42.0" in str(g)
with raises(NotImplementedError):
del g['bar']
with raises(NotImplementedError):
del g.bar
assert g.bar == 42 # still there
assert 'bar' in list(g)
assert 'foo' in list(g)
# assert list(g) == list(g.defs)
# assert list(g) == list(g.cmdpar)
assert len(g) == len(list(g))
assert len(g.defs) == len(list(g.defs))
assert len(g.cmdpar) == len(list(g.cmdpar))
def test_elements(mad):
mad.input(SEQU)
assert 'sb' in mad.elements
assert 'sb' in list(mad.elements)
assert 'foobar' not in mad.elements
assert mad.elements['sb']['angle'] == approx(3.14/4)
idx = mad.elements.index('qp1')
elem = mad.elements[idx]
assert elem['k1'] == 3
def test_sequence_map(mad):
mad.input(SEQU)
seq = mad.sequence
assert len(seq) == 2
assert set(seq) == {'s1', 's2'}
assert 's1' in seq
assert 's3' not in seq
assert hasattr(seq, 's1')
assert not hasattr(seq, 's3')
assert seq.s1.name == 's1'
assert seq.s2.name == 's2'
with raises(AttributeError):
seq.s3
def test_table_map(mad):
mad.input(SEQU)
mad.beam()
mad.use('s2')
mad.survey(sequence='s2')
tab = mad.table
assert 'survey' in list(tab)
assert 'survey' in tab
assert 'foobar' not in tab
assert len(tab) == len(list(tab))
with raises(AttributeError):
tab.foobar
def test_sequence(mad):
mad.input(SEQU)
s1 = mad.sequence.s1
assert str(s1) == '<Sequence: s1>'
assert s1 == mad.sequence.s1
assert s1 == 's1'
assert s1 != mad.sequence.s2
assert s1 != 's2'
with raises(RuntimeError):
s1.beam
with raises(RuntimeError):
s1.twiss_table
with raises(RuntimeError):
s1.twiss_table_name
assert not s1.has_beam
assert not s1.is_expanded
s1.expand()
assert s1.has_beam
assert s1.is_expanded
s1.expand() # idempotent
assert s1.has_beam
assert s1.is_expanded
initial = dict(alfx=0.5, alfy=1.5,
betx=2.5, bety=3.5)
mad.twiss(sequence='s1', sectormap=True,
table='my_twiss', **initial)
# Now works:
assert s1.beam.particle == 'positron'
assert s1.twiss_table_name == 'my_twiss'
assert s1.twiss_table.betx[0] == 2.5
assert s1.element_names() == [
's1$start',
'dr', 'qp', 'dr[2]', 'qp[2]', 'dr[3]', 'sb', 'dr[4]',
's1$end',
]
assert s1.expanded_element_names() == s1.element_names()
assert len(s1.element_names()) == len(s1.element_positions())
assert s1.element_positions() == [
0.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 7.0, 8.0]
assert s1.expanded_element_positions() == [
0.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 7.0, 8.0]
assert s1.elements[0].name == 's1$start'
assert s1.elements[-1].name == 's1$end'
assert s1.elements[-1].index == len(s1.elements)-1
assert s1.elements[3].index == 3
assert s1.elements.index('#s') == 0
assert s1.elements.index('#e') == len(s1.elements)-1
assert s1.elements.index('sb') == 6
assert s1.length == 8.0
def test_sequence_get_elements_s1(mad):
mad.input(SEQU)
s1 = mad.sequence.s1.elements
qp1 = s1['qp[1]']
qp2 = s1['qp[2]']
sb1 = s1['sb[1]']
assert s1.index('qp') < s1.index('qp[2]')
assert s1.index('qp[2]') < s1.index('sb')
assert qp1['at'] == approx(1.5)
assert qp2['at'] == approx(3.5)
assert sb1['at'] == approx(6)
assert qp1.position == approx(1)
assert qp2.position == approx(3)
assert sb1.position == approx(5)
assert qp1['l'] == approx(1)
assert qp2['l'] == approx(1)
assert sb1['l'] == approx(2)
assert float(qp1['k1']) == approx(2)
assert float(qp2['k1']) == approx(2)
assert float(sb1['angle']) == approx(3.14/4)
assert qp1.cmdpar.k1.expr.lower() == "qp_k1"
def test_sequence_get_elements_s2(mad):
mad.input(SEQU)
s2 = mad.sequence.s2.elements
qp1 = s2['qp1[1]']
qp2 = s2['qp2[1]']
assert s2.index('qp1') < s2.index('qp2')
assert qp1['at'] == approx(0)
assert qp2['at'] == approx(1)
assert qp1['l'] == approx(1)
assert qp2['l'] == approx(2)
assert float(qp1['k1']) == approx(3)
assert float(qp2['k1']) == approx(2)
# def test_sequence_get_expanded_elements():
def test_crash(mad):
"""Check that a RuntimeError is raised in case MAD-X crashes."""
assert bool(mad)
# a.t.m. MAD-X crashes on this input, because the L (length)
# parametere is missing:
raises(RuntimeError, mad.input, 'XXX: sequence;')
assert not bool(mad)
def test_sequence_elements(mad):
mad.input(SEQU)
elements = mad.sequence['s1'].elements
iqp2 = elements.index('qp[2]')
qp1 = elements['qp[1]']
qp2 = elements[iqp2]
assert qp1['at'] == approx(1.5)
assert qp2['at'] == approx(3.5)
assert qp1.position == approx(1)
assert qp2.position == approx(3)
assert iqp2 == elements.at(3.1)
def test_sequence_expanded_elements(mad):
beam = 'ex=1, ey=2, particle=electron, sequence=s1;'
mad.input(SEQU)
mad.command.beam(beam)
mad.use('s1')
elements = mad.sequence['s1'].expanded_elements
iqp2 = elements.index('qp[2]')
qp1 = elements['qp[1]']
qp2 = elements[iqp2]
assert qp1['at'] == approx(1.5)
assert qp2['at'] == approx(3.5)
assert qp1.position == approx(1)
assert qp2.position == approx(3)
assert iqp2 == elements.at(3.1)
def test_element_inform(mad):
beam = 'ex=1, ey=2, particle=electron, sequence=s1;'
mad.input(SEQU)
mad.command.beam(beam)
mad.use('s1')
elem = mad.sequence.s1.expanded_elements['qp']
assert {
name for name in elem
if elem.cmdpar[name].inform
} == {'k1', 'l', 'at'}
def test_table(mad):
beam = 'ex=1, ey=2, particle=electron, sequence=s1;'
mad.input(SEQU)
mad.command.beam(beam)
mad.use('s1')
initial = dict(alfx=0.5, alfy=1.5,
betx=2.5, bety=3.5)
twiss = mad.twiss(sequence='s1', sectormap=True, **initial)
sector = mad.table.sectortable
assert str(twiss).startswith("<Table 'twiss': ")
assert str(sector).startswith("<Table 'sectortable': ")
assert 'betx' in twiss
assert 't111' in sector
assert 't111' not in twiss
assert 'betx' not in sector
assert len(twiss) == len(list(twiss))
assert set(twiss) == set(twiss[0])
assert twiss.s[5] == twiss[5].s
assert twiss.s[-1] == twiss[-1].s
copy = twiss.copy()
assert copy['betx'] == approx(twiss.betx)
assert set(copy) == set(twiss)
copy = twiss.copy(['betx'])
assert set(copy) == {'betx'}
ALL = slice(None)
assert sector.tmat(0).shape == (6, 6, 6)
assert_allclose(sector.tmat(ALL)[0, 0, 0, :], sector.t111)
assert_allclose(sector.tmat(ALL)[1, 5, 3, :], sector.t264)
assert_allclose(sector.tmat(ALL)[3, 0, 3, :], sector.t414)
assert_allclose(sector.tmat(ALL)[4, 4, 4, :], sector.t555)
assert_allclose(sector.rmat(ALL)[0, 0, :], sector.r11)
assert_allclose(sector.rmat(ALL)[1, 5, :], sector.r26)
assert_allclose(sector.rmat(ALL)[3, 0, :], sector.r41)
assert_allclose(sector.rmat(ALL)[4, 4, :], sector.r55)
assert_allclose(sector.kvec(ALL)[0, :], sector.k1)
assert_allclose(sector.kvec(ALL)[1, :], sector.k2)
assert_allclose(sector.kvec(ALL)[3, :], sector.k4)
assert_allclose(sector.kvec(ALL)[4, :], sector.k5)
r = mad.sectortable()[:, :6, :6]
k = mad.sectortable()[:, 6, :6]
t = mad.sectortable2()
num_elems = len(mad.sequence.s1.elements)
assert t.shape == (num_elems, 6, 6, 6)
assert r.shape == (num_elems, 6, 6)
assert k.shape == (num_elems, 6)
assert_allclose(t[:, 0, 0, 0], sector.t111)
assert_allclose(t[:, 1, 5, 3], sector.t264)
assert_allclose(t[:, 3, 0, 3], sector.t414)
assert_allclose(t[:, 4, 4, 4], sector.t555)
assert_allclose(r[:, 0, 0], sector.r11)
assert_allclose(r[:, 1, 5], sector.r26)
assert_allclose(r[:, 3, 0], sector.r41)
assert_allclose(r[:, 4, 4], sector.r55)
assert_allclose(k[:, 0], sector.k1)
assert_allclose(k[:, 1], sector.k2)
assert_allclose(k[:, 3], sector.k4)
assert_allclose(k[:, 4], sector.k5)
def test_selected_columns(mad, lib):
mad.input(SEQU)
mad.command.beam()
mad.use('s1')
mad.select(flag='twiss', column=['s', 'x', 'y'])
table = mad.twiss(sequence='s1', betx=1, bety=1)
assert set(table) > {'s', 'x', 'y', 'betx', 'bety'}
assert set(table.copy()) > {'s', 'x', 'y', 'betx', 'bety'}
assert table.selected_columns() == ['s', 'x', 'y']
assert table.selection().col_names() == ['s', 'x', 'y']
assert table.selection().copy().keys() == {'s', 'x', 'y'}
mad.select(flag='twiss', clear=True)
mad.select(flag='twiss', column=['betx', 'bety'])
lib.apply_table_selections('twiss')
table = mad.table.twiss
assert set(table) > {'s', 'x', 'y', 'betx', 'bety'}
assert set(table.copy()) > {'s', 'x', 'y', 'betx', 'bety'}
assert table.selected_columns() == ['betx', 'bety']
assert table.selection().col_names() == ['betx', 'bety']
assert table.selection().copy().keys() == {'betx', 'bety'}
def test_table_selected_rows(mad, lib):
mad.input(SEQU)
mad.command.beam()
mad.use('s1')
def check_selection(table, name):
assert_equal(
table.column(name, rows='selected'),
table[name][table.selected_rows()])
assert_equal(
table.column(name, rows='selected'),
table.selection()[name])
mad.select(flag='twiss', class_='quadrupole')
table = mad.twiss(sequence='s1', betx=1, bety=1)
assert table.selected_rows() == [2, 4]
check_selection(table, 'alfx')
check_selection(table, 'alfy')
check_selection(table, 'betx')
check_selection(table, 'bety')
mad.select(flag='twiss', clear=True)
mad.select(flag='twiss', class_='drift')
lib.apply_table_selections('twiss')
table = mad.table.twiss
assert table.selected_rows() == [1, 3, 5, 7]
check_selection(table, 'alfx')
check_selection(table, 'alfy')
check_selection(table, 'betx')
check_selection(table, 'bety')
def test_table_selected_rows_mask(mad, lib):
mad.input(SEQU)
mad.command.beam()
mad.use('s1')
mad.select(flag='twiss', class_='quadrupole')
table = mad.twiss(sequence='s1', betx=1, bety=1)
mask = lib.get_table_selected_rows_mask('twiss')
assert mask.shape == (len(mad.sequence.s1.expanded_elements), )
assert_equal(mask.nonzero(), (table.selected_rows(), ))
def test_attr(mad):
assert hasattr(mad, 'constraint')
assert hasattr(mad, 'constraint_')
assert hasattr(mad, 'global_')
assert not hasattr(mad, 'foobar')
assert not hasattr(mad, '_constraint')
def test_expr(mad):
g = mad.globals
vars = mad.expr_vars
g.foo = 1
g.bar = 2
assert set(vars('foo')) == {'foo'}
assert set(vars('(foo) * sin(2*pi*bar)')) == {'foo', 'bar'}
def test_command(mad):
mad.input(SEQU)
twiss = mad.command.twiss
sbend = mad.elements.sb
clone = sbend.clone('foobar', angle="pi/5", l=1)
assert 'betx=0' in str(twiss)
assert 'angle=' in str(sbend)
assert 'tilt' in sbend
assert sbend.tilt == 0
assert len(sbend) == len(list(sbend))
assert 'tilt' in list(sbend)
assert clone.name == 'foobar'
assert clone.base_type.name == 'sbend'
assert clone.parent.name == 'sb'
assert clone.defs.angle == 'pi / 5'
assert clone.angle == approx(0.6283185307179586)
assert len(clone) == len(sbend)
assert 'angle=0.628' in str(clone)
assert 'tilt' not in str(clone)
clone.angle = 0.125
clone = mad.elements.foobar # need to update cache
assert clone.angle == 0.125
assert len(twiss) == len(list(twiss))
assert 'betx' in list(twiss)
assert clone.angle != approx(clone.parent.angle)
del clone.angle
clone = mad.elements.foobar # need to update cache
assert clone.angle == clone.parent.angle
with raises(AttributeError):
clone.missing_attribute
with raises(NotImplementedError):
del twiss['betx']
with raises(NotImplementedError):
del clone.base_type.angle
def test_array_attribute(mad):
mad.globals.nine = 9
clone = mad.elements.multipole.clone('foo', knl=[0, 'nine/3', 4])
knl = clone.knl
assert knl[0] == 0
assert knl[1] == 3
assert knl[2] == 4
assert len(knl) == 3
assert list(knl) == [0.0, 3.0, 4.0]
assert str(knl) == '[0.0, 3.0, 4.0]'
knl[1] = '3*nine'
assert mad.elements.foo.defs.knl[1] == '3 * nine'
assert mad.elements.foo.knl[1] == 27
def test_array_attribute_comparison(mad):
mad.globals.nine = 9
foo = mad.elements.multipole.clone('foo', knl=[0, 5, 10])
bar_eq = mad.elements.multipole.clone('bar_eq', knl=[0, 5, 10])
bar_gt = mad.elements.multipole.clone('bar_gt', knl=[0, 6, 10])
bar_lt = mad.elements.multipole.clone('bar_lt', knl=[0, 5, 'nine'])
knl = foo.knl
knl_eq = bar_eq.knl
knl_gt = bar_gt.knl
knl_lt = bar_lt.knl
assert knl == knl_eq
assert not (knl == knl_gt)
assert not (knl == knl_lt)
assert not (knl < knl_eq)
assert knl < knl_gt
assert not (knl < knl_lt)
assert knl <= knl_eq
assert knl <= knl_gt
assert not (knl <= knl_lt)
assert not (knl > knl_eq)
assert not (knl > knl_gt)
assert knl > knl_lt
assert knl >= knl_eq
assert not (knl >= knl_gt)
assert knl >= knl_lt
def test_command_map(mad):
command = mad.command
assert 'match' in command
assert 'sbend' in command
assert 'foooo' not in command
assert 'match' in list(command)
assert len(command) == len(list(command))
assert 'match' in str(command)
assert 'sbend' in str(command)
assert 'sbend' in mad.base_types
assert 'match' not in mad.base_types
def test_comments(mad):
var = mad.globals
mad('x = 1; ! x = 2;')
assert var.x == 1
mad('x = 2; // x = 3;')
assert var.x == 2
mad('x = 3; /* x = 4; */')
assert var.x == 3
mad('/* x = 3; */ x = 4;')
assert var.x == 4
mad('x = 5; ! /* */ x = 6;')
assert var.x == 5
mad('x = 5; /* ! */ x = 6;')
assert var.x == 6
def test_multiline_input(mad):
var = mad.globals
mad('''
x = 1;
y = 2;
''')
assert var.x, 1
assert var.y, 2
mad('''
x = /* 3;
y =*/ 4;
''')
assert var.x == 4
assert var.y == 2
mad('''
x = 1; /* */ x = 2;
*/ if (x == 1) {
x = 3;
}
''')
assert var.x == 2
mad('''
x = 1; /* x = 2;
*/ if (x == 1) {
x = 3;
}
''')
assert var.x == 3
def test_errors(mad):
mad.input(SEQU)
mad.beam()
mad.use(sequence='s1')
mad.select(flag='error', range='qp')
dkn = [1e-6, 2e-6, 3e-6]
dks = [4e-6, 5e-6, 6e-6]
mad.efcomp(dkn=dkn, dks=dks)
mad.ealign(dx=1e-3, dy=-4e-3)
fd = mad.sequence['s1'].expanded_elements['qp'].field_errors
al = mad.sequence['s1'].expanded_elements['qp'].align_errors
expected_dkn = np.hstack((dkn, np.zeros(len(fd.dkn) - len(dkn))))
expected_dks = np.hstack((dks, np.zeros(len(fd.dks) - len(dks))))
assert_allclose(fd.dkn, expected_dkn)
assert_allclose(fd.dks, expected_dks)
assert_allclose(al.dx, 1e-3)
assert_allclose(al.dy, -4e-3)
def test_subsequence(mad):
mad.input("""
d1: RBEND, l=0.1, angle=0.1;
seq1: sequence, l=0.1;
d1.1: d1, at=0.05;
endsequence;
seq2: sequence, l=0.2;
seq1, at=0.05;
seq1, at=0.15;
endsequence;
""")
seq2 = mad.sequence.seq2
assert isinstance(seq2.elements['seq1'], Sequence)
assert seq2.elements['seq1'].name == 'seq1'
assert seq2.elements['seq1'].element_names() == \
mad.sequence.seq1.element_names()
def test_dframe_after_use(mad):
mad.input("""
mqf.k1 = 0.3037241107;
mqd.k1 = -0.3037241107;
fodo: sequence, l=10, refer=entry;
mqf: quadrupole, at=0, l=1, k1:=mqf.k1;
dff: drift, at=1, l=4;
mqd: quadrupole, at=5, l=1, k1:=mqd.k1;
dfd: drift, at=6, l=4;
endsequence;
beam;
use, sequence=fodo;
twiss, sequence=fodo, x=0.1;
""")
index = ['#s', 'mqf', 'dff', 'mqd', 'dfd', '#e']
names = ['fodo$start', 'mqf', 'dff', 'mqd', 'dfd', 'fodo$end']
twiss = mad.table.twiss
assert index == twiss.row_names()
assert index == twiss.dframe().index.tolist()
assert names == twiss.dframe(index='name').index.tolist()
mad.use(sequence='fodo')
twiss = mad.table.twiss
# Should still work:
assert names == twiss.dframe(index='name').index.tolist()
# The following assert demonstrates the current behaviour and is
# meant to detect if the MAD-X implementation changes. It may lead
# to crashes or change in the future. In that case, please remove
# this line. It does not represent desired behaviour!
assert mad.table.twiss.row_names() == \
['#s', '#e', 'dfd', 'mqd', 'dff', 'mqf']
| 24,594
| 0
| 1,077
|
beca016de282d8ad828e46810f9fa27aac015a7f
| 4,852
|
py
|
Python
|
geneticpython/engines/single_objective/single_objective_engine.py
|
ngocjr7/geneticpython
|
4b4157523ce13b3da56cef61282cb0a984cd317b
|
[
"MIT"
] | null | null | null |
geneticpython/engines/single_objective/single_objective_engine.py
|
ngocjr7/geneticpython
|
4b4157523ce13b3da56cef61282cb0a984cd317b
|
[
"MIT"
] | null | null | null |
geneticpython/engines/single_objective/single_objective_engine.py
|
ngocjr7/geneticpython
|
4b4157523ce13b3da56cef61282cb0a984cd317b
|
[
"MIT"
] | null | null | null |
"""
File: single_objective_engine.py
Author: ngocjr7
Email: [email protected]
Github: https://github.com/ngocjr7
Description:
"""
from __future__ import absolute_import
from typing import List, Union, Callable
from functools import wraps
from collections import OrderedDict
from ..geneticengine import GeneticEngine
from ...core.population import Population
from ...core.operators import Selection, Crossover, Mutation, Replacement
from ...core.individual import Individual
from ...callbacks import Callback, CallbackList
from ...callbacks import History
import math
| 38.816
| 83
| 0.557708
|
"""
File: single_objective_engine.py
Author: ngocjr7
Email: [email protected]
Github: https://github.com/ngocjr7
Description:
"""
from __future__ import absolute_import
from typing import List, Union, Callable
from functools import wraps
from collections import OrderedDict
from ..geneticengine import GeneticEngine
from ...core.population import Population
from ...core.operators import Selection, Crossover, Mutation, Replacement
from ...core.individual import Individual
from ...callbacks import Callback, CallbackList
from ...callbacks import History
import math
class SingleObjectiveEngine(GeneticEngine):
def __init__(self, population: Population,
objective: Callable[[Individual], Union[float, int]] = None,
selection: Selection = None,
selection_size: int = None,
crossover: Crossover = None,
mutation: Mutation = None,
replacement: Replacement = None,
callbacks: List[Callback] = None,
generations: int = 100,
random_state: int = None):
callback_list = CallbackList(
callbacks, add_history=True, add_progbar=True)
super(SingleObjectiveEngine, self).__init__(population=population,
objective=objective,
selection=selection,
selection_size=selection_size,
crossover=crossover,
mutation=mutation,
replacement=replacement,
callbacks=callback_list,
generations=generations,
random_state=random_state)
def get_best_indv(self) -> Individual:
best_indv = min(self.population.individuals,
key=lambda indv: indv._objective)
return best_indv.clone()
def _update_metrics(self):
self.metrics = self.metrics or OrderedDict()
self.metrics['best_objective'] = self.get_best_indv().objective
def _update_logs(self, logs):
logs = logs or {}
logs.update(self.metrics or OrderedDict())
return logs
def compute_objectives(self, population: List[Individual]) -> List[Individual]:
ret = list()
# compute objectives
for indv in population:
if self.objective is None:
raise ValueError(f"Engine has no registered objective functions")
indv._coefficient = self.coefficient
indv._objective = self.objective(indv)
ret.append(indv)
return ret
def minimize_objective(self, fn):
"""
register objective function
"""
@wraps(fn)
def _fn_minimization_with_objective_check(indv):
'''
A wrapper function for objective function with objective value check.
'''
# Check indv type.
if not isinstance(indv, Individual):
raise TypeError(
'indv\'s class must be subclass of IndividualBase')
# Check objective.
objective = float(fn(indv))
is_invalid = not isinstance(
objective, (float, int)) or (math.isnan(objective))
if is_invalid:
msg = 'objective value(value: {}, type: {}) is invalid'
msg = msg.format(objective, type(objective))
raise ValueError(msg)
return objective
self.objective = _fn_minimization_with_objective_check
self.coefficient = 1
def maximize_objective(self, fn):
"""
register maximization of objective function
"""
@wraps(fn)
def _fn_maximization_with_objective_check(indv):
'''
A wrapper function for objective function with objective value check.
'''
# Check indv type.
if not isinstance(indv, Individual):
raise TypeError(
'indv\'s class must be subclass of IndividualBase')
# Check objective.
objective = float(fn(indv))
is_invalid = not isinstance(
objective, (float, int)) or (math.isnan(objective))
if is_invalid:
msg = 'objective value(value: {}, type: {}) is invalid'
msg = msg.format(objective, type(objective))
raise ValueError(msg)
return -objective
self.objective = _fn_maximization_with_objective_check
self.coefficient = -1
| 2,127
| 2,130
| 23
|
6c1a2218b3975b6c65e1c36ce24d867d86a06bee
| 517
|
py
|
Python
|
timing.py
|
darkless456/Python
|
1ba37d028e4a818ccfffc18682c1bac15554e3ac
|
[
"MIT"
] | null | null | null |
timing.py
|
darkless456/Python
|
1ba37d028e4a818ccfffc18682c1bac15554e3ac
|
[
"MIT"
] | null | null | null |
timing.py
|
darkless456/Python
|
1ba37d028e4a818ccfffc18682c1bac15554e3ac
|
[
"MIT"
] | null | null | null |
# timing.py
import datetime, calendar
today = datetime.date.today()
yesterday = today - datetime.timedelta(days = 1)
tomorrow = today + datetime.timedelta(days = 1)
print(yesterday, today, tomorrow)
# -------------------------
'''
last_friday = datetime.date.today()
oneday = datetime.timedelta(days = 1)
while last_friday.weekday() != calendar.FRIDAY :
last_friday = oneday
print(last_friday.strftime('%A, %d-%b-%Y'))
'''
t = datetime.datetime(2012,9,3,21,30)
k = datetime.date.today()
print(t, '\n', k)
| 19.884615
| 48
| 0.659574
|
# timing.py
import datetime, calendar
today = datetime.date.today()
yesterday = today - datetime.timedelta(days = 1)
tomorrow = today + datetime.timedelta(days = 1)
print(yesterday, today, tomorrow)
# -------------------------
'''
last_friday = datetime.date.today()
oneday = datetime.timedelta(days = 1)
while last_friday.weekday() != calendar.FRIDAY :
last_friday = oneday
print(last_friday.strftime('%A, %d-%b-%Y'))
'''
t = datetime.datetime(2012,9,3,21,30)
k = datetime.date.today()
print(t, '\n', k)
| 0
| 0
| 0
|
1676722d3f346db563fa9c4d25ad5528e4cd54fa
| 25,385
|
py
|
Python
|
manticore/core/smtlib/visitors.py
|
Srinivas11789/manticore
|
af3c6aada811833864efaccef7477f14e9b5e0dd
|
[
"Apache-2.0"
] | null | null | null |
manticore/core/smtlib/visitors.py
|
Srinivas11789/manticore
|
af3c6aada811833864efaccef7477f14e9b5e0dd
|
[
"Apache-2.0"
] | null | null | null |
manticore/core/smtlib/visitors.py
|
Srinivas11789/manticore
|
af3c6aada811833864efaccef7477f14e9b5e0dd
|
[
"Apache-2.0"
] | null | null | null |
from manticore.utils.helpers import CacheDict
from .expression import *
from functools import lru_cache
import logging
import operator
logger = logging.getLogger(__name__)
class Visitor(object):
''' Class/Type Visitor
Inherit your class visitor from this one and get called on a different
visiting function for each type of expression. It will call the first
implemented method for the __mro__ class order.
For example for a BitVecAdd it will try
visit_BitVecAdd() if not defined then it will try with
visit_BitVecOperation() if not defined then it will try with
visit_BitVec() if not defined then it will try with
visit_Operation() if not defined then it will try with
visit_Expression()
Other class named visitors are:
visit_Constant()
visit_Variable()
visit_Operation()
visit_BitVec()
visit_Bool()
visit_Array()
'''
@property
def visit(self, node, use_fixed_point=False):
'''
The entry point of the visitor.
The exploration algorithm is a DFS post-order traversal
The implementation used two stacks instead of a recursion
The final result is store in self.result
:param node: Node to explore
:type node: Expression
:param use_fixed_point: if True, it runs _methods until a fixed point is found
:type use_fixed_point: Bool
'''
cache = self._cache
visited = set()
stack = []
stack.append(node)
while stack:
node = stack.pop()
if node in cache:
self.push(cache[node])
elif isinstance(node, Operation):
if node in visited:
operands = [self.pop() for _ in range(len(node.operands))]
value = self._method(node, *operands)
visited.remove(node)
self.push(value)
cache[node] = value
else:
visited.add(node)
stack.append(node)
stack.extend(node.operands)
else:
self.push(self._method(node))
if use_fixed_point:
old_value = None
new_value = self.pop()
while old_value is not new_value:
self.visit(new_value)
old_value = new_value
new_value = self.pop()
self.push(new_value)
@staticmethod
class Translator(Visitor):
''' Simple visitor to translate an expression into something else
'''
class GetDeclarations(Visitor):
''' Simple visitor to collect all variables in an expression or set of
expressions
'''
@property
class GetDepth(Translator):
''' Simple visitor to collect all variables in an expression or set of
expressions
'''
constant_folder_simplifier_cache = CacheDict(max_size=150000, flush_perc=25)
@lru_cache(maxsize=128)
arithmetic_simplifier_cache = CacheDict(max_size=150000, flush_perc=25)
@lru_cache(maxsize=128)
@lru_cache(maxsize=128)
class TranslatorSmtlib(Translator):
''' Simple visitor to translate an expression to its smtlib representation
'''
unique = 0
@property
translation_table = {
BoolNot: 'not',
BoolEq: '=',
BoolAnd: 'and',
BoolOr: 'or',
BoolXor: 'xor',
BoolITE: 'ite',
BitVecAdd: 'bvadd',
BitVecSub: 'bvsub',
BitVecMul: 'bvmul',
BitVecDiv: 'bvsdiv',
BitVecUnsignedDiv: 'bvudiv',
BitVecMod: 'bvsmod',
BitVecRem: 'bvsrem',
BitVecUnsignedRem: 'bvurem',
BitVecShiftLeft: 'bvshl',
BitVecShiftRight: 'bvlshr',
BitVecArithmeticShiftLeft: 'bvashl',
BitVecArithmeticShiftRight: 'bvashr',
BitVecAnd: 'bvand',
BitVecOr: 'bvor',
BitVecXor: 'bvxor',
BitVecNot: 'bvnot',
BitVecNeg: 'bvneg',
LessThan: 'bvslt',
LessOrEqual: 'bvsle',
Equal: '=',
GreaterThan: 'bvsgt',
GreaterOrEqual: 'bvsge',
UnsignedLessThan: 'bvult',
UnsignedLessOrEqual: 'bvule',
UnsignedGreaterThan: 'bvugt',
UnsignedGreaterOrEqual: 'bvuge',
BitVecSignExtend: '(_ sign_extend %d)',
BitVecZeroExtend: '(_ zero_extend %d)',
BitVecExtract: '(_ extract %d %d)',
BitVecConcat: 'concat',
BitVecITE: 'ite',
ArrayStore: 'store',
ArraySelect: 'select',
}
@property
@property
class Replace(Visitor):
''' Simple visitor to replaces expressions '''
| 34.869505
| 167
| 0.593146
|
from manticore.utils.helpers import CacheDict
from .expression import *
from functools import lru_cache
import logging
import operator
logger = logging.getLogger(__name__)
class Visitor(object):
''' Class/Type Visitor
Inherit your class visitor from this one and get called on a different
visiting function for each type of expression. It will call the first
implemented method for the __mro__ class order.
For example for a BitVecAdd it will try
visit_BitVecAdd() if not defined then it will try with
visit_BitVecOperation() if not defined then it will try with
visit_BitVec() if not defined then it will try with
visit_Operation() if not defined then it will try with
visit_Expression()
Other class named visitors are:
visit_Constant()
visit_Variable()
visit_Operation()
visit_BitVec()
visit_Bool()
visit_Array()
'''
def __init__(self, cache=None, **kwargs):
super().__init__()
self._stack = []
self._cache = {} if cache is None else cache
def push(self, value):
assert value is not None
self._stack.append(value)
def pop(self):
if len(self._stack) == 0:
return None
result = self._stack.pop()
return result
@property
def result(self):
assert len(self._stack) == 1
return self._stack[-1]
def _method(self, expression, *args):
#Special case. Need to get the unsleeved version of the array
if isinstance(expression, ArrayProxy):
expression = expression.array
assert expression.__class__.__mro__[-1] is object
for cls in expression.__class__.__mro__:
sort = cls.__name__
methodname = 'visit_%s' % sort
if hasattr(self, methodname):
value = getattr(self, methodname)(expression, *args)
if value is not None:
assert isinstance(value, Expression)
return value
return self._rebuild(expression, args)
def visit(self, node, use_fixed_point=False):
'''
The entry point of the visitor.
The exploration algorithm is a DFS post-order traversal
The implementation used two stacks instead of a recursion
The final result is store in self.result
:param node: Node to explore
:type node: Expression
:param use_fixed_point: if True, it runs _methods until a fixed point is found
:type use_fixed_point: Bool
'''
cache = self._cache
visited = set()
stack = []
stack.append(node)
while stack:
node = stack.pop()
if node in cache:
self.push(cache[node])
elif isinstance(node, Operation):
if node in visited:
operands = [self.pop() for _ in range(len(node.operands))]
value = self._method(node, *operands)
visited.remove(node)
self.push(value)
cache[node] = value
else:
visited.add(node)
stack.append(node)
stack.extend(node.operands)
else:
self.push(self._method(node))
if use_fixed_point:
old_value = None
new_value = self.pop()
while old_value is not new_value:
self.visit(new_value)
old_value = new_value
new_value = self.pop()
self.push(new_value)
@staticmethod
def _rebuild(expression, operands):
if isinstance(expression, Constant):
return expression
if isinstance(expression, Operation):
if any(x is not y for x, y in zip(expression.operands, operands)):
import copy
aux = copy.copy(expression)
aux._operands = operands
return aux
return expression
class Translator(Visitor):
''' Simple visitor to translate an expression into something else
'''
def _method(self, expression, *args):
#Special case. Need to get the unsleeved version of the array
if isinstance(expression, ArrayProxy):
expression = expression.array
assert expression.__class__.__mro__[-1] is object
for cls in expression.__class__.__mro__:
sort = cls.__name__
methodname = 'visit_{:s}'.format(sort)
if hasattr(self, methodname):
value = getattr(self, methodname)(expression, *args)
if value is not None:
return value
raise Exception("No translation for this {}".format(expression))
class GetDeclarations(Visitor):
''' Simple visitor to collect all variables in an expression or set of
expressions
'''
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.variables = set()
def visit_Variable(self, expression):
self.variables.add(expression)
@property
def result(self):
return self.variables
class GetDepth(Translator):
''' Simple visitor to collect all variables in an expression or set of
expressions
'''
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def visit_Expression(self, expression):
return 1
def visit_Operation(self, expression, *operands):
return 1 + max(operands)
def get_depth(exp):
visitor = GetDepth()
visitor.visit(exp)
return visitor.result
class PrettyPrinter(Visitor):
def __init__(self, depth=None, **kwargs):
super().__init__(**kwargs)
self.output = ''
self.indent = 0
self.depth = depth
def _print(self, s, e=None):
self.output += ' ' * self.indent + str(s) # + '(%016x)'%hash(e)
self.output += '\n'
def visit(self, expression):
'''
Overload Visitor.visit because:
- We need a pre-order traversal
- We use a recursion as it makes it easier to keep track of the indentation
'''
self._method(expression)
def _method(self, expression, *args):
'''
Overload Visitor._method because we want to stop to iterate over the
visit_ functions as soon as a valid visit_ function is found
'''
assert expression.__class__.__mro__[-1] is object
for cls in expression.__class__.__mro__:
sort = cls.__name__
methodname = 'visit_%s' % sort
method = getattr(self, methodname, None)
if method is not None:
method(expression, *args)
return
return
def visit_Operation(self, expression, *operands):
self._print(expression.__class__.__name__, expression)
self.indent += 2
if self.depth is None or self.indent < self.depth * 2:
for o in expression.operands:
self.visit(o)
else:
self._print('...')
self.indent -= 2
return ''
def visit_BitVecExtract(self, expression):
self._print(expression.__class__.__name__ + '{%d:%d}' % (expression.begining, expression.end), expression)
self.indent += 2
if self.depth is None or self.indent < self.depth * 2:
for o in expression.operands:
self.visit(o)
else:
self._print('...')
self.indent -= 2
return ''
def visit_Constant(self, expression):
self._print(expression.value)
return ''
def visit_Variable(self, expression):
self._print(expression.name)
return ''
@property
def result(self):
return self.output
def pretty_print(expression, **kwargs):
if not isinstance(expression, Expression):
return str(expression)
pp = PrettyPrinter(**kwargs)
pp.visit(expression)
return pp.result
class ConstantFolderSimplifier(Visitor):
def __init__(self, **kw):
super().__init__(**kw)
operations = {BitVecAdd: operator.__add__,
BitVecSub: operator.__sub__,
BitVecMul: operator.__mul__,
BitVecDiv: operator.__truediv__,
BitVecShiftLeft: operator.__lshift__,
BitVecShiftRight: operator.__rshift__,
BitVecAnd: operator.__and__,
BitVecOr: operator.__or__,
BitVecXor: operator.__xor__,
BitVecNot: operator.__not__,
BitVecNeg: operator.__invert__,
LessThan: operator.__lt__,
LessOrEqual: operator.__le__,
Equal: operator.__eq__,
GreaterThan: operator.__gt__,
GreaterOrEqual: operator.__ge__,
BoolAnd: operator.__and__,
BoolOr: operator.__or__,
BoolNot: operator.__not__}
def visit_BitVecConcat(self, expression, *operands):
if all(isinstance(o, Constant) for o in operands):
result = 0
for o in operands:
result <<= o.size
result |= o.value
return BitVecConstant(expression.size, result, taint=expression.taint)
def visit_BitVecZeroExtend(self, expression, *operands):
if all(isinstance(o, Constant) for o in operands):
return BitVecConstant(expression.size, operands[0].value, taint=expression.taint)
def visit_BitVecSignExtend(self, expression, *operands):
if expression.extend == 0:
return operands[0]
def visit_BitVecExtract(self, expression, *operands):
if all(isinstance(o, Constant) for o in expression.operands):
value = expression.operands[0].value
begining = expression.begining
end = expression.end
value = value >> begining
mask = 2**(end - begining + 1) - 1
value = value & mask
return BitVecConstant(expression.size, value, taint=expression.taint)
def visit_BoolAnd(self, expression, a, b):
if isinstance(a, Constant) and a.value == True:
return b
if isinstance(b, Constant) and b.value == True:
return a
def visit_BoolOr(self, expression, a, b):
if isinstance(a, Constant) and a.value == False:
return b
if isinstance(b, Constant) and b.value == False:
return a
def visit_Operation(self, expression, *operands):
''' constant folding, if all operands of an expression are a Constant do the math '''
operation = self.operations.get(type(expression), None)
if operation is not None and \
all(isinstance(o, Constant) for o in operands):
value = operation(*(x.value for x in operands))
if isinstance(expression, BitVec):
return BitVecConstant(expression.size, value, taint=expression.taint)
else:
isinstance(expression, Bool)
return BoolConstant(value, taint=expression.taint)
else:
if any(operands[i] is not expression.operands[i] for i in range(len(operands))):
expression = self._rebuild(expression, operands)
return expression
constant_folder_simplifier_cache = CacheDict(max_size=150000, flush_perc=25)
@lru_cache(maxsize=128)
def constant_folder(expression):
global constant_folder_simplifier_cache
simp = ConstantFolderSimplifier(cache=constant_folder_simplifier_cache)
simp.visit(expression, use_fixed_point=True)
return simp.result
class ArithmeticSimplifier(Visitor):
def __init__(self, parent=None, **kw):
super().__init__(**kw)
@staticmethod
def _same_constant(a, b):
return isinstance(a, Constant) and\
isinstance(b, Constant) and\
a.value == b.value or a is b
@staticmethod
def _changed(expression, operands):
if isinstance(expression, Constant) and len(operands) > 0:
return True
arity = len(operands)
return any(operands[i] is not expression.operands[i] for i in range(arity))
def visit_Operation(self, expression, *operands):
''' constant folding, if all operands of an expression are a Constant do the math '''
if all(isinstance(o, Constant) for o in operands):
expression = constant_folder(expression)
if self._changed(expression, operands):
expression = self._rebuild(expression, operands)
return expression
def visit_BitVecZeroExtend(self, expression, *operands):
if self._changed(expression, operands):
return BitVecZeroExtend(expression.size, *operands, taint=expression.taint)
else:
return expression
def visit_BitVecITE(self, expression, *operands):
if isinstance(expression.operands[0], Constant):
if expression.operands[0].value:
result = expression.operands[1]
else:
result = expression.operands[2]
import copy
result = copy.copy(result)
result._taint |= expression.operands[0].taint
return result
if self._changed(expression, operands):
return BitVecITE(expression.size, *operands, taint=expression.taint)
def visit_BitVecExtract(self, expression, *operands):
''' extract(sizeof(a), 0)(a) ==> a
extract(16, 0)( concat(a,b,c,d) ) => concat(c, d)
extract(m,M)(and/or/xor a b ) => and/or/xor((extract(m,M) a) (extract(m,M) a)
'''
op = expression.operands[0]
begining = expression.begining
end = expression.end
# extract(sizeof(a), 0)(a) ==> a
if begining == 0 and end + 1 == op.size:
return op
elif isinstance(op, BitVecConcat):
new_operands = []
bitcount = 0
for item in reversed(op.operands):
if begining >= item.size:
begining -= item.size
else:
if bitcount < expression.size:
new_operands.append(item)
bitcount += item.size
if begining != expression.begining:
return BitVecExtract(BitVecConcat(sum([x.size for x in new_operands]), *reversed(new_operands)),
begining, expression.size, taint=expression.taint)
if isinstance(op, (BitVecAnd, BitVecOr, BitVecXor)):
bitoperand_a, bitoperand_b = op.operands
return op.__class__(BitVecExtract(bitoperand_a, begining, expression.size), BitVecExtract(bitoperand_b, begining, expression.size), taint=expression.taint)
def visit_BitVecAdd(self, expression, *operands):
''' a + 0 ==> a
0 + a ==> a
'''
left = expression.operands[0]
right = expression.operands[1]
if isinstance(right, BitVecConstant):
if right.value == 0:
return left
if isinstance(left, BitVecConstant):
if left.value == 0:
return right
def visit_BitVecSub(self, expression, *operands):
''' a - 0 ==> 0
(a + b) - b ==> a
(b + a) - b ==> a
'''
left = expression.operands[0]
right = expression.operands[1]
if isinstance(left, BitVecAdd):
if self._same_constant(left.operands[0], right):
return left.operands[1]
elif self._same_constant(left.operands[1], right):
return left.operands[0]
def visit_BitVecOr(self, expression, *operands):
''' a | 0 => a
0 | a => a
0xffffffff & a => 0xffffffff
a & 0xffffffff => 0xffffffff
'''
left = expression.operands[0]
right = expression.operands[1]
if isinstance(right, BitVecConstant):
if right.value == 0:
return left
elif right.value == left.mask:
return right
elif isinstance(left, BitVecOr):
left_left = left.operands[0]
left_right = left.operands[1]
if isinstance(right, Constant):
return BitVecOr(left_left, (left_right | right), taint=expression.taint)
elif isinstance(left, BitVecConstant):
return BitVecOr(right, left, taint=expression.taint)
def visit_BitVecAnd(self, expression, *operands):
''' ct & x => x & ct move constants to the right
a & 0 => 0 remove zero
a & 0xffffffff => a remove full mask
(b & ct2) & ct => b & (ct&ct2) associative property
(a & (b | c) => a&b | a&c distribute over |
'''
left = expression.operands[0]
right = expression.operands[1]
if isinstance(right, BitVecConstant):
if right.value == 0:
return right
elif right.value == right.mask:
return left
elif isinstance(left, BitVecAnd):
left_left = left.operands[0]
left_right = left.operands[1]
if isinstance(right, Constant):
return BitVecAnd(left_left, left_right & right, taint=expression.taint)
elif isinstance(left, BitVecOr):
left_left = left.operands[0]
left_right = left.operands[1]
return BitVecOr(right & left_left, right & left_right, taint=expression.taint)
elif isinstance(left, BitVecConstant):
return BitVecAnd(right, left, taint=expression.taint)
def visit_BitVecShiftLeft(self, expression, *operands):
''' a << 0 => a remove zero
a << ct => 0 if ct > sizeof(a) remove big constant shift
'''
left = expression.operands[0]
right = expression.operands[1]
if isinstance(right, BitVecConstant):
if right.value == 0:
return left
elif right.value >= right.size:
return left
def visit_ArraySelect(self, expression, *operands):
''' ArraySelect (ArrayStore((ArrayStore(x0,v0) ...),xn, vn), x0)
-> v0
'''
arr, index = operands
if isinstance(arr, ArrayVariable):
return
if isinstance(index, BitVecConstant):
ival = index.value
# props are slow and using them in tight loops should be avoided, esp when they offer no additional validation
# arr._operands[1] = arr.index, arr._operands[0] = arr.array
while isinstance(arr, ArrayStore) and isinstance(arr._operands[1], BitVecConstant) and arr._operands[1]._value != ival:
arr = arr._operands[0] # arr.array
if isinstance(index, BitVecConstant) and isinstance(arr, ArrayStore) and isinstance(arr.index, BitVecConstant) and arr.index.value == index.value:
return arr.value
else:
if arr is not expression.array:
return arr.select(index)
def visit_Expression(self, expression, *operands):
assert len(operands) == 0
assert not isinstance(expression, Operation)
return expression
arithmetic_simplifier_cache = CacheDict(max_size=150000, flush_perc=25)
@lru_cache(maxsize=128)
def arithmetic_simplify(expression):
global arithmetic_simplifier_cache
simp = ArithmeticSimplifier(cache=arithmetic_simplifier_cache)
simp.visit(expression, use_fixed_point=True)
return simp.result
def to_constant(expression):
value = arithmetic_simplify(expression)
if isinstance(value, Constant):
return value.value
elif isinstance(value, Array):
if value.index_max:
ba = bytearray()
for i in range(value.index_max):
value_i = simplify(value[i])
if not isinstance(value_i, Constant):
break
ba.append(value_i.value)
else:
return ba
return value
@lru_cache(maxsize=128)
def simplify(expression):
expression = constant_folder(expression)
expression = arithmetic_simplify(expression)
return expression
class TranslatorSmtlib(Translator):
''' Simple visitor to translate an expression to its smtlib representation
'''
unique = 0
def __init__(self, use_bindings=False, *args, **kw):
assert 'bindings' not in kw
super().__init__(*args, **kw)
self.use_bindings = use_bindings
self._bindings_cache = {}
self._bindings = []
def _add_binding(self, expression, smtlib):
if not self.use_bindings or len(smtlib) <= 10:
return smtlib
if smtlib in self._bindings_cache:
return self._bindings_cache[smtlib]
TranslatorSmtlib.unique += 1
name = 'a_%d' % TranslatorSmtlib.unique
self._bindings.append((name, expression, smtlib))
self._bindings_cache[expression] = name
return name
@property
def bindings(self):
return self._bindings
translation_table = {
BoolNot: 'not',
BoolEq: '=',
BoolAnd: 'and',
BoolOr: 'or',
BoolXor: 'xor',
BoolITE: 'ite',
BitVecAdd: 'bvadd',
BitVecSub: 'bvsub',
BitVecMul: 'bvmul',
BitVecDiv: 'bvsdiv',
BitVecUnsignedDiv: 'bvudiv',
BitVecMod: 'bvsmod',
BitVecRem: 'bvsrem',
BitVecUnsignedRem: 'bvurem',
BitVecShiftLeft: 'bvshl',
BitVecShiftRight: 'bvlshr',
BitVecArithmeticShiftLeft: 'bvashl',
BitVecArithmeticShiftRight: 'bvashr',
BitVecAnd: 'bvand',
BitVecOr: 'bvor',
BitVecXor: 'bvxor',
BitVecNot: 'bvnot',
BitVecNeg: 'bvneg',
LessThan: 'bvslt',
LessOrEqual: 'bvsle',
Equal: '=',
GreaterThan: 'bvsgt',
GreaterOrEqual: 'bvsge',
UnsignedLessThan: 'bvult',
UnsignedLessOrEqual: 'bvule',
UnsignedGreaterThan: 'bvugt',
UnsignedGreaterOrEqual: 'bvuge',
BitVecSignExtend: '(_ sign_extend %d)',
BitVecZeroExtend: '(_ zero_extend %d)',
BitVecExtract: '(_ extract %d %d)',
BitVecConcat: 'concat',
BitVecITE: 'ite',
ArrayStore: 'store',
ArraySelect: 'select',
}
def visit_BitVecConstant(self, expression):
assert isinstance(expression, BitVecConstant)
if expression.size == 1:
return '#' + bin(expression.value & expression.mask)[1:]
else:
return '#x%0*x' % (int(expression.size / 4), expression.value & expression.mask)
def visit_BoolConstant(self, expression):
return expression.value and 'true' or 'false'
def visit_Variable(self, expression):
return expression.name
def visit_ArraySelect(self, expression, *operands):
array_smt, index_smt = operands
if isinstance(expression.array, ArrayStore):
array_smt = self._add_binding(expression.array, array_smt)
return '(select %s %s)' % (array_smt, index_smt)
def visit_Operation(self, expression, *operands):
operation = self.translation_table[type(expression)]
if isinstance(expression, (BitVecSignExtend, BitVecZeroExtend)):
operation = operation % expression.extend
elif isinstance(expression, BitVecExtract):
operation = operation % (expression.end, expression.begining)
operands = [self._add_binding(*x) for x in zip(expression.operands, operands)]
return '(%s %s)' % (operation, ' '.join(operands))
@property
def results(self):
raise Exception("NOOO")
@property
def result(self):
output = super().result
if self.use_bindings:
for name, expr, smtlib in reversed(self._bindings):
output = '( let ((%s %s)) %s )' % (name, smtlib, output)
return output
def translate_to_smtlib(expression, **kwargs):
translator = TranslatorSmtlib(**kwargs)
translator.visit(expression)
return translator.result
class Replace(Visitor):
''' Simple visitor to replaces expressions '''
def __init__(self, bindings=None, **kwargs):
super().__init__(**kwargs)
if bindings is None:
raise ValueError("bindings needed in replace")
self._replace_bindings = bindings
def visit_Variable(self, expression):
if expression in self._replace_bindings:
return self._replace_bindings[expression]
return expression
def replace(expression, bindings):
if not bindings:
return expression
visitor = Replace(bindings)
visitor.visit(expression, use_fixed_point=True)
result_expression = visitor.result
#for var in get_variables(result_expression):
# assert var not in bindings
return result_expression
def get_variables(expression):
visitor = GetDeclarations()
visitor.visit(expression)
return visitor.result
| 10,256
| 9,441
| 942
|
34a65f614b2aed9614eeb0a853f10c891d51443b
| 280
|
py
|
Python
|
account/urls.py
|
Wizock/CRUD-master
|
07fbf3c64610a8725724fc934e66c6be35690cc9
|
[
"CC0-1.0"
] | 1
|
2022-03-13T09:50:04.000Z
|
2022-03-13T09:50:04.000Z
|
account/urls.py
|
Wizock/TodoButBetter
|
07fbf3c64610a8725724fc934e66c6be35690cc9
|
[
"CC0-1.0"
] | null | null | null |
account/urls.py
|
Wizock/TodoButBetter
|
07fbf3c64610a8725724fc934e66c6be35690cc9
|
[
"CC0-1.0"
] | null | null | null |
from django import urls
from django.conf.urls import include, url
from django.contrib.auth.forms import AdminPasswordChangeForm
from django.urls import path
from .views import *
urlpatterns = [
path(r'user/<str:usr>/', accountView),
path('register_/', register_),
]
| 23.333333
| 61
| 0.739286
|
from django import urls
from django.conf.urls import include, url
from django.contrib.auth.forms import AdminPasswordChangeForm
from django.urls import path
from .views import *
urlpatterns = [
path(r'user/<str:usr>/', accountView),
path('register_/', register_),
]
| 0
| 0
| 0
|
eb106290ccfcc64601c7996ff5449b815f8ad55c
| 1,020
|
py
|
Python
|
pyaibot.py
|
linsicheng20060818/PythonExercises
|
dff362b066de54186d8e2a71f0fb6b8fcb1c8f2a
|
[
"MIT"
] | 2
|
2019-01-05T13:34:08.000Z
|
2019-01-06T05:33:17.000Z
|
pyaibot.py
|
linsicheng20060818/PythonExercises
|
dff362b066de54186d8e2a71f0fb6b8fcb1c8f2a
|
[
"MIT"
] | null | null | null |
pyaibot.py
|
linsicheng20060818/PythonExercises
|
dff362b066de54186d8e2a71f0fb6b8fcb1c8f2a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""try:
import jieba
except:
print("please install jieba first.")
input("press any key to continue")
quit()"""
| 25.5
| 73
| 0.435294
|
# -*- coding: utf-8 -*-
"""try:
import jieba
except:
print("please install jieba first.")
input("press any key to continue")
quit()"""
def chchat(a):
import jieba
v=False
#if a=="quit" or a=="exit" or a=="退出" or a=="再见":
# import os
# exit()#Error
list1=jieba.lcut(a)#jieba分词
#print(list1)#Debug
i=0
b=""
if list1[i]=="你好":
return(a)
else:
for i in range(len(list1)):
if list1[i]=="你":
list1[i]="我"
elif list1[i]=="我":
list1[i]="你"
elif list1[i]=="几":
import random
v=True
'''for r in range(len(ni)):#'你'换成'我'
list1[r]="我"
for i in range(len(wo)):#'我'换成'你'
list1[i]="你"'''
for i in range(len(list1)):
b=b+list1[i]
if v==True:
return(random.randint(-10,2000))
else:
return((b.replace("吗","").replace("?","!")).replace("?","!"))
| 900
| 0
| 22
|
040bfae6c7070cefcd380adace083b08384a141a
| 391
|
py
|
Python
|
map_annotate_app/admin.py
|
tushar-agarwal/WikiNearby
|
0cc10bdeb1cb0728a6405808cc25f2d9e65dcb95
|
[
"MIT"
] | 2
|
2018-03-20T21:30:35.000Z
|
2019-03-19T04:58:42.000Z
|
map_annotate_app/admin.py
|
tushar-agarwal/map_annotate
|
0cc10bdeb1cb0728a6405808cc25f2d9e65dcb95
|
[
"MIT"
] | 2
|
2016-08-21T13:21:51.000Z
|
2016-09-07T10:01:24.000Z
|
map_annotate_app/admin.py
|
tushar-agarwal/WikiNearby
|
0cc10bdeb1cb0728a6405808cc25f2d9e65dcb95
|
[
"MIT"
] | 2
|
2016-10-06T13:47:24.000Z
|
2017-02-13T23:10:12.000Z
|
"""
This is the C{admin.py} file for C{map_annotate_app}.
For more details, see the documentation for C{map_annotate_app}.
"""
from django.contrib import admin
from .models import Crime
from .models import CrimeType
from .models import Location
from .models import Sansad
admin.site.register(Location)
admin.site.register(Crime)
admin.site.register(CrimeType)
admin.site.register(Sansad)
| 23
| 64
| 0.792839
|
"""
This is the C{admin.py} file for C{map_annotate_app}.
For more details, see the documentation for C{map_annotate_app}.
"""
from django.contrib import admin
from .models import Crime
from .models import CrimeType
from .models import Location
from .models import Sansad
admin.site.register(Location)
admin.site.register(Crime)
admin.site.register(CrimeType)
admin.site.register(Sansad)
| 0
| 0
| 0
|
81256977bc9b0ff7623132bef93f3ba3fe7872ae
| 4,444
|
py
|
Python
|
parsers.py
|
ekiwi/parsers
|
1837fe8c76b813da7befeee99668ab59b51aaefa
|
[
"BSD-2-Clause"
] | null | null | null |
parsers.py
|
ekiwi/parsers
|
1837fe8c76b813da7befeee99668ab59b51aaefa
|
[
"BSD-2-Clause"
] | null | null | null |
parsers.py
|
ekiwi/parsers
|
1837fe8c76b813da7befeee99668ab59b51aaefa
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2018, University of California, Berkeley
# author: Kevin Laeufer <[email protected]>
from collections import defaultdict
if __name__ == "__main__":
g = Grammar()
S, B, D, E, F = non_term = g.non_terminal('S', 'B', 'D', 'E', 'F')
u, v, w, x, y, z = term = g.terminal('u', 'v', 'w', 'x', 'y', 'z')
g.r(S, [u, B, D, z])
g.r(B, [B, v])
g.r(B, [w])
g.r(D, [E, F])
g.r(E, [y])
g.r(E, [])
g.r(F, [x])
g.r(F, [])
for nt in non_term:
print(f"FIRST({nt}): {g.first(nt)}")
print()
for nt in non_term:
print(f"FOLLOW({nt}): {g.follow(nt)}")
print()
print(g.ll_one(check_conflicts=False))
| 22.789744
| 81
| 0.633213
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2018, University of California, Berkeley
# author: Kevin Laeufer <[email protected]>
from collections import defaultdict
class Symbol:
def __init__(self, name, bold=False):
self.name = name
self._bold = bold
def __str__(self):
if self._bold:
return f"\033[1m{self.name}\033[0m"
else:
return f"{self.name}"
def __repr__(self):
return self.name
class NonTerminal(Symbol):
def __init__(self, name):
super().__init__(name, bold=True)
class Terminal(Symbol):
def __init__(self, name):
super().__init__(name)
class Epsilon(Terminal):
def __init__(self):
super().__init__("ε")
class Rule:
def __init__(self, lhs, rhs):
assert isinstance(lhs, NonTerminal)
assert isinstance(rhs, list)
assert all(isinstance(sym, Symbol) for sym in rhs)
self.lhs = lhs
self.rhs = rhs
def __getitem__(self, item):
if item not in {0, 1}:
raise IndexError(item)
if item == 0:
return self.lhs
else:
return self.rhs
def __str__(self):
return f"{self.lhs} -> {''.join(str(sym) for sym in self.rhs)}"
def __repr__(self):
return str(self)
class Grammar:
def __init__(self):
self._syms = [Epsilon(), Terminal('$')]
self._rules = []
self._root = None
def _make_syms(self, Type, names):
syms = [Type(name) for name in names]
self._syms += syms
return syms
def non_terminal(self, *names):
return self._make_syms(NonTerminal, names)
def terminal(self, *names):
return self._make_syms(Terminal, names)
def epsilon(self):
return self._syms[0]
def eof(self):
return self._syms[1]
def r(self, lhs, rhs):
if len(rhs) < 1: rhs = [self.epsilon()]
if self._root is None:
self._root = lhs # by convention
self._rules.append(Rule(lhs, rhs))
def first(self, sym):
assert isinstance(sym, Symbol), f"{sym} : {type(sym)}"
#print(f"FIRST({sym})")
if isinstance(sym, Terminal):
return {sym}
_first = set()
for lhs, rhs in self._rules:
if lhs != sym:
continue
annullable = True
for s in rhs:
if s == sym:
annullable = False
break
s_first = self.first(s)
s_annullable = self.epsilon() in s_first
_first = _first | (s_first - {self.epsilon()})
if not s_annullable:
annullable = False
break
if annullable:
_first |= {self.epsilon()}
return _first
def follow(self, non_term):
assert isinstance(non_term, NonTerminal)
_follow = set()
for lhs, rhs in self._rules:
if non_term not in rhs:
continue
for ii, sym in enumerate(rhs):
if sym != non_term:
continue
# scan following symbols
followed_by_annulable = True
for ff in rhs[ii+1:]:
_first = self.first(ff)
_follow |= (_first - {self.epsilon()})
if self.epsilon() not in _first:
followed_by_annulable = False
break
if followed_by_annulable:
_follow |= self.follow(lhs)
if non_term == self._root:
_follow |= {self.eof()}
return _follow
def ll_one(self, check_conflicts=False):
non_terms = [s for s in self._syms if isinstance(s, NonTerminal)]
table = defaultdict(dict)
for nt in non_terms:
terms = self.first(nt)
if self.epsilon() in terms:
terms = (terms - {self.epsilon()}) | self.follow(nt)
# pick rule:
for tt in terms:
applicable_rules = []
for rule in self._rules:
if rule.lhs != nt:
continue
# scan rhs
annullable = True
for sym in rule.rhs:
s_first = self.first(sym)
if tt in s_first:
applicable_rules.append(rule)
break
if not self.epsilon() in s_first:
annullable = False
break
if annullable and tt in self.follow(nt):
applicable_rules.append(rule)
if check_conflicts:
if len(applicable_rules) > 1:
raise RuntimeError(f"Found multiple applicable rules for ({nt}, {tt}):\n" +
'\n'.join(str(r) for r in applicable_rules))
table[nt][tt] = applicable_rules
return dict(table)
if __name__ == "__main__":
g = Grammar()
S, B, D, E, F = non_term = g.non_terminal('S', 'B', 'D', 'E', 'F')
u, v, w, x, y, z = term = g.terminal('u', 'v', 'w', 'x', 'y', 'z')
g.r(S, [u, B, D, z])
g.r(B, [B, v])
g.r(B, [w])
g.r(D, [E, F])
g.r(E, [y])
g.r(E, [])
g.r(F, [x])
g.r(F, [])
for nt in non_term:
print(f"FIRST({nt}): {g.first(nt)}")
print()
for nt in non_term:
print(f"FOLLOW({nt}): {g.follow(nt)}")
print()
print(g.ll_one(check_conflicts=False))
| 3,174
| -15
| 612
|
e6d68e135afb09552ac2f3d818b48fe79807d853
| 1,387
|
py
|
Python
|
Server.py
|
louis103/Python-Chat-Application
|
5212360194236daf5888d296fd71ed92303d7f94
|
[
"MIT"
] | 1
|
2021-11-22T20:04:16.000Z
|
2021-11-22T20:04:16.000Z
|
Server.py
|
louis103/Python-Chat-Application
|
5212360194236daf5888d296fd71ed92303d7f94
|
[
"MIT"
] | null | null | null |
Server.py
|
louis103/Python-Chat-Application
|
5212360194236daf5888d296fd71ed92303d7f94
|
[
"MIT"
] | null | null | null |
import socket, threading
HOST = "127.0.0.1"
PORT = 9999
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((HOST, PORT))
server.listen()
clients = []
nicknames = []
# broadcast func
# handle func
# receive func
print("******Server is running******")
receive()
| 23.508475
| 72
| 0.581831
|
import socket, threading
HOST = "127.0.0.1"
PORT = 9999
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((HOST, PORT))
server.listen()
clients = []
nicknames = []
# broadcast func
def broadcast(message):
for client in clients:
client.send(message)
# handle func
def handle(client):
while True:
try:
message = client.recv(2048)
print(f"{nicknames[clients.index(client)]} says {message}")
broadcast(message)
except:
index = clients.index(client)
clients.remove(client)
nickname = nicknames[index]
nicknames.remove(nickname)
break
# receive func
def receive():
while True:
client,address = server.accept()
print(f"Connected with {str(address)}!")
client.send("NICKNAME".encode("utf-8"))
nickname = client.recv(2048).decode("utf-8")
nicknames.append(nickname)
clients.append(client)
print(f"Nickname of new client is {nickname}")
broadcast(f"{nickname} joined the chat!\n".encode("utf-8"))
client.send("You Have Connected to the server".encode("utf-8"))
thread = threading.Thread(target=handle,args=(client,))
thread.start()
print("******Server is running******")
receive()
| 999
| 0
| 69
|
d2d464639fd7c2110b4c254cb34f59661eddfc5e
| 18,069
|
py
|
Python
|
pypsi/core.py
|
Rudedog9d/pypsi
|
38dda442b21b8deb569d61076ab0a19c0e78edc8
|
[
"0BSD"
] | null | null | null |
pypsi/core.py
|
Rudedog9d/pypsi
|
38dda442b21b8deb569d61076ab0a19c0e78edc8
|
[
"0BSD"
] | null | null | null |
pypsi/core.py
|
Rudedog9d/pypsi
|
38dda442b21b8deb569d61076ab0a19c0e78edc8
|
[
"0BSD"
] | null | null | null |
#
# Copyright (c) 2015, Adam Meily <[email protected]>
# Pypsi - https://github.com/ameily/pypsi
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
'''
Base classes for developing pluggable commands and plugins.
'''
import argparse
import sys
from pypsi.ansi import AnsiCodes, AnsiCode
from pypsi.format import get_lines, wrap_line
class Plugin(object):
'''
A plugin is an object that is able to modify a
:py:class:`pypsi.shell.Shell` object's behavior. Whereas a command can be
execute from user input, the `Plugin` class does not contain a `run()`
function.
'''
def __init__(self, preprocess=None, postprocess=None):
'''
Constructor can take two parameters: `preprocess` and `postprocess`
These values determine where the plugin resides inside of the
preprocess and postprocess list. This list, inside of
:class:`pypsi.shell.Shell`, is iterated sequentially, from most
priority to least. So, the highest priority value is 0, which means it
will be the first plugin to run, and the lowest value is 100, which
means it will be the last plugin to run. If either value is `None`, the
plugin is not added to the processing list. For example, if this plugin
only provides a preprocessing functionality, then postprocess should be
set to :const:`None`.
:param int preprocess: the preprocess priority
:param int postprocess: the postprocess priority
'''
self.preprocess = preprocess
self.postprocess = postprocess
def setup(self, shell): # pylint: disable=unused-argument
'''
Called after the plugin has been registered to the active shell.
:param pypsi.shell.Shell shell: the active shell
:returns int: 0 on success, -1 on failure
'''
return 0
def on_input(self, shell, line): # pylint: disable=unused-argument
'''
Called after input from the user has been received. The return value is
the preprocessed line. This means that modifying the line argument will
not populate back. If this function does no preprocessing, return line
unmodified.
:param pypsi.shell.Shell shell: the active shell
:param str line: the current input statement string
:returns str: the preprocessed line
'''
return line
def on_tokenize(self, shell, tokens, origin): # pylint: disable=unused-argument
'''
Called after an input string has been tokenized. If this function
performs no preprocessing, return the tokens unmodified.
:param pypsi.shell.Shell shell: the active shell
:param list tokens: the list of :class:`pypsi.cmdline.Token` objects
:param str origin: the origin of the input, can be either 'input' if
received from a call to `input()` or 'prompt' if the input is the
prompt to display to the user
:returns list: the list of preprocessed :class:`pypsi.cmdline.Token`
objects
'''
return tokens
def on_input_canceled(self, shell): # pylint: disable=unused-argument
'''
Called when the user can canceled entering a statement via SIGINT
(Ctrl+C).
:param pypsi.shell.Shell shell: the active shell
:returns int: 0 on success, -1 on error
'''
return 0
def on_statement_finished(self, shell, rc): # pylint: disable=unused-argument
'''
Called when a statement has been completely executed.
:param pypsi.shell.Shell shell: the active shell
:returns int: 0 on success, -1 on error
'''
return 0
class Command(object):
'''
A pluggable command that users can execute. All commands need to derive
from this class. When a command is executed by a user, the command's
:meth:`run` method will be called. The return value of the :meth:`run`
method is used when processing forthcoming commands in the active
statement. The return value must be an :class:`int` and follows the Unix
standard: 0 on success, less than 0 on error, and greater than 0 given
invalid input or incorrect usage.
Each command has a topic associated with it. This topic can be referenced
by commands such as :class:`pypsi.commands.help.HelpCommand` to categorize
commands in help messages.
A command can be used as a fallback handler by implementing the
:meth:`fallback` method. This is similar to the :meth:`run` method, except
that is accepts one more argument: the command name to execute that wasn't
found by the shell. The return value of :meth:`fallback` holds the same
purpose as the return value of :meth:`run`.
By the time :meth:`run` is called, the system streams have been updated to
point to the current file streams issued in the statement. For example, if
the statement redirects standard out (:attr:`sys.stdout`) to a file, the
destination file is automatically opened and :attr:`sys.stdout` is
redirected to the opened file stream. Once the command has complete
execution, the redirected stream is automatically closed and
:attr:`sys.stdout` is set to its original stream.
'''
def __init__(self, name, usage=None, brief=None,
topic=None, pipe='str'):
'''
:param str name: the name of the command which the user will reference
in the shell
:param str usage: the usage message to be displayed to the user
:param str brief: a brief description of the command
:param str topic: the topic that this command belongs to
:param str pipe: the type of data that will be read from and written to
any pipes
'''
self.name = name
self.usage = usage or ''
self.brief = brief or ''
self.topic = topic or ''
self.pipe = pipe or 'str'
def complete(self, shell, args, prefix): # pylint: disable=unused-argument
'''
Called when the user attempts a tab-completion action for this command.
:param pypsi.shell.Shell shell: the active shell
:param list args: the list of arguments, the last one containing the
cursor position
:param str prefix: the prefix that all items returned must start with
:returns list: the list of strings that could complete the current
action
'''
return []
def usage_error(self, shell, *args):
'''
Display an error message that indicates incorrect usage of this
command. After the error is displayed, the usage is printed.
:param pypsi.shell.Shell shell: the active shell
:param args: list of strings that are the error message
'''
self.error(shell, *args)
print(AnsiCodes.yellow, self.usage, AnsiCodes.reset, sep='')
def error(self, shell, *args): # pylint: disable=unused-argument
'''
Display an error message to the user.
:param pypsi.shell.Shell shell: the active shell
:param args: the error message to display
'''
msg = "{}: {}".format(self.name, ''.join([str(a) for a in args]))
print(AnsiCodes.red, msg, AnsiCodes.reset, file=sys.stderr, sep='')
def run(self, shell, args):
'''
Execute the command. All commands need to implement this method.
:param pypsi.shell.Shell shell: the active shell
:param list args: list of string arguments
:returns int: 0 on success, less than 0 on error, and greater than 0 on
invalid usage
'''
raise NotImplementedError()
def setup(self, shell): # pylint: disable=unused-argument
'''
Called when the plugin has been registered to the active shell.
:param pypsi.shell.Shell shell: the active shell
:returns int: 0 on success, -1 on error
'''
return 0
def fallback(self, shell, name, args): # pylint: disable=unused-argument
'''
Called when this command was set as the fallback command. The only
difference between this and :meth:`run` is that this method accepts the
command name that was entered by the user.
:param pypsi.shell.Shell shell: the active shell
:param str name: the name of the command to run
:param list args: arguments
:returns int: 0 on success, less than 0 on error, and greater than 0 on
invalid usage
'''
return None
class CommandShortCircuit(Exception):
'''
Exception raised when the user enter invalid arguments or requests usage
information via the -h and --help flags.
'''
def __init__(self, code):
'''
:param int code: the code the command should return
'''
super().__init__(code)
self.code = code
class PypsiArgParser(argparse.ArgumentParser):
'''
Customized :class:`argparse.ArgumentParser` for use in pypsi. This class
slightly modifies the base ArgumentParser so that the following occurs:
- The whole program does not exit on printing the help message or bad
arguments
- Any error messages are intercepted and printed on the active shell's
error stream
- Adds the option to provide callbacks for tab-completing
options and parameters
'''
def get_options(self):
'''
:return: All optional arguments (ex, '-v'/'--verbose')
'''
return list(self._op_completers.keys())
def get_option_completer(self, option):
'''
Returns the callback for the specified optional argument,
Or None if one was not specified.
:param str option: The Option
:return function: The callback function or None
'''
return self._op_completers.get(option, None)
def has_value(self, arg):
'''
Check if the optional argument has a value associated with it.
:param str arg: Optional argument to check
:return: True if arg has a value, false otherwise
'''
# pylint: disable=protected-access
# _option_string_actions is a dictionary containing all of the optional
# arguments and the argparse action they should perform. Currently, the
# only two actions that store a value are _AppendAction/_StoreAction.
# These represent the value passed to 'action' in add_argument:
# parser.add_argument('-l', '--long', action='store')
action = self._option_string_actions.get(arg, None)
return isinstance(action,
(argparse._AppendAction, argparse._StoreAction))
def get_positional_completer(self, pos):
'''
Get the callback for a positional parameter
:param pos: index of the parameter - first param's index = 0
:return: The callback if it exists, else None
'''
try:
return self._pos_completers[pos]
except IndexError:
if self._repeating_cb:
# A positional parameter is set to repeat
return self._repeating_cb
return None
def get_positional_arg_index(self, args):
'''
Get the positional index of a cursor, based on
optional arguments and positional arguments
:param list args: List of str arguments from the Command Line
:return:
'''
index = 0
for token in args:
if token in self._option_string_actions:
# Token is an optional argument ( ex, '-v' / '--verbose' )
if self.has_value(token):
# Optional Argument has a value associated with it, so
# reduce index to not count it's value as a pos param
index -= 1
else:
# Is a positional param or value for an optional argument
index += 1
# return zero-based index
return index - 1
def add_argument(self, *args, completer=None, **kwargs): # pylint: disable=arguments-differ
'''
Override add_argument function of argparse.ArgumentParser to
handle callback functions.
:param args: Positional arguments to pass up to argparse
:param function completer: Optional callback function for argument
:param kwargs: Keywork arguments to pass up to argparse
:return:
'''
cb = completer
nargs = kwargs.get('nargs', None)
chars = self.prefix_chars
if not args or len(args) == 1 and args[0][0] not in chars:
# If no positional args are supplied or only one is supplied and
# it doesn't look like an option string, parse a positional
# argument ( from argparse )
if nargs and nargs in ('+', '*', argparse.REMAINDER):
# Positional param can repeat
# Currently only stores the last repeating completer specified
self._repeating_cb = cb
self._pos_completers.append(cb)
else:
# Add an optional argument
for arg in args:
self._op_completers[arg] = cb
# Call argparse.add_argument()
return super().add_argument(*args, **kwargs)
def pypsi_print(*args, sep=' ', end='\n', file=None, flush=True, width=None,
wrap=True, wrap_prefix=None, replace_errors=True):
'''
Wraps the functionality of the Python builtin `print` function. The
:meth:`pypsi.shell.Shell.bootstrap` overrides the Python :meth:`print`
function with :meth:`pypsi_print`.
:param str sep: string to print between arguments
:param str end: string to print at the end of the output
:param file file: output stream, if this is :const:`None`, the default is
:data:`sys.stdout`
:param bool flush: whether to flush the output stream
:param int width: override the stream's width
:param bool wrap: whether to word wrap the output
:param str wrap_prefix: prefix string to print prior to every new line that
is wrapped
:param bool replace_errors: replace invalid character points with the '?'
character
'''
file = file or sys.stdout
last = len(args) - 1
def write_safe(data):
'''
Write the input str to the file and, if an encoding error occurs and
replace_errors is ``True``, remove invalid code points and print again.
'''
try:
file.write(data)
except UnicodeEncodeError:
if replace_errors:
enc = getattr(file, 'encoding', sys.getdefaultencoding())
file.write(data.encode(enc, errors='replace').decode(enc))
else:
raise
if wrap and hasattr(file, 'width') and file.width:
width = width or file.width
parts = []
for arg in args:
if isinstance(arg, str):
parts.append(arg)
elif arg is None:
parts.append('')
elif isinstance(arg, AnsiCode):
if file.isatty():
parts.append(str(arg))
elif arg.s is not None:
parts.append(str(arg.s))
else:
parts.append(str(arg))
txt = sep.join(parts)
for (line, endl) in get_lines(txt):
if line:
first = True
wrapno = 0
for wrapped in wrap_line(line, width, wrap_prefix=wrap_prefix):
if not wrapped:
continue
wrapno += 1
if not first:
file.write('\n')
else:
first = False
write_safe(wrapped)
if not line or endl:
file.write('\n')
else:
last = len(args) - 1
for (i, arg) in enumerate(args):
write_safe(str(arg))
if sep and i != last:
write_safe(sep)
if end:
write_safe(end)
if flush:
file.flush()
| 38.363057
| 96
| 0.625989
|
#
# Copyright (c) 2015, Adam Meily <[email protected]>
# Pypsi - https://github.com/ameily/pypsi
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
'''
Base classes for developing pluggable commands and plugins.
'''
import argparse
import sys
from pypsi.ansi import AnsiCodes, AnsiCode
from pypsi.format import get_lines, wrap_line
class Plugin(object):
'''
A plugin is an object that is able to modify a
:py:class:`pypsi.shell.Shell` object's behavior. Whereas a command can be
execute from user input, the `Plugin` class does not contain a `run()`
function.
'''
def __init__(self, preprocess=None, postprocess=None):
'''
Constructor can take two parameters: `preprocess` and `postprocess`
These values determine where the plugin resides inside of the
preprocess and postprocess list. This list, inside of
:class:`pypsi.shell.Shell`, is iterated sequentially, from most
priority to least. So, the highest priority value is 0, which means it
will be the first plugin to run, and the lowest value is 100, which
means it will be the last plugin to run. If either value is `None`, the
plugin is not added to the processing list. For example, if this plugin
only provides a preprocessing functionality, then postprocess should be
set to :const:`None`.
:param int preprocess: the preprocess priority
:param int postprocess: the postprocess priority
'''
self.preprocess = preprocess
self.postprocess = postprocess
def setup(self, shell): # pylint: disable=unused-argument
'''
Called after the plugin has been registered to the active shell.
:param pypsi.shell.Shell shell: the active shell
:returns int: 0 on success, -1 on failure
'''
return 0
def on_input(self, shell, line): # pylint: disable=unused-argument
'''
Called after input from the user has been received. The return value is
the preprocessed line. This means that modifying the line argument will
not populate back. If this function does no preprocessing, return line
unmodified.
:param pypsi.shell.Shell shell: the active shell
:param str line: the current input statement string
:returns str: the preprocessed line
'''
return line
def on_tokenize(self, shell, tokens, origin): # pylint: disable=unused-argument
'''
Called after an input string has been tokenized. If this function
performs no preprocessing, return the tokens unmodified.
:param pypsi.shell.Shell shell: the active shell
:param list tokens: the list of :class:`pypsi.cmdline.Token` objects
:param str origin: the origin of the input, can be either 'input' if
received from a call to `input()` or 'prompt' if the input is the
prompt to display to the user
:returns list: the list of preprocessed :class:`pypsi.cmdline.Token`
objects
'''
return tokens
def on_input_canceled(self, shell): # pylint: disable=unused-argument
'''
Called when the user can canceled entering a statement via SIGINT
(Ctrl+C).
:param pypsi.shell.Shell shell: the active shell
:returns int: 0 on success, -1 on error
'''
return 0
def on_statement_finished(self, shell, rc): # pylint: disable=unused-argument
'''
Called when a statement has been completely executed.
:param pypsi.shell.Shell shell: the active shell
:returns int: 0 on success, -1 on error
'''
return 0
class Command(object):
'''
A pluggable command that users can execute. All commands need to derive
from this class. When a command is executed by a user, the command's
:meth:`run` method will be called. The return value of the :meth:`run`
method is used when processing forthcoming commands in the active
statement. The return value must be an :class:`int` and follows the Unix
standard: 0 on success, less than 0 on error, and greater than 0 given
invalid input or incorrect usage.
Each command has a topic associated with it. This topic can be referenced
by commands such as :class:`pypsi.commands.help.HelpCommand` to categorize
commands in help messages.
A command can be used as a fallback handler by implementing the
:meth:`fallback` method. This is similar to the :meth:`run` method, except
that is accepts one more argument: the command name to execute that wasn't
found by the shell. The return value of :meth:`fallback` holds the same
purpose as the return value of :meth:`run`.
By the time :meth:`run` is called, the system streams have been updated to
point to the current file streams issued in the statement. For example, if
the statement redirects standard out (:attr:`sys.stdout`) to a file, the
destination file is automatically opened and :attr:`sys.stdout` is
redirected to the opened file stream. Once the command has complete
execution, the redirected stream is automatically closed and
:attr:`sys.stdout` is set to its original stream.
'''
def __init__(self, name, usage=None, brief=None,
topic=None, pipe='str'):
'''
:param str name: the name of the command which the user will reference
in the shell
:param str usage: the usage message to be displayed to the user
:param str brief: a brief description of the command
:param str topic: the topic that this command belongs to
:param str pipe: the type of data that will be read from and written to
any pipes
'''
self.name = name
self.usage = usage or ''
self.brief = brief or ''
self.topic = topic or ''
self.pipe = pipe or 'str'
def complete(self, shell, args, prefix): # pylint: disable=unused-argument
'''
Called when the user attempts a tab-completion action for this command.
:param pypsi.shell.Shell shell: the active shell
:param list args: the list of arguments, the last one containing the
cursor position
:param str prefix: the prefix that all items returned must start with
:returns list: the list of strings that could complete the current
action
'''
return []
def usage_error(self, shell, *args):
'''
Display an error message that indicates incorrect usage of this
command. After the error is displayed, the usage is printed.
:param pypsi.shell.Shell shell: the active shell
:param args: list of strings that are the error message
'''
self.error(shell, *args)
print(AnsiCodes.yellow, self.usage, AnsiCodes.reset, sep='')
def error(self, shell, *args): # pylint: disable=unused-argument
'''
Display an error message to the user.
:param pypsi.shell.Shell shell: the active shell
:param args: the error message to display
'''
msg = "{}: {}".format(self.name, ''.join([str(a) for a in args]))
print(AnsiCodes.red, msg, AnsiCodes.reset, file=sys.stderr, sep='')
def run(self, shell, args):
'''
Execute the command. All commands need to implement this method.
:param pypsi.shell.Shell shell: the active shell
:param list args: list of string arguments
:returns int: 0 on success, less than 0 on error, and greater than 0 on
invalid usage
'''
raise NotImplementedError()
def setup(self, shell): # pylint: disable=unused-argument
'''
Called when the plugin has been registered to the active shell.
:param pypsi.shell.Shell shell: the active shell
:returns int: 0 on success, -1 on error
'''
return 0
def fallback(self, shell, name, args): # pylint: disable=unused-argument
'''
Called when this command was set as the fallback command. The only
difference between this and :meth:`run` is that this method accepts the
command name that was entered by the user.
:param pypsi.shell.Shell shell: the active shell
:param str name: the name of the command to run
:param list args: arguments
:returns int: 0 on success, less than 0 on error, and greater than 0 on
invalid usage
'''
return None
class CommandShortCircuit(Exception):
'''
Exception raised when the user enter invalid arguments or requests usage
information via the -h and --help flags.
'''
def __init__(self, code):
'''
:param int code: the code the command should return
'''
super().__init__(code)
self.code = code
class PypsiArgParser(argparse.ArgumentParser):
'''
Customized :class:`argparse.ArgumentParser` for use in pypsi. This class
slightly modifies the base ArgumentParser so that the following occurs:
- The whole program does not exit on printing the help message or bad
arguments
- Any error messages are intercepted and printed on the active shell's
error stream
- Adds the option to provide callbacks for tab-completing
options and parameters
'''
def __init__(self, *args, **kwargs):
#: Store callback functions for positional parameters
self._pos_completers = []
#: Store callback functions for optional arguments with values
self._op_completers = {}
#: If a positional argument can be specified more than once,
# store it's callback here and return it multiple times
self._repeating_cb = None
super().__init__(*args, **kwargs)
def exit(self, status=0, message=None):
if message:
print(AnsiCodes.red, message, AnsiCodes.reset, file=sys.stderr,
sep='')
raise CommandShortCircuit(status)
def print_usage(self, file=None):
f = file or sys.stderr
print(AnsiCodes.yellow, self.format_usage(), AnsiCodes.reset, sep='',
file=f)
def print_help(self, file=None):
f = file or sys.stderr
print(AnsiCodes.yellow, self.format_help(), AnsiCodes.reset, sep='',
file=f)
def get_options(self):
'''
:return: All optional arguments (ex, '-v'/'--verbose')
'''
return list(self._op_completers.keys())
def get_option_completer(self, option):
'''
Returns the callback for the specified optional argument,
Or None if one was not specified.
:param str option: The Option
:return function: The callback function or None
'''
return self._op_completers.get(option, None)
def has_value(self, arg):
'''
Check if the optional argument has a value associated with it.
:param str arg: Optional argument to check
:return: True if arg has a value, false otherwise
'''
# pylint: disable=protected-access
# _option_string_actions is a dictionary containing all of the optional
# arguments and the argparse action they should perform. Currently, the
# only two actions that store a value are _AppendAction/_StoreAction.
# These represent the value passed to 'action' in add_argument:
# parser.add_argument('-l', '--long', action='store')
action = self._option_string_actions.get(arg, None)
return isinstance(action,
(argparse._AppendAction, argparse._StoreAction))
def get_positional_completer(self, pos):
'''
Get the callback for a positional parameter
:param pos: index of the parameter - first param's index = 0
:return: The callback if it exists, else None
'''
try:
return self._pos_completers[pos]
except IndexError:
if self._repeating_cb:
# A positional parameter is set to repeat
return self._repeating_cb
return None
def get_positional_arg_index(self, args):
'''
Get the positional index of a cursor, based on
optional arguments and positional arguments
:param list args: List of str arguments from the Command Line
:return:
'''
index = 0
for token in args:
if token in self._option_string_actions:
# Token is an optional argument ( ex, '-v' / '--verbose' )
if self.has_value(token):
# Optional Argument has a value associated with it, so
# reduce index to not count it's value as a pos param
index -= 1
else:
# Is a positional param or value for an optional argument
index += 1
# return zero-based index
return index - 1
def add_argument(self, *args, completer=None, **kwargs): # pylint: disable=arguments-differ
'''
Override add_argument function of argparse.ArgumentParser to
handle callback functions.
:param args: Positional arguments to pass up to argparse
:param function completer: Optional callback function for argument
:param kwargs: Keywork arguments to pass up to argparse
:return:
'''
cb = completer
nargs = kwargs.get('nargs', None)
chars = self.prefix_chars
if not args or len(args) == 1 and args[0][0] not in chars:
# If no positional args are supplied or only one is supplied and
# it doesn't look like an option string, parse a positional
# argument ( from argparse )
if nargs and nargs in ('+', '*', argparse.REMAINDER):
# Positional param can repeat
# Currently only stores the last repeating completer specified
self._repeating_cb = cb
self._pos_completers.append(cb)
else:
# Add an optional argument
for arg in args:
self._op_completers[arg] = cb
# Call argparse.add_argument()
return super().add_argument(*args, **kwargs)
def error(self, message):
print(AnsiCodes.red, self.prog, ": error: ", message, AnsiCodes.reset,
sep='', file=sys.stderr)
self.print_usage()
self.exit(1)
def pypsi_print(*args, sep=' ', end='\n', file=None, flush=True, width=None,
wrap=True, wrap_prefix=None, replace_errors=True):
'''
Wraps the functionality of the Python builtin `print` function. The
:meth:`pypsi.shell.Shell.bootstrap` overrides the Python :meth:`print`
function with :meth:`pypsi_print`.
:param str sep: string to print between arguments
:param str end: string to print at the end of the output
:param file file: output stream, if this is :const:`None`, the default is
:data:`sys.stdout`
:param bool flush: whether to flush the output stream
:param int width: override the stream's width
:param bool wrap: whether to word wrap the output
:param str wrap_prefix: prefix string to print prior to every new line that
is wrapped
:param bool replace_errors: replace invalid character points with the '?'
character
'''
file = file or sys.stdout
last = len(args) - 1
def write_safe(data):
'''
Write the input str to the file and, if an encoding error occurs and
replace_errors is ``True``, remove invalid code points and print again.
'''
try:
file.write(data)
except UnicodeEncodeError:
if replace_errors:
enc = getattr(file, 'encoding', sys.getdefaultencoding())
file.write(data.encode(enc, errors='replace').decode(enc))
else:
raise
if wrap and hasattr(file, 'width') and file.width:
width = width or file.width
parts = []
for arg in args:
if isinstance(arg, str):
parts.append(arg)
elif arg is None:
parts.append('')
elif isinstance(arg, AnsiCode):
if file.isatty():
parts.append(str(arg))
elif arg.s is not None:
parts.append(str(arg.s))
else:
parts.append(str(arg))
txt = sep.join(parts)
for (line, endl) in get_lines(txt):
if line:
first = True
wrapno = 0
for wrapped in wrap_line(line, width, wrap_prefix=wrap_prefix):
if not wrapped:
continue
wrapno += 1
if not first:
file.write('\n')
else:
first = False
write_safe(wrapped)
if not line or endl:
file.write('\n')
else:
last = len(args) - 1
for (i, arg) in enumerate(args):
write_safe(str(arg))
if sep and i != last:
write_safe(sep)
if end:
write_safe(end)
if flush:
file.flush()
| 1,062
| 0
| 135
|
5050f96a5b09f087a43bfbf366927f7c8ded0262
| 687
|
py
|
Python
|
2020/6/main.py
|
klrkdekira/adventofcode
|
8384d919093712c95b707b8e3f293dbfba22be74
|
[
"BSD-2-Clause"
] | 1
|
2020-12-01T08:41:55.000Z
|
2020-12-01T08:41:55.000Z
|
2020/6/main.py
|
klrkdekira/adventofcode
|
8384d919093712c95b707b8e3f293dbfba22be74
|
[
"BSD-2-Clause"
] | null | null | null |
2020/6/main.py
|
klrkdekira/adventofcode
|
8384d919093712c95b707b8e3f293dbfba22be74
|
[
"BSD-2-Clause"
] | null | null | null |
QUESTIONS = ['a', 'b', 'c', 'x', 'y', 'z']
if __name__ == '__main__':
with open('input') as file:
groups = []
group = []
for row in file:
row = row.strip()
if not row:
groups.append(group)
group = []
continue
group.append(row)
groups.append(group)
print(sum(map(anyone, groups)))
print(sum(map(everyone, groups)))
| 24.535714
| 66
| 0.519651
|
QUESTIONS = ['a', 'b', 'c', 'x', 'y', 'z']
def anyone(group):
answers = []
for person in group:
answers.extend(person)
return len(set(answers))
def everyone(group):
answers = set.intersection(*(set(person) for person in group))
return len(answers)
if __name__ == '__main__':
with open('input') as file:
groups = []
group = []
for row in file:
row = row.strip()
if not row:
groups.append(group)
group = []
continue
group.append(row)
groups.append(group)
print(sum(map(anyone, groups)))
print(sum(map(everyone, groups)))
| 189
| 0
| 46
|
62d60dcf7dc46a76d9c2e17fa4e8e062fa646f12
| 9,440
|
py
|
Python
|
frille-lang/lib/python3.6/site-packages/pathy/gcs.py
|
frillecode/CDS-spring-2021-language
|
a0b2116044cd20d4a34b98f23bd2663256c90c5d
|
[
"MIT"
] | null | null | null |
frille-lang/lib/python3.6/site-packages/pathy/gcs.py
|
frillecode/CDS-spring-2021-language
|
a0b2116044cd20d4a34b98f23bd2663256c90c5d
|
[
"MIT"
] | null | null | null |
frille-lang/lib/python3.6/site-packages/pathy/gcs.py
|
frillecode/CDS-spring-2021-language
|
a0b2116044cd20d4a34b98f23bd2663256c90c5d
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass
from typing import Any, Dict, Generator, List, Optional
from .base import (
Blob,
Bucket,
BucketClient,
BucketEntry,
ClientError,
PathyScanDir,
PurePathy,
)
try:
from google.api_core import exceptions as gcs_errors # type:ignore
from google.auth.exceptions import DefaultCredentialsError # type:ignore
from google.cloud.storage import Blob as GCSNativeBlob # type:ignore
from google.cloud.storage import Bucket as GCSNativeBucket # type:ignore
from google.cloud.storage import Client as GCSNativeClient # type:ignore
has_gcs = True
except ImportError:
GCSNativeBlob = Any
DefaultCredentialsError = BaseException
gcs_errors = Any
GCSNativeBucket = Any
GCSNativeClient = Any
has_gcs = False
_MISSING_DEPS = """You are using the GCS functionality of Pathy without
having the required dependencies installed.
Please try installing them:
pip install pathy[gcs]
"""
@dataclass
@dataclass
| 34.327273
| 86
| 0.584322
|
from dataclasses import dataclass
from typing import Any, Dict, Generator, List, Optional
from .base import (
Blob,
Bucket,
BucketClient,
BucketEntry,
ClientError,
PathyScanDir,
PurePathy,
)
try:
from google.api_core import exceptions as gcs_errors # type:ignore
from google.auth.exceptions import DefaultCredentialsError # type:ignore
from google.cloud.storage import Blob as GCSNativeBlob # type:ignore
from google.cloud.storage import Bucket as GCSNativeBucket # type:ignore
from google.cloud.storage import Client as GCSNativeClient # type:ignore
has_gcs = True
except ImportError:
GCSNativeBlob = Any
DefaultCredentialsError = BaseException
gcs_errors = Any
GCSNativeBucket = Any
GCSNativeClient = Any
has_gcs = False
_MISSING_DEPS = """You are using the GCS functionality of Pathy without
having the required dependencies installed.
Please try installing them:
pip install pathy[gcs]
"""
class BucketEntryGCS(BucketEntry["BucketGCS", GCSNativeBlob]):
...
@dataclass
class BlobGCS(Blob[GCSNativeBucket, GCSNativeBlob]):
def delete(self) -> None:
self.raw.delete()
def exists(self) -> bool:
return self.raw.exists()
@dataclass
class BucketGCS(Bucket):
name: str
bucket: GCSNativeBucket
def get_blob(self, blob_name: str) -> Optional[BlobGCS]:
assert isinstance(
blob_name, str
), f"expected str blob name, but found: {type(blob_name)}"
native_blob = None
try:
native_blob = self.bucket.get_blob(blob_name)
except gcs_errors.ClientError:
pass
if native_blob is None:
return None
return BlobGCS(
bucket=self.bucket,
owner=native_blob.owner,
name=native_blob.name,
raw=native_blob,
size=native_blob.size,
updated=int(native_blob.updated.timestamp()),
)
def copy_blob( # type:ignore[override]
self, blob: BlobGCS, target: "BucketGCS", name: str
) -> Optional[BlobGCS]:
assert blob.raw is not None, "raw storage.Blob instance required"
native_blob = self.bucket.copy_blob(blob.raw, target.bucket, name)
if native_blob is None:
return None
return BlobGCS(
bucket=self.bucket,
owner=native_blob.owner,
name=native_blob.name,
raw=native_blob,
size=native_blob.size,
updated=int(native_blob.updated.timestamp()),
)
def delete_blob(self, blob: BlobGCS) -> None: # type:ignore[override]
return self.bucket.delete_blob(blob.name)
def delete_blobs(self, blobs: List[BlobGCS]) -> None: # type:ignore[override]
return self.bucket.delete_blobs(blobs)
def exists(self) -> bool:
try:
return self.bucket.exists()
except gcs_errors.ClientError:
return False
class BucketClientGCS(BucketClient):
client: Optional[GCSNativeClient]
@property
def client_params(self) -> Any:
return dict(client=self.client)
def __init__(self, **kwargs: Any) -> None:
self.recreate(**kwargs)
def recreate(self, **kwargs: Any) -> None:
creds = kwargs["credentials"] if "credentials" in kwargs else None
if creds is not None:
kwargs["project"] = creds.project_id
try:
self.client = GCSNativeClient(**kwargs)
except TypeError:
# TypeError is raised if the imports for GCSNativeClient fail and are
# assigned to Any, which is not callable.
self.client = None
def make_uri(self, path: PurePathy) -> str:
return str(path)
def create_bucket(self, path: PurePathy) -> Bucket:
assert self.client is not None, _MISSING_DEPS
return self.client.create_bucket(path.root)
def delete_bucket(self, path: PurePathy) -> None:
assert self.client is not None, _MISSING_DEPS
bucket = self.client.get_bucket(path.root)
bucket.delete()
def exists(self, path: PurePathy) -> bool:
# Because we want all the parents of a valid blob (e.g. "directory" in
# "directory/foo.file") to return True, we enumerate the blobs with a prefix
# and compare the object names to see if they match a substring of the path
key_name = str(path.key)
try:
for obj in self.list_blobs(path):
if obj.name == key_name:
return True
if obj.name.startswith(key_name + path._flavour.sep):
return True
except gcs_errors.ClientError:
return False
return False
def lookup_bucket(self, path: PurePathy) -> Optional[BucketGCS]:
assert self.client is not None, _MISSING_DEPS
try:
native_bucket = self.client.bucket(path.root)
if native_bucket is not None:
return BucketGCS(str(path.root), bucket=native_bucket)
except gcs_errors.ClientError as err:
print(err)
return None
def get_bucket(self, path: PurePathy) -> BucketGCS:
assert self.client is not None, _MISSING_DEPS
try:
native_bucket = self.client.bucket(path.root)
if native_bucket is not None:
return BucketGCS(str(path.root), bucket=native_bucket)
raise FileNotFoundError(f"Bucket {path.root} does not exist!")
except gcs_errors.ClientError as e:
raise ClientError(message=e.message, code=e.code)
def list_buckets(
self, **kwargs: Dict[str, Any]
) -> Generator[GCSNativeBucket, None, None]:
assert self.client is not None, _MISSING_DEPS
return self.client.list_buckets(**kwargs) # type:ignore
def scandir( # type:ignore[override]
self,
path: Optional[PurePathy] = None,
prefix: Optional[str] = None,
delimiter: Optional[str] = None,
) -> PathyScanDir:
return _GCSScanDir(client=self, path=path, prefix=prefix, delimiter=delimiter)
def list_blobs(
self,
path: PurePathy,
prefix: Optional[str] = None,
delimiter: Optional[str] = None,
include_dirs: bool = False,
) -> Generator[BlobGCS, None, None]:
assert self.client is not None, _MISSING_DEPS
continuation_token = None
bucket = self.lookup_bucket(path)
if bucket is None:
return
while True:
if continuation_token:
response = self.client.list_blobs(
path.root,
prefix=prefix,
delimiter=delimiter,
page_token=continuation_token,
)
else:
response = self.client.list_blobs(
path.root, prefix=prefix, delimiter=delimiter
)
for page in response.pages:
for item in page:
yield BlobGCS(
bucket=bucket,
owner=item.owner,
name=item.name,
raw=item,
size=item.size,
updated=item.updated.timestamp(),
)
if response.next_page_token is None:
break
continuation_token = response.next_page_token
class _GCSScanDir(PathyScanDir):
_client: BucketClientGCS
def scandir(self) -> Generator[BucketEntryGCS, None, None]:
assert self._client.client is not None, _MISSING_DEPS
continuation_token = None
if self._path is None or not self._path.root:
gcs_bucket: GCSNativeBucket
for gcs_bucket in self._client.client.list_buckets():
yield BucketEntryGCS(gcs_bucket.name, is_dir=True, raw=None)
return
sep = self._path._flavour.sep
bucket = self._client.lookup_bucket(self._path)
if bucket is None:
return
while True:
if continuation_token:
response = self._client.client.list_blobs(
bucket.name,
prefix=self._prefix,
delimiter=sep,
page_token=continuation_token,
)
else:
response = self._client.client.list_blobs(
bucket.name, prefix=self._prefix, delimiter=sep
)
for page in response.pages:
for folder in list(page.prefixes):
full_name = folder[:-1] if folder.endswith(sep) else folder
name = full_name.split(sep)[-1]
if name:
yield BucketEntryGCS(name, is_dir=True, raw=None)
for item in page:
name = item.name.split(sep)[-1]
if name:
yield BucketEntryGCS(
name=name,
is_dir=False,
size=item.size,
last_modified=item.updated.timestamp(),
raw=item,
)
if response.next_page_token is None:
break
continuation_token = response.next_page_token
| 7,541
| 718
| 166
|
3deebfeffce2abe1ba44b1052c91bfb62a647fb4
| 1,750
|
py
|
Python
|
project/both.py
|
mahmoudabuelnaga/baby-names-scraping
|
44ded037a4c24306123c4da749e32575eee4afc6
|
[
"MIT"
] | null | null | null |
project/both.py
|
mahmoudabuelnaga/baby-names-scraping
|
44ded037a4c24306123c4da749e32575eee4afc6
|
[
"MIT"
] | null | null | null |
project/both.py
|
mahmoudabuelnaga/baby-names-scraping
|
44ded037a4c24306123c4da749e32575eee4afc6
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from time import sleep
from bs4 import BeautifulSoup
import csv
import requests
links = []
items = []
for i in range(1,38):
endpoint = f"https://baby.webteb.com/baby-names/%D8%A7%D8%B3%D9%85%D8%A7%D8%A1-%D8%A7%D9%88%D9%84%D8%A7%D8%AF-%D9%88%D8%A8%D9%86%D8%A7%D8%AA?pageindex={i}"
get_response = requests.get(endpoint)
# print(get_response.content)
soup = BeautifulSoup(get_response.content, 'lxml')
# print(soup.prettify())
section = soup.find('div', {'class':'page-section'})
for li in section.find_all('li'):
links.append(li.a['href'])
print(f'{i}', li.a['href'])
for i, link in zip(range(1,len(links)+1), links):
url = f"https://baby.webteb.com{link}"
get_response = requests.get(url)
soup = BeautifulSoup(get_response.content, 'lxml')
content = soup.find('div', {'class':'section name'})
section1 = content.find('div', {'class':'section'})
name_detail = content.find('div', {'class':'name-details'})
section2 = name_detail.find('div', {'class':'section'})
span = section2.find('span', {'class':'latin'})
item = {}
if content.h1.text:
item['arabic_name'] = content.h1.text
if section1.p.text:
item['meaning'] = section1.p.text
if span.text:
item['english_name'] = span.text
print(i, content.h1.text, section1.p.text, span.text)
items.append(item)
filename = '/home/naga/dev/babyNamesScraping/project/both.csv'
with open(filename, 'w', newline='') as f:
w = csv.DictWriter(f, fieldnames=['arabic_name','meaning', 'english_name'], extrasaction='ignore' , delimiter = ';')
w.writeheader()
print(items)
for item in items:
w.writerow(item)
print(item)
| 29.166667
| 159
| 0.630857
|
# -*- coding: utf-8 -*-
from time import sleep
from bs4 import BeautifulSoup
import csv
import requests
links = []
items = []
for i in range(1,38):
endpoint = f"https://baby.webteb.com/baby-names/%D8%A7%D8%B3%D9%85%D8%A7%D8%A1-%D8%A7%D9%88%D9%84%D8%A7%D8%AF-%D9%88%D8%A8%D9%86%D8%A7%D8%AA?pageindex={i}"
get_response = requests.get(endpoint)
# print(get_response.content)
soup = BeautifulSoup(get_response.content, 'lxml')
# print(soup.prettify())
section = soup.find('div', {'class':'page-section'})
for li in section.find_all('li'):
links.append(li.a['href'])
print(f'{i}', li.a['href'])
for i, link in zip(range(1,len(links)+1), links):
url = f"https://baby.webteb.com{link}"
get_response = requests.get(url)
soup = BeautifulSoup(get_response.content, 'lxml')
content = soup.find('div', {'class':'section name'})
section1 = content.find('div', {'class':'section'})
name_detail = content.find('div', {'class':'name-details'})
section2 = name_detail.find('div', {'class':'section'})
span = section2.find('span', {'class':'latin'})
item = {}
if content.h1.text:
item['arabic_name'] = content.h1.text
if section1.p.text:
item['meaning'] = section1.p.text
if span.text:
item['english_name'] = span.text
print(i, content.h1.text, section1.p.text, span.text)
items.append(item)
filename = '/home/naga/dev/babyNamesScraping/project/both.csv'
with open(filename, 'w', newline='') as f:
w = csv.DictWriter(f, fieldnames=['arabic_name','meaning', 'english_name'], extrasaction='ignore' , delimiter = ';')
w.writeheader()
print(items)
for item in items:
w.writerow(item)
print(item)
| 0
| 0
| 0
|
bb2ad701ba189d46d6b8954c67ceae977de8da75
| 4,318
|
py
|
Python
|
sde/solvers.py
|
d-l-fernandes/ito_general
|
8a9889fa13e5893e923c8d32fd1c94d22aec84d9
|
[
"MIT"
] | null | null | null |
sde/solvers.py
|
d-l-fernandes/ito_general
|
8a9889fa13e5893e923c8d32fd1c94d22aec84d9
|
[
"MIT"
] | null | null | null |
sde/solvers.py
|
d-l-fernandes/ito_general
|
8a9889fa13e5893e923c8d32fd1c94d22aec84d9
|
[
"MIT"
] | null | null | null |
from typing import Tuple
import haiku as hk
import jax.numpy as jnp
import numpyro
from absl import flags
from numpyro.distributions.continuous import MultivariateNormal
from sde import drifts, diffusions
Array = jnp.ndarray
flags.DEFINE_enum("solver", "strong_3_halfs", ["euler_maruyama", "strong_3_halfs"], "Solver to use.")
FLAGS = flags.FLAGS
solvers_dict = {
"euler_maruyama": EulerMaruyamaSolver,
"strong_3_halfs": Strong3HalfsSolver
}
| 40.35514
| 117
| 0.619268
|
from typing import Tuple
import haiku as hk
import jax.numpy as jnp
import numpyro
from absl import flags
from numpyro.distributions.continuous import MultivariateNormal
from sde import drifts, diffusions
Array = jnp.ndarray
flags.DEFINE_enum("solver", "strong_3_halfs", ["euler_maruyama", "strong_3_halfs"], "Solver to use.")
FLAGS = flags.FLAGS
class BaseSolver:
def __init__(self, delta_t: float, beta_dims: int, drift: drifts.BaseDrift, diffusion: diffusions.BaseDiffusion):
self.delta_t = delta_t
self.beta_dims = beta_dims
self.drift = drift
self.diffusion = diffusion
def __call__(self, x_0: Array, time: float) -> Array:
raise NotImplementedError
class EulerMaruyamaSolver(BaseSolver):
def __init__(self, delta_t: float, beta_dims: int, drift: drifts.BaseDrift, diffusion: diffusions.BaseDiffusion):
super().__init__(delta_t, beta_dims, drift, diffusion)
def __call__(self, x_0: Array, time: float) -> Array:
rng_beta = hk.next_rng_key()
delta_beta = numpyro.sample("delta_beta",
MultivariateNormal(
loc=jnp.zeros(self.beta_dims),
scale_tril=jnp.sqrt(self.delta_t) * jnp.eye(self.beta_dims)),
rng_key=rng_beta)
drift = self.drift(x_0, time)
diff = self.diffusion(x_0, time)
x_1 = x_0 + drift * self.delta_t + jnp.matmul(diff, delta_beta)
return x_1
class Strong3HalfsSolver(BaseSolver):
def __init__(self, delta_t: float, beta_dims: int, drift: drifts.BaseDrift, diffusion: diffusions.BaseDiffusion):
super().__init__(delta_t, beta_dims, drift, diffusion)
def __call__(self, x_0: Array, time: float) -> Array:
rng_beta = hk.next_rng_key()
# Vector of zeros
beta_mean_vector = jnp.zeros((self.beta_dims*2, ))
# Covariance matrix for the betas and gammas
beta_covariance_top_left = self.delta_t ** 3 / 3 * jnp.eye(self.beta_dims)
beta_covariance_top_right = self.delta_t ** 2 / 2 * jnp.eye(self.beta_dims)
beta_covariance_bottom_right = self.delta_t * jnp.eye(self.beta_dims)
beta_covariance_top = jnp.concatenate((beta_covariance_top_left, beta_covariance_top_right), axis=1)
beta_covariance_bottom = jnp.concatenate((beta_covariance_top_right, beta_covariance_bottom_right),
axis=1)
beta_covariance = jnp.concatenate((beta_covariance_top, beta_covariance_bottom), axis=0)
delta_gamma_beta = numpyro.sample("delta_gamma_beta",
MultivariateNormal(loc=beta_mean_vector,
covariance_matrix=beta_covariance),
rng_key=rng_beta)
delta_gamma = delta_gamma_beta[:self.beta_dims]
delta_beta = delta_gamma_beta[self.beta_dims:]
drift_0 = self.drift(x_0, time)
diff = self.diffusion(x_0, time)
diff_plus = self.diffusion(x_0, time + self.delta_t)
init_x_1 = x_0 + drift_0 * self.delta_t + jnp.matmul(diff, delta_beta)
init_x_1 += 1. / self.delta_t * jnp.matmul(diff_plus - diff, delta_beta * self.delta_t - delta_gamma)
def scan_fn(carry, s):
x_1 = carry
x_0_plus = \
x_0 + drift_0 * self.delta_t / self.beta_dims + \
diff[:, s] * jnp.sqrt(self.delta_t)
x_0_minus = \
x_0 + drift_0 * self.delta_t / self.beta_dims - \
diff[:, s] * jnp.sqrt(self.delta_t)
drift_0_plus = self.drift(x_0_plus, time + self.delta_t)
drift_0_minus = self.drift(x_0_minus, time + self.delta_t)
x_1 += 0.25 * self.delta_t * (drift_0_plus + drift_0_minus)
x_1 -= 0.5 * drift_0 * self.delta_t
x_1 += \
1. / (2 * jnp.sqrt(self.delta_t)) * (drift_0_plus - drift_0_minus) * delta_gamma[s]
return x_1, None
final_x_1, _ = hk.scan(scan_fn, init_x_1, jnp.arange(self.beta_dims))
return final_x_1
solvers_dict = {
"euler_maruyama": EulerMaruyamaSolver,
"strong_3_halfs": Strong3HalfsSolver
}
| 3,602
| 29
| 228
|
79cfe256477332ba59823cac9001633a38f29bc4
| 5,767
|
py
|
Python
|
pysnmp/JUNIPER-SONET-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 11
|
2021-02-02T16:27:16.000Z
|
2021-08-31T06:22:49.000Z
|
pysnmp/JUNIPER-SONET-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 75
|
2021-02-24T17:30:31.000Z
|
2021-12-08T00:01:18.000Z
|
pysnmp/JUNIPER-SONET-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module JUNIPER-SONET-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/JUNIPER-SONET-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:50:16 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ConstraintsUnion, ConstraintsIntersection, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueSizeConstraint")
ifIndex, ifDescr = mibBuilder.importSymbols("IF-MIB", "ifIndex", "ifDescr")
jnxMibs, jnxSonetNotifications = mibBuilder.importSymbols("JUNIPER-SMI", "jnxMibs", "jnxSonetNotifications")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
MibIdentifier, Gauge32, iso, ObjectIdentity, TimeTicks, ModuleIdentity, Integer32, Counter32, Unsigned32, MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, Counter64, IpAddress, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "Gauge32", "iso", "ObjectIdentity", "TimeTicks", "ModuleIdentity", "Integer32", "Counter32", "Unsigned32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "Counter64", "IpAddress", "Bits")
DisplayString, DateAndTime, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "DateAndTime", "TextualConvention")
jnxSonet = ModuleIdentity((1, 3, 6, 1, 4, 1, 2636, 3, 20))
jnxSonet.setRevisions(('2002-12-12 00:00', '2002-08-08 00:00',))
if mibBuilder.loadTexts: jnxSonet.setLastUpdated('200307182154Z')
if mibBuilder.loadTexts: jnxSonet.setOrganization('Juniper Networks, Inc.')
jnxSonetAlarms = MibIdentifier((1, 3, 6, 1, 4, 1, 2636, 3, 20, 1))
jnxSonetAlarmTable = MibTable((1, 3, 6, 1, 4, 1, 2636, 3, 20, 1, 1), )
if mibBuilder.loadTexts: jnxSonetAlarmTable.setStatus('current')
jnxSonetAlarmEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2636, 3, 20, 1, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: jnxSonetAlarmEntry.setStatus('current')
jnxSonetCurrentAlarms = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 20, 1, 1, 1, 1), JnxSonetAlarmId()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxSonetCurrentAlarms.setStatus('current')
jnxSonetLastAlarmId = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 20, 1, 1, 1, 2), JnxSonetAlarmId()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxSonetLastAlarmId.setStatus('current')
jnxSonetLastAlarmTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 20, 1, 1, 1, 3), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxSonetLastAlarmTime.setStatus('current')
jnxSonetLastAlarmDate = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 20, 1, 1, 1, 4), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxSonetLastAlarmDate.setStatus('current')
jnxSonetLastAlarmEvent = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 20, 1, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("none", 1), ("set", 2), ("cleared", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxSonetLastAlarmEvent.setStatus('current')
jnxSonetNotificationPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 2636, 4, 6, 0))
jnxSonetAlarmSet = NotificationType((1, 3, 6, 1, 4, 1, 2636, 4, 6, 0, 1)).setObjects(("IF-MIB", "ifDescr"), ("JUNIPER-SONET-MIB", "jnxSonetLastAlarmId"), ("JUNIPER-SONET-MIB", "jnxSonetCurrentAlarms"), ("JUNIPER-SONET-MIB", "jnxSonetLastAlarmDate"))
if mibBuilder.loadTexts: jnxSonetAlarmSet.setStatus('current')
jnxSonetAlarmCleared = NotificationType((1, 3, 6, 1, 4, 1, 2636, 4, 6, 0, 2)).setObjects(("IF-MIB", "ifDescr"), ("JUNIPER-SONET-MIB", "jnxSonetLastAlarmId"), ("JUNIPER-SONET-MIB", "jnxSonetCurrentAlarms"), ("JUNIPER-SONET-MIB", "jnxSonetLastAlarmDate"))
if mibBuilder.loadTexts: jnxSonetAlarmCleared.setStatus('current')
mibBuilder.exportSymbols("JUNIPER-SONET-MIB", jnxSonetAlarms=jnxSonetAlarms, jnxSonetCurrentAlarms=jnxSonetCurrentAlarms, jnxSonetLastAlarmTime=jnxSonetLastAlarmTime, jnxSonetAlarmTable=jnxSonetAlarmTable, JnxSonetAlarmId=JnxSonetAlarmId, jnxSonetLastAlarmEvent=jnxSonetLastAlarmEvent, jnxSonetAlarmSet=jnxSonetAlarmSet, PYSNMP_MODULE_ID=jnxSonet, jnxSonetNotificationPrefix=jnxSonetNotificationPrefix, jnxSonetAlarmCleared=jnxSonetAlarmCleared, jnxSonetAlarmEntry=jnxSonetAlarmEntry, jnxSonet=jnxSonet, jnxSonetLastAlarmId=jnxSonetLastAlarmId, jnxSonetLastAlarmDate=jnxSonetLastAlarmDate)
| 128.155556
| 904
| 0.752211
|
#
# PySNMP MIB module JUNIPER-SONET-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/JUNIPER-SONET-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:50:16 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ConstraintsUnion, ConstraintsIntersection, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueSizeConstraint")
ifIndex, ifDescr = mibBuilder.importSymbols("IF-MIB", "ifIndex", "ifDescr")
jnxMibs, jnxSonetNotifications = mibBuilder.importSymbols("JUNIPER-SMI", "jnxMibs", "jnxSonetNotifications")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
MibIdentifier, Gauge32, iso, ObjectIdentity, TimeTicks, ModuleIdentity, Integer32, Counter32, Unsigned32, MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, Counter64, IpAddress, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "Gauge32", "iso", "ObjectIdentity", "TimeTicks", "ModuleIdentity", "Integer32", "Counter32", "Unsigned32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "Counter64", "IpAddress", "Bits")
DisplayString, DateAndTime, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "DateAndTime", "TextualConvention")
jnxSonet = ModuleIdentity((1, 3, 6, 1, 4, 1, 2636, 3, 20))
jnxSonet.setRevisions(('2002-12-12 00:00', '2002-08-08 00:00',))
if mibBuilder.loadTexts: jnxSonet.setLastUpdated('200307182154Z')
if mibBuilder.loadTexts: jnxSonet.setOrganization('Juniper Networks, Inc.')
class JnxSonetAlarmId(TextualConvention, Bits):
status = 'current'
namedValues = NamedValues(("sonetLolAlarm", 0), ("sonetPllAlarm", 1), ("sonetLofAlarm", 2), ("sonetLosAlarm", 3), ("sonetSefAlarm", 4), ("sonetLaisAlarm", 5), ("sonetPaisAlarm", 6), ("sonetLopAlarm", 7), ("sonetBerrSdAlarm", 8), ("sonetBerrSfAlarm", 9), ("sonetLrdiAlarm", 10), ("sonetPrdiAlarm", 11), ("sonetReiAlarm", 12), ("sonetUneqAlarm", 13), ("sonetPmisAlarm", 14), ("sonetLocAlarm", 15), ("sonetVaisAlarm", 16), ("sonetVlopAlarm", 17), ("sonetVrdiAlarm", 18), ("sonetVuneqAlarm", 19), ("sonetVmisAlarm", 20), ("sonetVlocAlarm", 21), ("sdhLolAlarm", 22), ("sdhPllAlarm", 23), ("sdhLofAlarm", 24), ("sdhLosAlarm", 25), ("sdhOofAlarm", 26), ("sdhMsAisAlarm", 27), ("sdhHpAisAlarm", 28), ("sdhLopAlarm", 29), ("sdhBerrSdAlarm", 30), ("sdhBerrSfAlarm", 31), ("sdhMsFerfAlarm", 32), ("sdhHpFerfAlarm", 33), ("sdhMsFebeAlarm", 34), ("sdhHpUneqAlarm", 35), ("sdhHpMisAlarm", 36), ("sdhLocAlarm", 37))
jnxSonetAlarms = MibIdentifier((1, 3, 6, 1, 4, 1, 2636, 3, 20, 1))
jnxSonetAlarmTable = MibTable((1, 3, 6, 1, 4, 1, 2636, 3, 20, 1, 1), )
if mibBuilder.loadTexts: jnxSonetAlarmTable.setStatus('current')
jnxSonetAlarmEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2636, 3, 20, 1, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: jnxSonetAlarmEntry.setStatus('current')
jnxSonetCurrentAlarms = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 20, 1, 1, 1, 1), JnxSonetAlarmId()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxSonetCurrentAlarms.setStatus('current')
jnxSonetLastAlarmId = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 20, 1, 1, 1, 2), JnxSonetAlarmId()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxSonetLastAlarmId.setStatus('current')
jnxSonetLastAlarmTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 20, 1, 1, 1, 3), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxSonetLastAlarmTime.setStatus('current')
jnxSonetLastAlarmDate = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 20, 1, 1, 1, 4), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxSonetLastAlarmDate.setStatus('current')
jnxSonetLastAlarmEvent = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 20, 1, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("none", 1), ("set", 2), ("cleared", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxSonetLastAlarmEvent.setStatus('current')
jnxSonetNotificationPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 2636, 4, 6, 0))
jnxSonetAlarmSet = NotificationType((1, 3, 6, 1, 4, 1, 2636, 4, 6, 0, 1)).setObjects(("IF-MIB", "ifDescr"), ("JUNIPER-SONET-MIB", "jnxSonetLastAlarmId"), ("JUNIPER-SONET-MIB", "jnxSonetCurrentAlarms"), ("JUNIPER-SONET-MIB", "jnxSonetLastAlarmDate"))
if mibBuilder.loadTexts: jnxSonetAlarmSet.setStatus('current')
jnxSonetAlarmCleared = NotificationType((1, 3, 6, 1, 4, 1, 2636, 4, 6, 0, 2)).setObjects(("IF-MIB", "ifDescr"), ("JUNIPER-SONET-MIB", "jnxSonetLastAlarmId"), ("JUNIPER-SONET-MIB", "jnxSonetCurrentAlarms"), ("JUNIPER-SONET-MIB", "jnxSonetLastAlarmDate"))
if mibBuilder.loadTexts: jnxSonetAlarmCleared.setStatus('current')
mibBuilder.exportSymbols("JUNIPER-SONET-MIB", jnxSonetAlarms=jnxSonetAlarms, jnxSonetCurrentAlarms=jnxSonetCurrentAlarms, jnxSonetLastAlarmTime=jnxSonetLastAlarmTime, jnxSonetAlarmTable=jnxSonetAlarmTable, JnxSonetAlarmId=JnxSonetAlarmId, jnxSonetLastAlarmEvent=jnxSonetLastAlarmEvent, jnxSonetAlarmSet=jnxSonetAlarmSet, PYSNMP_MODULE_ID=jnxSonet, jnxSonetNotificationPrefix=jnxSonetNotificationPrefix, jnxSonetAlarmCleared=jnxSonetAlarmCleared, jnxSonetAlarmEntry=jnxSonetAlarmEntry, jnxSonet=jnxSonet, jnxSonetLastAlarmId=jnxSonetLastAlarmId, jnxSonetLastAlarmDate=jnxSonetLastAlarmDate)
| 0
| 954
| 22
|
53e52201c1934896690b7be806684fdec6283cd9
| 4,864
|
py
|
Python
|
test/test_client_ip.py
|
leeyangjie/unit
|
02f50533c4a476b91e4b39a7a2d052095d970983
|
[
"Apache-2.0"
] | null | null | null |
test/test_client_ip.py
|
leeyangjie/unit
|
02f50533c4a476b91e4b39a7a2d052095d970983
|
[
"Apache-2.0"
] | null | null | null |
test/test_client_ip.py
|
leeyangjie/unit
|
02f50533c4a476b91e4b39a7a2d052095d970983
|
[
"Apache-2.0"
] | null | null | null |
from unit.applications.lang.python import TestApplicationPython
| 35.246377
| 77
| 0.476563
|
from unit.applications.lang.python import TestApplicationPython
class TestClientIP(TestApplicationPython):
prerequisites = {'modules': {'python': 'any'}}
def client_ip(self, options):
assert 'success' in self.conf(
{
"127.0.0.1:7081": {
"client_ip": options,
"pass": "applications/client_ip",
},
"[::1]:7082": {
"client_ip": options,
"pass": "applications/client_ip",
},
},
'listeners',
), 'listeners configure'
def get_xff(self, xff, sock_type='ipv4'):
port = 7081 if sock_type == 'ipv4' else 7082
return self.get(
sock_type=sock_type,
port=port,
headers={'Connection': 'close', 'X-Forwarded-For': xff},
)['body']
def setup_method(self):
self.load('client_ip')
def test_settings_client_ip_single_ip(self):
self.client_ip(
{'header': 'X-Forwarded-For', 'source': '123.123.123.123'}
)
assert self.get(port=7081)['body'] == '127.0.0.1', 'ipv4 default'
assert (
self.get(sock_type='ipv6', port=7082)['body'] == '::1'
), 'ipv6 default'
assert self.get_xff('1.1.1.1') == '127.0.0.1', 'bad source'
assert self.get_xff('blah') == '127.0.0.1', 'bad header'
assert self.get_xff('1.1.1.1', 'ipv6') == '::1', 'bad source ipv6'
self.client_ip({'header': 'X-Forwarded-For', 'source': '127.0.0.1'})
assert self.get(port=7081)['body'] == '127.0.0.1', 'ipv4 default 2'
assert (
self.get(sock_type='ipv6', port=7082)['body'] == '::1'
), 'ipv6 default 2'
assert self.get_xff('1.1.1.1') == '1.1.1.1', 'replace'
assert self.get_xff('blah') == '127.0.0.1', 'bad header 2'
assert self.get_xff('1.1.1.1', 'ipv6') == '::1', 'bad source ipv6 2'
self.client_ip({'header': 'X-Forwarded-For', 'source': '!127.0.0.1'})
assert self.get_xff('1.1.1.1') == '127.0.0.1', 'bad source 3'
assert self.get_xff('1.1.1.1', 'ipv6') == '1.1.1.1', 'replace 2'
def test_settings_client_ip_ipv4(self):
self.client_ip({'header': 'X-Forwarded-For', 'source': '127.0.0.1'})
assert (
self.get_xff('8.8.8.8, 84.23.23.11') == '84.23.23.11'
), 'xff replace'
assert (
self.get_xff('8.8.8.8, 84.23.23.11, 127.0.0.1') == '127.0.0.1'
), 'xff replace 2'
assert (
self.get_xff(['8.8.8.8', '127.0.0.1, 10.0.1.1']) == '10.0.1.1'
), 'xff replace multi'
def test_settings_client_ip_ipv6(self):
self.client_ip({'header': 'X-Forwarded-For', 'source': '::1'})
assert self.get_xff('1.1.1.1') == '127.0.0.1', 'bad source ipv4'
for ip in [
'f607:7403:1e4b:6c66:33b2:843f:2517:da27',
'2001:db8:3c4d:15::1a2f:1a2b',
'2001::3c4d:15:1a2f:1a2b',
'::11.22.33.44',
]:
assert self.get_xff(ip, 'ipv6') == ip, 'replace'
def test_settings_client_ip_recursive(self):
self.client_ip(
{
'header': 'X-Forwarded-For',
'recursive': True,
'source': ['127.0.0.1', '10.50.0.17', '10.5.2.1'],
}
)
assert self.get_xff('1.1.1.1') == '1.1.1.1', 'xff chain'
assert self.get_xff('1.1.1.1, 10.5.2.1') == '1.1.1.1', 'xff chain 2'
assert (
self.get_xff('8.8.8.8, 1.1.1.1, 10.5.2.1') == '1.1.1.1'
), 'xff chain 3'
assert (
self.get_xff('10.50.0.17, 10.5.2.1, 10.5.2.1') == '10.50.0.17'
), 'xff chain 4'
assert (
self.get_xff(['8.8.8.8', '1.1.1.1, 127.0.0.1']) == '1.1.1.1'
), 'xff replace multi'
assert (
self.get_xff(['8.8.8.8', '1.1.1.1, 127.0.0.1', '10.5.2.1'])
== '1.1.1.1'
), 'xff replace multi 2'
assert (
self.get_xff(['10.5.2.1', '10.50.0.17, 1.1.1.1', '10.5.2.1'])
== '1.1.1.1'
), 'xff replace multi 3'
assert (
self.get_xff('8.8.8.8, 2001:db8:3c4d:15::1a2f:1a2b, 127.0.0.1')
== '2001:db8:3c4d:15::1a2f:1a2b'
), 'xff chain ipv6'
def test_settings_client_ip_invalid(self):
assert 'error' in self.conf(
{
"http": {
"client_ip": {'header': 'X-Forwarded-For', 'source': []}
}
},
'settings',
), 'empty array source'
assert 'error' in self.conf(
{
"http": {
"client_ip": {'header': 'X-Forwarded-For', 'source': 'a'}
}
},
'settings',
), 'empty source invalid'
| 4,488
| 288
| 23
|
74a36f9de503409718965b9b6bc829fa35d95202
| 190
|
py
|
Python
|
dev.py
|
LCBRU/batch_demographics
|
e516e958091fd74dad00b1705431ac030e3c4503
|
[
"MIT"
] | null | null | null |
dev.py
|
LCBRU/batch_demographics
|
e516e958091fd74dad00b1705431ac030e3c4503
|
[
"MIT"
] | null | null | null |
dev.py
|
LCBRU/batch_demographics
|
e516e958091fd74dad00b1705431ac030e3c4503
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from batch_demographics import create_app
from config import DevConfig
app = create_app(DevConfig())
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8000)
| 23.75
| 41
| 0.736842
|
#!/usr/bin/env python
from batch_demographics import create_app
from config import DevConfig
app = create_app(DevConfig())
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8000)
| 0
| 0
| 0
|
dc71d49cfcda3d4e87c6a2b9fa01e89c155ee69a
| 151
|
py
|
Python
|
widgetProject/widgetApp/admin.py
|
cs-fullstack-2019-spring/django-fields-widgets-cw-rsalcido
|
4b19595867ee38396d0a80bfa0adcd0cb9811d23
|
[
"Apache-2.0"
] | null | null | null |
widgetProject/widgetApp/admin.py
|
cs-fullstack-2019-spring/django-fields-widgets-cw-rsalcido
|
4b19595867ee38396d0a80bfa0adcd0cb9811d23
|
[
"Apache-2.0"
] | null | null | null |
widgetProject/widgetApp/admin.py
|
cs-fullstack-2019-spring/django-fields-widgets-cw-rsalcido
|
4b19595867ee38396d0a80bfa0adcd0cb9811d23
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from .models import supeHero
# Register your models here.
admin.site.register(supeHero)
# Register your models here.
| 21.571429
| 32
| 0.801325
|
from django.contrib import admin
from .models import supeHero
# Register your models here.
admin.site.register(supeHero)
# Register your models here.
| 0
| 0
| 0
|
0430b1b3554d1367b14b734250b34ede8b260068
| 337
|
py
|
Python
|
main.py
|
pooyapooya/rizpardazande
|
818721a3daac1385daf71ac508ad00bf153cbf0b
|
[
"MIT"
] | null | null | null |
main.py
|
pooyapooya/rizpardazande
|
818721a3daac1385daf71ac508ad00bf153cbf0b
|
[
"MIT"
] | null | null | null |
main.py
|
pooyapooya/rizpardazande
|
818721a3daac1385daf71ac508ad00bf153cbf0b
|
[
"MIT"
] | null | null | null |
from easygui.boxes.choice_box import choicebox
from easygui.boxes.text_box import textbox
from phase1 import phase1
from phase2 import phase2
__author__ = 'po0ya'
choices = [
'Phase1',
'Phase2'
]
choice = choicebox(msg='Please select project phase:', choices=choices)
if choice == choices[0]:
phase1()
else:
phase2()
| 17.736842
| 71
| 0.724036
|
from easygui.boxes.choice_box import choicebox
from easygui.boxes.text_box import textbox
from phase1 import phase1
from phase2 import phase2
__author__ = 'po0ya'
choices = [
'Phase1',
'Phase2'
]
choice = choicebox(msg='Please select project phase:', choices=choices)
if choice == choices[0]:
phase1()
else:
phase2()
| 0
| 0
| 0
|
4471ff0d57c1cc3ec8a60aec1f93edea9763dd0c
| 2,984
|
py
|
Python
|
saws/data_util.py
|
Pangeam/saws
|
5aba511e72bf5feb35eb44be82fbdf805dfe3553
|
[
"Apache-2.0"
] | 5,358
|
2015-09-18T19:16:11.000Z
|
2022-03-31T20:40:51.000Z
|
saws/data_util.py
|
Pangeam/saws
|
5aba511e72bf5feb35eb44be82fbdf805dfe3553
|
[
"Apache-2.0"
] | 112
|
2015-09-10T10:53:57.000Z
|
2022-03-03T09:32:29.000Z
|
saws/data_util.py
|
Pangeam/saws
|
5aba511e72bf5feb35eb44be82fbdf805dfe3553
|
[
"Apache-2.0"
] | 333
|
2015-09-18T19:16:13.000Z
|
2022-03-06T17:27:54.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2015 Donne Martin. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import unicode_literals
from __future__ import print_function
import re
try:
from collections import OrderedDict
except:
from ordereddict import OrderedDict
class DataUtil(object):
"""Utility class to read from the data folder.
Attributes:
* None.
"""
def create_header_to_type_map(self, headers, data_type):
"""Creates a dict mapping headers to ResourceTypes.
Headers are the resource headers as they appear in the RESOURCES.txt.
Headers are mapped to their corresponding ResourceType.
Args:
* headers: A string that represents the header.
* data_type: An Enum specifying the data type.
Returns:
An OrderedDict mapping headers to ResourceTypes.
"""
command_types = []
for item in data_type:
if item != data_type.NUM_TYPES:
command_types.append(item)
return OrderedDict(zip(headers, command_types))
def get_data(self, data_file_path, header_to_type_map, data_type):
"""Gets all data from the specified data file.
Args:
* data_file_path: A string representing the full file path of
the data file.
* header_to_type_map: A dictionary mapping the data header labels
to the data types.
* data_type: An Enum specifying the data type.
Returns:
A list, where each element is a list of completions for each
data_type
"""
data_lists = [[] for x in range(data_type.NUM_TYPES.value)]
with open(data_file_path) as f:
for line in f:
line = re.sub('\n', '', line)
parsing_header = False
# Check if we are reading in a data header to determine
# which set of data we are parsing
for key, value in header_to_type_map.items():
if key in line:
data_type = value
parsing_header = True
break
if not parsing_header:
# Store the data in its associated list
if line.strip() != '':
data_lists[data_type.value].append(line)
for data_list in data_lists:
data_list.sort()
return data_lists
| 35.52381
| 77
| 0.610925
|
# -*- coding: utf-8 -*-
# Copyright 2015 Donne Martin. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import unicode_literals
from __future__ import print_function
import re
try:
from collections import OrderedDict
except:
from ordereddict import OrderedDict
class DataUtil(object):
"""Utility class to read from the data folder.
Attributes:
* None.
"""
def create_header_to_type_map(self, headers, data_type):
"""Creates a dict mapping headers to ResourceTypes.
Headers are the resource headers as they appear in the RESOURCES.txt.
Headers are mapped to their corresponding ResourceType.
Args:
* headers: A string that represents the header.
* data_type: An Enum specifying the data type.
Returns:
An OrderedDict mapping headers to ResourceTypes.
"""
command_types = []
for item in data_type:
if item != data_type.NUM_TYPES:
command_types.append(item)
return OrderedDict(zip(headers, command_types))
def get_data(self, data_file_path, header_to_type_map, data_type):
"""Gets all data from the specified data file.
Args:
* data_file_path: A string representing the full file path of
the data file.
* header_to_type_map: A dictionary mapping the data header labels
to the data types.
* data_type: An Enum specifying the data type.
Returns:
A list, where each element is a list of completions for each
data_type
"""
data_lists = [[] for x in range(data_type.NUM_TYPES.value)]
with open(data_file_path) as f:
for line in f:
line = re.sub('\n', '', line)
parsing_header = False
# Check if we are reading in a data header to determine
# which set of data we are parsing
for key, value in header_to_type_map.items():
if key in line:
data_type = value
parsing_header = True
break
if not parsing_header:
# Store the data in its associated list
if line.strip() != '':
data_lists[data_type.value].append(line)
for data_list in data_lists:
data_list.sort()
return data_lists
| 0
| 0
| 0
|
98ef697ccd1d0ce81f3545bc903eb1ff39fa24a2
| 701
|
py
|
Python
|
onlinecourse/migrations/0003_homeworkanswer.py
|
Moneto3o/Course-Mangement-System
|
2b2a8548e08fd6f3185ad23559eae841881326cf
|
[
"Apache-2.0"
] | null | null | null |
onlinecourse/migrations/0003_homeworkanswer.py
|
Moneto3o/Course-Mangement-System
|
2b2a8548e08fd6f3185ad23559eae841881326cf
|
[
"Apache-2.0"
] | null | null | null |
onlinecourse/migrations/0003_homeworkanswer.py
|
Moneto3o/Course-Mangement-System
|
2b2a8548e08fd6f3185ad23559eae841881326cf
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.1.3 on 2021-06-23 02:36
from django.db import migrations, models
import django.db.models.deletion
| 30.478261
| 121
| 0.626248
|
# Generated by Django 3.1.3 on 2021-06-23 02:36
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('onlinecourse', '0002_auto_20210623_0941'),
]
operations = [
migrations.CreateModel(
name='Homeworkanswer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choices', models.ManyToManyField(to='onlinecourse.Choice')),
('homework', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='onlinecourse.homework')),
],
),
]
| 0
| 554
| 23
|
371df930ffaa78a55012c519e4c329c1f8f97c56
| 1,166
|
py
|
Python
|
leetcode/33_Search_in_Rotated_Sorted_Array.py
|
PhillipLeeHub/algorithm-and-data-structure
|
c0c27fee1b4fd634084da0b41395a26307d76e69
|
[
"MIT"
] | 1
|
2020-05-01T21:29:17.000Z
|
2020-05-01T21:29:17.000Z
|
leetcode/33_Search_in_Rotated_Sorted_Array.py
|
PhillipLeeHub/algorithm-and-data-structure
|
c0c27fee1b4fd634084da0b41395a26307d76e69
|
[
"MIT"
] | null | null | null |
leetcode/33_Search_in_Rotated_Sorted_Array.py
|
PhillipLeeHub/algorithm-and-data-structure
|
c0c27fee1b4fd634084da0b41395a26307d76e69
|
[
"MIT"
] | 1
|
2020-06-12T23:32:14.000Z
|
2020-06-12T23:32:14.000Z
|
'''
33. Search in Rotated Sorted Array Medium
Suppose an array sorted in ascending order is rotated at some pivot unknown to you beforehand.
(i.e., [0,1,2,4,5,6,7] might become [4,5,6,7,0,1,2]).
You are given a target value to search. If found in the array return its index, otherwise return -1.
You may assume no duplicate exists in the array.
Your algorithm's runtime complexity must be in the order of O(log n).
Example 1:
Input: nums = [4,5,6,7,0,1,2], target = 0
Output: 4
Example 2:
Input: nums = [4,5,6,7,0,1,2], target = 3
Output: -1
'''
| 27.116279
| 100
| 0.511149
|
'''
33. Search in Rotated Sorted Array Medium
Suppose an array sorted in ascending order is rotated at some pivot unknown to you beforehand.
(i.e., [0,1,2,4,5,6,7] might become [4,5,6,7,0,1,2]).
You are given a target value to search. If found in the array return its index, otherwise return -1.
You may assume no duplicate exists in the array.
Your algorithm's runtime complexity must be in the order of O(log n).
Example 1:
Input: nums = [4,5,6,7,0,1,2], target = 0
Output: 4
Example 2:
Input: nums = [4,5,6,7,0,1,2], target = 3
Output: -1
'''
class Solution:
def search(self, nums: List[int], target: int) -> int:
l, r = 0, len(nums)-1
while l <= r:
mid = l + (r-l)//2
if nums[mid] == target:
return mid
# Check if pivot applies
if nums[l] <= nums[mid]:
if nums[l] <= target < nums[mid]:
r = mid - 1
else:
l = mid + 1
else:
if nums[mid] < target <= nums[r]:
l = mid + 1
else:
r = mid - 1
return -1
| 570
| -6
| 48
|
dc96d3db43b70b02ef24b2ea06e3b4ff9e9d80ce
| 1,331
|
py
|
Python
|
glashammer/bundles/contrib/auth/openidauth.py
|
passy/glashammer-rdrei
|
9e56952d70b961d8945707469aad9cfe97c4e7b7
|
[
"MIT"
] | 1
|
2016-07-04T15:23:59.000Z
|
2016-07-04T15:23:59.000Z
|
glashammer/bundles/contrib/auth/openidauth.py
|
passy/glashammer-rdrei
|
9e56952d70b961d8945707469aad9cfe97c4e7b7
|
[
"MIT"
] | null | null | null |
glashammer/bundles/contrib/auth/openidauth.py
|
passy/glashammer-rdrei
|
9e56952d70b961d8945707469aad9cfe97c4e7b7
|
[
"MIT"
] | null | null | null |
"""
glashammer.bundles.contrib.auth.openidauth
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Open ID Support for Glashammer
:copyright: 2010 Glashammer Developers
:license: MIT
"""
from openid.consumer.consumer import Consumer, SUCCESS, CANCEL
from werkzeug import redirect
from glashammer.utils import sibpath, url_for, Response
from glashammer.bundles.sessions import setup_sessions, get_session
from glashammer.bundles.auth import setup_auth, login
| 28.319149
| 71
| 0.661908
|
"""
glashammer.bundles.contrib.auth.openidauth
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Open ID Support for Glashammer
:copyright: 2010 Glashammer Developers
:license: MIT
"""
from openid.consumer.consumer import Consumer, SUCCESS, CANCEL
from werkzeug import redirect
from glashammer.utils import sibpath, url_for, Response
from glashammer.bundles.sessions import setup_sessions, get_session
from glashammer.bundles.auth import setup_auth, login
def login_view(req):
session = get_session()
consumer = Consumer(session, None)
url_back = url_for('openid/login', _external=True)
if req.method == 'POST':
url = req.form.get('openid')
authreq = consumer.begin(url)
return redirect(authreq.redirectURL(url_back, url_back))
else:
res = consumer.complete(req.args, url_back)
if res.status == SUCCESS:
identity = req.args.get('openid.identity')
login(identity)
return Response('Successfully logged in as: %s' % identity)
elif res.status == CANCEL:
return Response('Cancelled')
else:
return Response('Nope')
print res == SUCCESS
def setup_openid(app):
app.add_setup(setup_sessions)
app.add_setup(setup_auth)
app.add_url('/openid/login', 'openid/login', view=login_view)
| 828
| 0
| 46
|
8f558f6a46b75c6d332a9f4e5b2539ae3d50b5f6
| 2,544
|
py
|
Python
|
baseline_preprocess.py
|
shiningliang/DIMM
|
adc9ff2bea0921cffe91989a1adc95184d81e6a5
|
[
"Apache-2.0"
] | 1
|
2021-03-01T12:28:26.000Z
|
2021-03-01T12:28:26.000Z
|
baseline_preprocess.py
|
shiningliang/DIMM
|
adc9ff2bea0921cffe91989a1adc95184d81e6a5
|
[
"Apache-2.0"
] | null | null | null |
baseline_preprocess.py
|
shiningliang/DIMM
|
adc9ff2bea0921cffe91989a1adc95184d81e6a5
|
[
"Apache-2.0"
] | 1
|
2020-11-07T16:11:00.000Z
|
2020-11-07T16:11:00.000Z
|
import pandas as pd
import numpy as np
import os
import pickle as pkl
import json
from tqdm import tqdm
single_task = ['5849', '25000', '41401', '4019']
for task in single_task:
path = 'data/preprocessed_data/baseline/' + task
if not os.path.exists(path):
os.makedirs(path)
train_data, dim = preprocess_data('data/raw_data/' + task + '/train', task)
save(train_data, 'data/preprocessed_data/baseline/' + task, 'train')
test_data, dim = preprocess_data('data/raw_data/' + task + '/test', task)
save(test_data, 'data/preprocessed_data/baseline/' + task, 'test')
| 34.378378
| 91
| 0.605739
|
import pandas as pd
import numpy as np
import os
import pickle as pkl
import json
from tqdm import tqdm
def convert_samples(samples):
print('Converting samples...')
indexes = samples[0]['index']
labels = [samples[0]['label']] * len(indexes)
del samples[0]
for sample in tqdm(samples):
# indexes = np.vstack((indexes, sample['index']))
indexes = np.concatenate((indexes, sample['index']), axis=0)
labels += [sample['label']] * len(sample['index'])
labels = np.asarray(labels)
return indexes, labels
def preprocess_data(data_path, task_type):
samples = []
labels = []
total = 0
print('Reading raw files for {}...'.format(task_type))
for file in tqdm(os.listdir(data_path)):
total += 1
if file.startswith('0'):
dead = 0
else:
dead = 1
raw_sample = pd.read_csv(os.path.join(data_path, file), sep=',')
raw_sample = raw_sample.fillna(0)
medicine = raw_sample.iloc[:, 209:].as_matrix()
index = raw_sample.iloc[:, 3:208].as_matrix()
index = np.concatenate((index, medicine), axis=1)
index = index.tolist()
samples += index
labels += [dead] * len(index)
# sample = {'patient_id': total,
# 'index': index,
# 'label': dead,
# 'name': file}
# samples.append(sample)
# train_samples, test_samples = train_test_split(samples, test_size=0.2)
# dim = samples[0]['index'].shape[1]
dim = len(samples[0])
# del samples
# indexes, labels = convert_samples(samples)
print('Num of samples : ', len(samples))
return [np.asarray(samples, dtype=np.float32), np.asarray(labels, dtype=np.int32)], dim
def save(data, path, data_type):
print('Saving {} data...'.format(data_type))
np.save(os.path.join(path, data_type + '_x.npy'), data[0])
np.save(os.path.join(path, data_type + '_y.npy'), data[1])
# with open(file_name, 'w') as f:
# json.dump(data, f)
# f.close()
single_task = ['5849', '25000', '41401', '4019']
for task in single_task:
path = 'data/preprocessed_data/baseline/' + task
if not os.path.exists(path):
os.makedirs(path)
train_data, dim = preprocess_data('data/raw_data/' + task + '/train', task)
save(train_data, 'data/preprocessed_data/baseline/' + task, 'train')
test_data, dim = preprocess_data('data/raw_data/' + task + '/test', task)
save(test_data, 'data/preprocessed_data/baseline/' + task, 'test')
| 1,878
| 0
| 69
|
69654bd4eaf9e6ddbb228cf5eb6cfebf7c5a9c0f
| 754
|
py
|
Python
|
appengine/app.py
|
wickedchicken/timebug
|
a7164dd0c9b7e992deba5a232073d001bd3448c1
|
[
"BSD-2-Clause"
] | null | null | null |
appengine/app.py
|
wickedchicken/timebug
|
a7164dd0c9b7e992deba5a232073d001bd3448c1
|
[
"BSD-2-Clause"
] | 1
|
2019-10-05T21:48:45.000Z
|
2019-10-05T21:48:45.000Z
|
appengine/app.py
|
wickedchicken/timebug
|
a7164dd0c9b7e992deba5a232073d001bd3448c1
|
[
"BSD-2-Clause"
] | null | null | null |
import webapp2
from webapp2_extras import jinja2
class BaseHandler(webapp2.RequestHandler):
"""Provide a cached Jinja environment to each request."""
@webapp2.cached_property
app = webapp2.WSGIApplication([("/_ah/warmup", MainPage),
('/', MainPage),
])
| 27.925926
| 63
| 0.690981
|
import webapp2
from webapp2_extras import jinja2
class BaseHandler(webapp2.RequestHandler):
"""Provide a cached Jinja environment to each request."""
@webapp2.cached_property
def jinja2(self):
# Returns a Jinja2 renderer cached in the app registry.
return jinja2.get_jinja2(app=self.app)
def render_response(self, _template, **context):
# Renders a template and writes the result to the response.
rv = self.jinja2.render_template(_template, **context)
self.response.write(rv)
class MainPage(BaseHandler):
def get(self):
context = {'title': 'Example App'}
self.render_response('main.html', **context)
app = webapp2.WSGIApplication([("/_ah/warmup", MainPage),
('/', MainPage),
])
| 358
| 7
| 96
|
6200257d7e59c1ae462e27c608d6cbec78e80fee
| 665
|
py
|
Python
|
tests/choice/test_choice.py
|
xlurio/RockPaperScissorsPy
|
927bbd1480dbca70c9bc3b982f4034ac2ff33c57
|
[
"MIT"
] | null | null | null |
tests/choice/test_choice.py
|
xlurio/RockPaperScissorsPy
|
927bbd1480dbca70c9bc3b982f4034ac2ff33c57
|
[
"MIT"
] | null | null | null |
tests/choice/test_choice.py
|
xlurio/RockPaperScissorsPy
|
927bbd1480dbca70c9bc3b982f4034ac2ff33c57
|
[
"MIT"
] | null | null | null |
import unittest
from src.choice import Choice
from src.choice.exceptions import InvalidChoiceException
class UserChoiceTests(unittest.TestCase):
"""Tests for the user choice class"""
def test_valid_choice(self):
"""Test valid user input"""
choice = Choice('r')
self.assertTrue(choice.is_valid())
self.assertEqual(choice.get_choice(), 0)
def test_invalid_choice(self):
"""Test invalid user input"""
choice = Choice('h')
self.assertFalse(choice.is_valid())
with self.assertRaises(InvalidChoiceException):
choice.get_choice()
if __name__ == '__main__':
unittest.main()
| 25.576923
| 56
| 0.666165
|
import unittest
from src.choice import Choice
from src.choice.exceptions import InvalidChoiceException
class UserChoiceTests(unittest.TestCase):
"""Tests for the user choice class"""
def test_valid_choice(self):
"""Test valid user input"""
choice = Choice('r')
self.assertTrue(choice.is_valid())
self.assertEqual(choice.get_choice(), 0)
def test_invalid_choice(self):
"""Test invalid user input"""
choice = Choice('h')
self.assertFalse(choice.is_valid())
with self.assertRaises(InvalidChoiceException):
choice.get_choice()
if __name__ == '__main__':
unittest.main()
| 0
| 0
| 0
|
0713d78df8377d0c6b7c00eaf68bd9b197d1fbac
| 609
|
py
|
Python
|
networkit/test/test_centrality.py
|
angriman/network
|
3a4c5fd32eb2be8d5b34eaee17f8fe4e6e141894
|
[
"MIT"
] | 366
|
2019-06-27T18:48:18.000Z
|
2022-03-29T08:36:49.000Z
|
networkit/test/test_centrality.py
|
angriman/network
|
3a4c5fd32eb2be8d5b34eaee17f8fe4e6e141894
|
[
"MIT"
] | 387
|
2019-06-24T11:30:39.000Z
|
2022-03-31T10:37:28.000Z
|
networkit/test/test_centrality.py
|
angriman/network
|
3a4c5fd32eb2be8d5b34eaee17f8fe4e6e141894
|
[
"MIT"
] | 131
|
2019-07-04T15:40:13.000Z
|
2022-03-29T12:34:23.000Z
|
#!/usr/bin/env python3
import unittest
import os
import networkit as nk
if __name__ == "__main__":
unittest.main()
| 18.454545
| 60
| 0.651888
|
#!/usr/bin/env python3
import unittest
import os
import networkit as nk
class Test_Centrality(unittest.TestCase):
def test_DegreeCentrality(self):
g = nk.Graph(8, False, False)
g.addEdge(0, 2)
g.addEdge(0, 5)
g.addEdge(1, 2)
g.addEdge(2, 3)
g.addEdge(2, 2)
g.addEdge(2, 4)
g.addEdge(3, 5)
g.addEdge(4, 5)
g.addEdge(5, 5)
g.addEdge(5, 6)
g.addEdge(5, 7)
g.addEdge(7, 7)
expected_result = [2.0, 1.0, 4.0, 2.0, 2.0, 5.0, 1.0, 1.0]
dc = nk.centrality.DegreeCentrality(g).run().scores()
self.assertListEqual(expected_result, dc)
if __name__ == "__main__":
unittest.main()
| 424
| 20
| 47
|
6bff15a4444dc05907bed9946e38668c9876c9d2
| 931
|
py
|
Python
|
model.py
|
Ayantika22/Flask-program-For-Wine-Dataset
|
aa28a92116c1b2735883e43ed1ef3c9f58239bbc
|
[
"MIT"
] | 2
|
2020-06-29T15:14:56.000Z
|
2020-11-26T08:48:43.000Z
|
model.py
|
Ayantika22/Flask-Program-for-Wine-Dataset
|
aa28a92116c1b2735883e43ed1ef3c9f58239bbc
|
[
"MIT"
] | null | null | null |
model.py
|
Ayantika22/Flask-Program-for-Wine-Dataset
|
aa28a92116c1b2735883e43ed1ef3c9f58239bbc
|
[
"MIT"
] | 1
|
2021-05-08T10:18:50.000Z
|
2021-05-08T10:18:50.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 28 15:03:12 2020
@author: Ayantika
"""
# Importing necessary libraries
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
import pickle
# Reading the data
Wine = pd.read_csv("Wine.csv")
print(Wine.head())
#iris.drop("Id", axis=1, inplace = True)
y = Wine['Customer_Segment']
Wine.drop(columns='Customer_Segment',inplace=True)
X = Wine[['Alcohol', 'Malic_Acid', 'Ash', 'Ash_Alcanity', 'Magnesium', 'Total_Phenols', 'Flavanoids',
'Nonflavanoid_Phenols', 'Proanthocyanins', 'Color_Intensity', 'Hue', 'OD280', 'Proline']]
# Training the model
x_train,x_test,y_train,y_test = train_test_split(X,y, test_size=0.3)
model = LogisticRegression()
model.fit(x_train,y_train)
pickle.dump(model,open('model.pkl','wb'))
| 30.032258
| 103
| 0.712137
|
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 28 15:03:12 2020
@author: Ayantika
"""
# Importing necessary libraries
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
import pickle
# Reading the data
Wine = pd.read_csv("Wine.csv")
print(Wine.head())
#iris.drop("Id", axis=1, inplace = True)
y = Wine['Customer_Segment']
Wine.drop(columns='Customer_Segment',inplace=True)
X = Wine[['Alcohol', 'Malic_Acid', 'Ash', 'Ash_Alcanity', 'Magnesium', 'Total_Phenols', 'Flavanoids',
'Nonflavanoid_Phenols', 'Proanthocyanins', 'Color_Intensity', 'Hue', 'OD280', 'Proline']]
# Training the model
x_train,x_test,y_train,y_test = train_test_split(X,y, test_size=0.3)
model = LogisticRegression()
model.fit(x_train,y_train)
pickle.dump(model,open('model.pkl','wb'))
| 0
| 0
| 0
|
ce46e50cde4a68b91ffc35cb64668b934bd1c3de
| 2,737
|
py
|
Python
|
seed.py
|
jrrlokken/issue-tracking-system
|
33b9b68dd821c8a05cdbc8de87645f61da06f0bd
|
[
"MIT"
] | 1
|
2021-02-20T04:26:36.000Z
|
2021-02-20T04:26:36.000Z
|
seed.py
|
jrrlokken/issue-tracking-system
|
33b9b68dd821c8a05cdbc8de87645f61da06f0bd
|
[
"MIT"
] | 2
|
2021-04-06T18:14:38.000Z
|
2021-06-02T02:45:15.000Z
|
seed.py
|
jrrlokken/issue-tracking-system
|
33b9b68dd821c8a05cdbc8de87645f61da06f0bd
|
[
"MIT"
] | null | null | null |
from app import app
from models import db, connect_db, User, Issue, Comment, Priority, Status, Category, Role
# Drop db tables and create them anew
db.drop_all()
db.create_all()
# Priority
p1 = Priority(
priority_id=0,
priority_label="Low"
)
p2 = Priority(
priority_id=1,
priority_label="Medium"
)
p3 = Priority(
priority_id=2,
priority_label="High"
)
p4 = Priority(
priority_id=3,
priority_label="Urgent"
)
# Status
s1 = Status(
status_id=0,
status_label="Submitted"
)
s2 = Status(
status_id=1,
status_label="Assigned"
)
s3 = Status(
status_id=2,
status_label="Resolved"
)
# Category
c1 = Category(
category_id=0,
category_label="Technical Issue"
)
c2 = Category(
category_id=1,
category_label="Customer Complaint"
)
c3 = Category(
category_id=2,
category_label="Product Request"
)
ro1 = Role(
role_id=0,
role_label="user"
)
ro2 = Role(
role_id=1,
role_label="assignee"
)
ro3 = Role(
role_id=2,
role_label="admin"
)
db.session.add_all([p1,p2,p3,p4,s1,s2,s3,c1,c2,c3,ro1,ro2,ro3])
db.session.commit()
# Sample users.
# u1 = User.register(
# email="[email protected]",
# first_name="Admin",
# last_name="User",
# password="password1"
# )
# u2 = User.register(
# email="[email protected]",
# first_name="Regular",
# last_name="User",
# password="password2"
# )
# u3 = User.register(
# email="[email protected]",
# first_name="Assignee",
# last_name="User",
# password="password3"
# )
# u1.role = 2
# u3.role = 1
# db.session.commit()
# # Sample issues
# i1 = Issue(
# title="Printer on fire!",
# text="Huge flames are shooting out of paper tray 1!!! Please bring fire extinguisher ASAP!!!",
# reporter=2
# )
# i2 = Issue(
# title="Computer not responding",
# text="My PC is showing the loading spinner and will not respond to keyboard or mouse input. It has been doing this for 6 weeks.",
# reporter=2
# )
# i3 = Issue(
# title="Please bring in nacho flavored Beanfields chips",
# text="We're not saying you're going to get addicted to our slamming NACHO, but we're also not going to say you won't. Nacho-lly we're biased since it is our best seller. NACHO just has unmatched taste that makes being cheesy, well, cool and vegan. The kinda vegan you want at your barbecue so you can say, 'yeah NACHO came with me. We're good like that.' Nacho average tortilla chip.",
# category=2,
# reporter=3
# )
# i4 = Issue(
# title="Clerk was rude and dismissive",
# text="She told me to wear a mask, and I don't wanna!",
# category=1,
# reporter=3
# )
# db.session.add_all([i1,i2,i3,i4])
# db.session.commit()
| 19.411348
| 391
| 0.650347
|
from app import app
from models import db, connect_db, User, Issue, Comment, Priority, Status, Category, Role
# Drop db tables and create them anew
db.drop_all()
db.create_all()
# Priority
p1 = Priority(
priority_id=0,
priority_label="Low"
)
p2 = Priority(
priority_id=1,
priority_label="Medium"
)
p3 = Priority(
priority_id=2,
priority_label="High"
)
p4 = Priority(
priority_id=3,
priority_label="Urgent"
)
# Status
s1 = Status(
status_id=0,
status_label="Submitted"
)
s2 = Status(
status_id=1,
status_label="Assigned"
)
s3 = Status(
status_id=2,
status_label="Resolved"
)
# Category
c1 = Category(
category_id=0,
category_label="Technical Issue"
)
c2 = Category(
category_id=1,
category_label="Customer Complaint"
)
c3 = Category(
category_id=2,
category_label="Product Request"
)
ro1 = Role(
role_id=0,
role_label="user"
)
ro2 = Role(
role_id=1,
role_label="assignee"
)
ro3 = Role(
role_id=2,
role_label="admin"
)
db.session.add_all([p1,p2,p3,p4,s1,s2,s3,c1,c2,c3,ro1,ro2,ro3])
db.session.commit()
# Sample users.
# u1 = User.register(
# email="[email protected]",
# first_name="Admin",
# last_name="User",
# password="password1"
# )
# u2 = User.register(
# email="[email protected]",
# first_name="Regular",
# last_name="User",
# password="password2"
# )
# u3 = User.register(
# email="[email protected]",
# first_name="Assignee",
# last_name="User",
# password="password3"
# )
# u1.role = 2
# u3.role = 1
# db.session.commit()
# # Sample issues
# i1 = Issue(
# title="Printer on fire!",
# text="Huge flames are shooting out of paper tray 1!!! Please bring fire extinguisher ASAP!!!",
# reporter=2
# )
# i2 = Issue(
# title="Computer not responding",
# text="My PC is showing the loading spinner and will not respond to keyboard or mouse input. It has been doing this for 6 weeks.",
# reporter=2
# )
# i3 = Issue(
# title="Please bring in nacho flavored Beanfields chips",
# text="We're not saying you're going to get addicted to our slamming NACHO, but we're also not going to say you won't. Nacho-lly we're biased since it is our best seller. NACHO just has unmatched taste that makes being cheesy, well, cool and vegan. The kinda vegan you want at your barbecue so you can say, 'yeah NACHO came with me. We're good like that.' Nacho average tortilla chip.",
# category=2,
# reporter=3
# )
# i4 = Issue(
# title="Clerk was rude and dismissive",
# text="She told me to wear a mask, and I don't wanna!",
# category=1,
# reporter=3
# )
# db.session.add_all([i1,i2,i3,i4])
# db.session.commit()
| 0
| 0
| 0
|
4cb1a20de656973fca2fc1192b2c5183563fa9cf
| 70
|
py
|
Python
|
pizza_auth/run.py
|
andimiller/pizza-auth
|
e36870a4edf84a203bbaad5911e2b0e5f605f60e
|
[
"MIT"
] | 2
|
2015-06-12T08:26:02.000Z
|
2015-09-09T00:25:59.000Z
|
pizza_auth/run.py
|
Sylnai/pizza-auth
|
e36870a4edf84a203bbaad5911e2b0e5f605f60e
|
[
"MIT"
] | 1
|
2021-06-15T20:28:49.000Z
|
2021-06-15T20:28:49.000Z
|
pizza_auth/run.py
|
Sylnai/pizza-auth
|
e36870a4edf84a203bbaad5911e2b0e5f605f60e
|
[
"MIT"
] | 2
|
2015-11-13T16:43:30.000Z
|
2016-09-15T18:39:05.000Z
|
from main import app
app.run(host='127.0.0.1', port=8090, debug=True)
| 23.333333
| 48
| 0.714286
|
from main import app
app.run(host='127.0.0.1', port=8090, debug=True)
| 0
| 0
| 0
|
0e46de584e9f7e867a5e9c740f4b6ef3e9ef32f6
| 3,584
|
py
|
Python
|
pymco/security/ssl.py
|
jantman/python-mcollective
|
ceb8f362bc8a1981b42696889250bed1cce07fea
|
[
"BSD-3-Clause"
] | 1
|
2015-07-29T00:35:51.000Z
|
2015-07-29T00:35:51.000Z
|
pymco/security/ssl.py
|
jantman/python-mcollective
|
ceb8f362bc8a1981b42696889250bed1cce07fea
|
[
"BSD-3-Clause"
] | null | null | null |
pymco/security/ssl.py
|
jantman/python-mcollective
|
ceb8f362bc8a1981b42696889250bed1cce07fea
|
[
"BSD-3-Clause"
] | null | null | null |
"""
:py:mod:`pymco.ssl`
-------------------
Contains SSL security provider plugin.
"""
from __future__ import print_function
import base64
import os
try:
from Crypto.Signature import PKCS1_v1_5
from Crypto.Hash import SHA
except ImportError as exc:
print('You need install pycrypto for using SSL security provider')
raise exc
from .. import exc
from . import SecurityProvider
from .. import utils
class SSLProvider(SecurityProvider):
"""Provide SSL security provider plugin.
See
http://docs.puppetlabs.com/mcollective/reference/plugins/security_ssl.html
for further information.
"""
def sign(self, msg):
"""Implement :py:meth:`pymco.security.SecurityProvider.sign`."""
msg[':callerid'] = self.callerid
msg[':hash'] = self.get_hash(msg)
return msg
def verify(self, msg):
"""Implement :py:meth:`pymco.security.SecurityProvider.verify`."""
hash_ = SHA.new(msg[':body'].encode('utf8'))
verifier = PKCS1_v1_5.new(self.server_public_key)
signature = base64.b64decode(msg[':hash'])
if not verifier.verify(hash_, signature):
raise exc.VerificationError(
'Message {0} can\'t be verified'.format(msg))
return msg
def get_hash(self, msg):
"""Get the hash for the given message.
:arg pymco.message.Message msg: message to get hash for.
:return: message hash so the receiver can verify the message.
"""
hashed_signature = SHA.new(msg[':body'].encode('utf8'))
signer = PKCS1_v1_5.new(self.private_key)
hashed_signature = signer.sign(hashed_signature)
return base64.b64encode(hashed_signature)
@property
def callerid(self):
"""Property returning the MCollective SSL caller id.
As MCollective docs states, the caller ID will be the name of public
key filename, without the extension part.
"""
if not self._caller_id:
caller_id = os.path.basename(
self.config['plugin.ssl_client_public']).split('.')[0]
self._caller_id = 'cert={0}'.format(caller_id)
return self._caller_id
@property
def server_public_key(self):
"""Property returning the server public key after being loaded."""
return self._load_rsa_key(key='plugin.ssl_server_public',
cache=self._server_public_key)
@property
def private_key(self):
"""Property returning the private key after being loaded."""
return self._load_rsa_key(key='plugin.ssl_client_private',
cache=self._private_key)
@property
def serializer(self):
"""Property returning the serializer object.
Serailzer object should be any subclass
:py:class:`pymco.serializer.Serializer`, depending on configuration.
However, right now, only YAML serialization can be supported,
since the default serializer (Marshal) isn't portable.
"""
if not self._serializer:
self._serializer = self.config.get_serializer('plugin.ssl_serializer')
return self._serializer
| 32.581818
| 82
| 0.644252
|
"""
:py:mod:`pymco.ssl`
-------------------
Contains SSL security provider plugin.
"""
from __future__ import print_function
import base64
import os
try:
from Crypto.Signature import PKCS1_v1_5
from Crypto.Hash import SHA
except ImportError as exc:
print('You need install pycrypto for using SSL security provider')
raise exc
from .. import exc
from . import SecurityProvider
from .. import utils
class SSLProvider(SecurityProvider):
"""Provide SSL security provider plugin.
See
http://docs.puppetlabs.com/mcollective/reference/plugins/security_ssl.html
for further information.
"""
def __init__(self, config):
super(SSLProvider, self).__init__(config=config)
self._private_key = None
self._server_public_key = None
self._caller_id = None
self._serializer = None
def sign(self, msg):
"""Implement :py:meth:`pymco.security.SecurityProvider.sign`."""
msg[':callerid'] = self.callerid
msg[':hash'] = self.get_hash(msg)
return msg
def verify(self, msg):
"""Implement :py:meth:`pymco.security.SecurityProvider.verify`."""
hash_ = SHA.new(msg[':body'].encode('utf8'))
verifier = PKCS1_v1_5.new(self.server_public_key)
signature = base64.b64decode(msg[':hash'])
if not verifier.verify(hash_, signature):
raise exc.VerificationError(
'Message {0} can\'t be verified'.format(msg))
return msg
def get_hash(self, msg):
"""Get the hash for the given message.
:arg pymco.message.Message msg: message to get hash for.
:return: message hash so the receiver can verify the message.
"""
hashed_signature = SHA.new(msg[':body'].encode('utf8'))
signer = PKCS1_v1_5.new(self.private_key)
hashed_signature = signer.sign(hashed_signature)
return base64.b64encode(hashed_signature)
@property
def callerid(self):
"""Property returning the MCollective SSL caller id.
As MCollective docs states, the caller ID will be the name of public
key filename, without the extension part.
"""
if not self._caller_id:
caller_id = os.path.basename(
self.config['plugin.ssl_client_public']).split('.')[0]
self._caller_id = 'cert={0}'.format(caller_id)
return self._caller_id
def _load_rsa_key(self, key, cache):
if not cache:
cache = self._server_public_key = utils.load_rsa_key(self.config[key])
return cache
@property
def server_public_key(self):
"""Property returning the server public key after being loaded."""
return self._load_rsa_key(key='plugin.ssl_server_public',
cache=self._server_public_key)
@property
def private_key(self):
"""Property returning the private key after being loaded."""
return self._load_rsa_key(key='plugin.ssl_client_private',
cache=self._private_key)
@property
def serializer(self):
"""Property returning the serializer object.
Serailzer object should be any subclass
:py:class:`pymco.serializer.Serializer`, depending on configuration.
However, right now, only YAML serialization can be supported,
since the default serializer (Marshal) isn't portable.
"""
if not self._serializer:
self._serializer = self.config.get_serializer('plugin.ssl_serializer')
return self._serializer
| 340
| 0
| 53
|
b1120b8ff6bc0605d09869d7d8a210da4ab8091e
| 1,356
|
py
|
Python
|
client/client.py
|
ArkEcosystem/ARK-Python-Client
|
98d78442b7b1088a533837d2cc674af8c2e6baae
|
[
"MIT"
] | 14
|
2018-06-15T06:09:17.000Z
|
2021-04-08T19:59:07.000Z
|
client/client.py
|
ArkEcosystem/ARK-Python-Client
|
98d78442b7b1088a533837d2cc674af8c2e6baae
|
[
"MIT"
] | 78
|
2018-06-15T07:59:46.000Z
|
2021-03-29T02:25:11.000Z
|
client/client.py
|
ArkEcosystem/ARK-Python-Client
|
98d78442b7b1088a533837d2cc674af8c2e6baae
|
[
"MIT"
] | 48
|
2018-06-14T15:46:59.000Z
|
2022-01-24T18:26:09.000Z
|
import inspect
import pkgutil
from importlib import import_module
from pathlib import Path
from client.connection import Connection
from client.exceptions import ArkParameterException
from client.resource import Resource
| 35.684211
| 84
| 0.608407
|
import inspect
import pkgutil
from importlib import import_module
from pathlib import Path
from client.connection import Connection
from client.exceptions import ArkParameterException
from client.resource import Resource
class ArkClient(object):
def __init__(self, hostname):
"""
:param string hostname: Node hostname. Examples: `http://127.0.0.1:4002` or
`http://my.domain.io/api/`. This is to allow people to server the api
on whatever url they want.
"""
self.connection = Connection(hostname)
self._import_api()
def _import_api(self):
"""
Dynamically imports API endpoints.
"""
modules = pkgutil.iter_modules([str(Path(__file__).parent / 'api')])
for _, name, _ in modules:
module = import_module('client.api.{}'.format(name))
for attr in dir(module):
# If attr name is `Resource`, skip it as it's a class and also has a
# subclass of Resource
if attr == 'Resource':
continue
attribute = getattr(module, attr)
if inspect.isclass(attribute) and issubclass(attribute, Resource):
# Set module class as a property on the client
setattr(self, name, attribute(self.connection))
| 0
| 1,111
| 23
|
f4b5e2b392444820619c6206589c11e19fa5d27c
| 1,821
|
py
|
Python
|
plugins/maya/load/load_camera.py
|
davidlatwe/reveries-config
|
4a282dd64a32a9b87bd1a070759b6425ff785d68
|
[
"MIT"
] | 3
|
2020-04-01T10:51:17.000Z
|
2021-08-05T18:35:23.000Z
|
plugins/maya/load/load_camera.py
|
davidlatwe/reveries-config
|
4a282dd64a32a9b87bd1a070759b6425ff785d68
|
[
"MIT"
] | null | null | null |
plugins/maya/load/load_camera.py
|
davidlatwe/reveries-config
|
4a282dd64a32a9b87bd1a070759b6425ff785d68
|
[
"MIT"
] | 1
|
2020-07-05T12:06:30.000Z
|
2020-07-05T12:06:30.000Z
|
import avalon.api
from reveries.maya import lib, capsule
from reveries.maya.plugins import ReferenceLoader
class CameraLoader(ReferenceLoader, avalon.api.Loader):
"""Specific loader for the reveries.camera family"""
label = "Reference camera"
order = -10
icon = "code-fork"
color = "orange"
hosts = ["maya"]
families = ["reveries.camera"]
representations = [
"mayaAscii",
"Alembic",
"FBX",
]
| 31.947368
| 74
| 0.500824
|
import avalon.api
from reveries.maya import lib, capsule
from reveries.maya.plugins import ReferenceLoader
class CameraLoader(ReferenceLoader, avalon.api.Loader):
"""Specific loader for the reveries.camera family"""
label = "Reference camera"
order = -10
icon = "code-fork"
color = "orange"
hosts = ["maya"]
families = ["reveries.camera"]
representations = [
"mayaAscii",
"Alembic",
"FBX",
]
def process_reference(self, context, name, namespace, group, options):
import maya.cmds as cmds
representation = context["representation"]
entry_path = self.file_path(representation)
nodes = cmds.file(entry_path,
namespace=namespace,
ignoreVersion=True,
sharedReferenceFile=False,
groupReference=True,
groupName=group,
reference=True,
lockReference=False,
returnNewNodes=True)
# Lock camera
camera = cmds.listRelatives(cmds.ls(type="camera", long=True),
parent=True,
fullPath=True)[0]
with capsule.ref_edit_unlock():
lib.lock_transform(camera, additional=["focalLength",
"cameraAperture",
"lensSqueezeRatio",
"shutterAngle",
"centerOfInterest"])
lib.lock_transform(group)
self[:] = nodes
def switch(self, container, representation):
self.update(container, representation)
| 1,308
| 0
| 54
|
c7e8cb15efaa4177f1474ef5a37bbf23c0a74d1c
| 3,064
|
py
|
Python
|
_pycharm_skeletons/renderdoc/D3D12OM.py
|
Lex-DRL/renderdoc-py-stubs
|
75d280e4f500ded506f3315a49fc432b37ab4fa6
|
[
"MIT"
] | null | null | null |
_pycharm_skeletons/renderdoc/D3D12OM.py
|
Lex-DRL/renderdoc-py-stubs
|
75d280e4f500ded506f3315a49fc432b37ab4fa6
|
[
"MIT"
] | null | null | null |
_pycharm_skeletons/renderdoc/D3D12OM.py
|
Lex-DRL/renderdoc-py-stubs
|
75d280e4f500ded506f3315a49fc432b37ab4fa6
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
# module renderdoc
# from P:\1-Scripts\_Python\Py-Autocomplete\renderdoc.pyd
# by generator 1.146
# no doc
# imports
import enum as __enum
from .SwigPyObject import SwigPyObject
class D3D12OM(SwigPyObject):
""" Describes the current state of the output-merger stage of the D3D12 pipeline. """
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
blendState = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""A :class:`D3D12BlendState` with the details of the blend state."""
depthReadOnly = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""``True`` if depth access to the depth-stencil target is read-only."""
depthStencilState = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""A :class:`D3D12DepthStencilState` with the details of the depth-stencil state."""
depthTarget = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""A :class:`D3D12View` with details of the bound depth-stencil target."""
multiSampleCount = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""The sample count used for rendering."""
multiSampleQuality = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""The MSAA quality level used for rendering."""
renderTargets = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""A list of :class:`D3D12View` describing the bound render targets."""
stencilReadOnly = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""``True`` if stenncil access to the depth-stencil target is read-only."""
this = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
thisown = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__dict__ = None # (!) real value is ''
| 36.915663
| 108
| 0.649151
|
# encoding: utf-8
# module renderdoc
# from P:\1-Scripts\_Python\Py-Autocomplete\renderdoc.pyd
# by generator 1.146
# no doc
# imports
import enum as __enum
from .SwigPyObject import SwigPyObject
class D3D12OM(SwigPyObject):
""" Describes the current state of the output-merger stage of the D3D12 pipeline. """
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
blendState = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""A :class:`D3D12BlendState` with the details of the blend state."""
depthReadOnly = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""``True`` if depth access to the depth-stencil target is read-only."""
depthStencilState = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""A :class:`D3D12DepthStencilState` with the details of the depth-stencil state."""
depthTarget = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""A :class:`D3D12View` with details of the bound depth-stencil target."""
multiSampleCount = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""The sample count used for rendering."""
multiSampleQuality = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""The MSAA quality level used for rendering."""
renderTargets = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""A list of :class:`D3D12View` describing the bound render targets."""
stencilReadOnly = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""``True`` if stenncil access to the depth-stencil target is read-only."""
this = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
thisown = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__dict__ = None # (!) real value is ''
| 53
| 0
| 27
|
212a25f9662c6caa2c9493596023de0bdd71b50b
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/jedi/inference/param.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/jedi/inference/param.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/jedi/inference/param.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/85/03/a8/e8d26fae6c7b3b8548be18e886767b946dd069a8481930ee9dda2adccd
| 96
| 96
| 0.895833
|
/home/runner/.cache/pip/pool/85/03/a8/e8d26fae6c7b3b8548be18e886767b946dd069a8481930ee9dda2adccd
| 0
| 0
| 0
|
e1ec4bfdb1db438cb20b46e1d2458eda58f02dfb
| 1,406
|
py
|
Python
|
Paper_Plots/Paper_Plots/Case3/plots_case3.py
|
RuiNian7319/Woodberry_Distillation
|
4ee8ab9de8e313bca48d9a7af9393abcad85ece4
|
[
"MIT"
] | 3
|
2019-07-18T06:43:49.000Z
|
2021-05-13T12:34:24.000Z
|
Paper_Plots/Paper_Plots/Case3/plots_case3.py
|
HTL2018/Woodberry_Distillation
|
4ee8ab9de8e313bca48d9a7af9393abcad85ece4
|
[
"MIT"
] | null | null | null |
Paper_Plots/Paper_Plots/Case3/plots_case3.py
|
HTL2018/Woodberry_Distillation
|
4ee8ab9de8e313bca48d9a7af9393abcad85ece4
|
[
"MIT"
] | 1
|
2020-08-12T13:12:00.000Z
|
2020-08-12T13:12:00.000Z
|
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import itertools
fonts = {"family": "serif",
"weight": "normal",
"size": "12"}
plt.rc('font', **fonts)
plt.rc('text', usetex=True)
ftc_noiseless = np.loadtxt('ftc_noiseless_case3.csv')
ftc_noise = np.loadtxt('ftc_noise_case3.csv')
no_ftc_noiseless = np.loadtxt('no_ftc_noiseless_case3.csv')
x = np.linspace(0, ftc_noiseless.shape[0], ftc_noiseless.shape[0] - 50)
with sns.cubehelix_palette(8, start=.5, rot=-.75, reverse=True):
ax = plt.subplot(111)
ax.plot(x, no_ftc_noiseless[50:, 0], label=r'$X_D$ No FTC (Noiseless)', linestyle='-.')
ax.plot(x, ftc_noise[50:, 0], label=r'$X_D$ FTC', linestyle='--')
ax.plot(x, ftc_noiseless[50:, 0], label=r'$X_D$ FTC (Noiseless)')
ax.plot(x, no_ftc_noiseless[50:, 1], linestyle='-.', label=r'$X_B$ No FTC (Noiseless)')
ax.plot(x, ftc_noise[50:, 1], linestyle='--', label=r'$X_B$ FTC')
ax.plot(x, ftc_noiseless[50:, 1], label=r'$X_B$ FTC (Noiseless)')
plt.xlabel(r'Time, \textit{t} (min)')
plt.ylabel(r'\%MeOH, $\textit{X}_D$ (wt. \%)')
handles, labels = ax.get_legend_handles_labels()
plt.legend(flip(handles, 2), flip(labels, 2), loc=6, ncol=2, prop={'size': 12}, frameon=False)
plt.savefig('Case3_Plot.eps', format='eps', dpi=1000)
plt.show()
| 32.697674
| 94
| 0.66074
|
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import itertools
fonts = {"family": "serif",
"weight": "normal",
"size": "12"}
plt.rc('font', **fonts)
plt.rc('text', usetex=True)
ftc_noiseless = np.loadtxt('ftc_noiseless_case3.csv')
ftc_noise = np.loadtxt('ftc_noise_case3.csv')
no_ftc_noiseless = np.loadtxt('no_ftc_noiseless_case3.csv')
x = np.linspace(0, ftc_noiseless.shape[0], ftc_noiseless.shape[0] - 50)
with sns.cubehelix_palette(8, start=.5, rot=-.75, reverse=True):
ax = plt.subplot(111)
ax.plot(x, no_ftc_noiseless[50:, 0], label=r'$X_D$ No FTC (Noiseless)', linestyle='-.')
ax.plot(x, ftc_noise[50:, 0], label=r'$X_D$ FTC', linestyle='--')
ax.plot(x, ftc_noiseless[50:, 0], label=r'$X_D$ FTC (Noiseless)')
ax.plot(x, no_ftc_noiseless[50:, 1], linestyle='-.', label=r'$X_B$ No FTC (Noiseless)')
ax.plot(x, ftc_noise[50:, 1], linestyle='--', label=r'$X_B$ FTC')
ax.plot(x, ftc_noiseless[50:, 1], label=r'$X_B$ FTC (Noiseless)')
plt.xlabel(r'Time, \textit{t} (min)')
plt.ylabel(r'\%MeOH, $\textit{X}_D$ (wt. \%)')
def flip(items, ncol):
return itertools.chain(*[items[i::ncol] for i in range(ncol)])
handles, labels = ax.get_legend_handles_labels()
plt.legend(flip(handles, 2), flip(labels, 2), loc=6, ncol=2, prop={'size': 12}, frameon=False)
plt.savefig('Case3_Plot.eps', format='eps', dpi=1000)
plt.show()
| 68
| 0
| 23
|
f30bda256908f99bddedb1b9e4707e5b737a2cd2
| 2,277
|
py
|
Python
|
realtime_hsv_adjust.py
|
aseber/OpenCV
|
9b5deef24acdc3664e7989e78d8935bbd140a880
|
[
"Apache-2.0"
] | 2
|
2016-03-27T01:50:05.000Z
|
2020-05-17T11:44:31.000Z
|
realtime_hsv_adjust.py
|
aseber/OpenCV
|
9b5deef24acdc3664e7989e78d8935bbd140a880
|
[
"Apache-2.0"
] | null | null | null |
realtime_hsv_adjust.py
|
aseber/OpenCV
|
9b5deef24acdc3664e7989e78d8935bbd140a880
|
[
"Apache-2.0"
] | null | null | null |
import cv2
import numpy as np
if __name__ == '__main__':
main()
| 41.4
| 92
| 0.621871
|
import cv2
import numpy as np
class GUI():
def __init__(self):
self.switch_window = cv2.namedWindow("Values")
self.hmin_bar = cv2.createTrackbar('Hue Min', 'Values', 0, 255, self.nothing)
self.hmax_bar = cv2.createTrackbar('Hue Max', 'Values', 0, 255, self.nothing)
hue_min = cv2.setTrackbarPos('Hue Max','Values', 255)
self.vmin_bar = cv2.createTrackbar('Value Min', 'Values', 0, 255, self.nothing)
self.vmax_bar = cv2.createTrackbar('Value Max', 'Values', 0, 255, self.nothing)
hue_min = cv2.setTrackbarPos('Value Max','Values', 255)
self.smin_bar = cv2.createTrackbar('Saturation Min', 'Values', 0, 255, self.nothing)
self.smax_bar = cv2.createTrackbar('Saturation Max', 'Values', 0, 255, self.nothing)
hue_min = cv2.setTrackbarPos('Saturation Max','Values', 255)
# self.realtime_button = cv2.createButton('Realtime', 'Values')
def run(self):
camera = cv2.VideoCapture(-1)
while camera.isOpened():
_, originalImage = camera.read()
hsvImage = cv2.cvtColor(originalImage, cv2.COLOR_BGR2HSV)
hue_min = cv2.getTrackbarPos('Hue Min', 'Values')
hue_max = cv2.getTrackbarPos('Hue Max', 'Values')
value_min = cv2.getTrackbarPos('Value Min', 'Values')
value_max = cv2.getTrackbarPos('Value Max', 'Values')
saturation_min = cv2.getTrackbarPos('Saturation Min', 'Values')
saturation_max = cv2.getTrackbarPos('Saturation Max', 'Values')
lower = np.array([hue_min, saturation_min, value_min])
upper = np.array([hue_max, saturation_max, value_max])
# erode_element = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
# dilate_element = cv2.getStructuringElement(cv2.MORPH_RECT, (8, 8))
mask = cv2.inRange(hsvImage, lower, upper)
# mask = cv2.erode(mask, erode_element)
# mask = cv2.dilate(mask, dilate_element)
cv2.imshow("Original", originalImage)
cv2.imshow("Mask", mask)
cv2.waitKey(5)
cv2.destroyAllWindows()
def nothing(self, x):
pass
def main():
user_gui = GUI()
user_gui.run()
if __name__ == '__main__':
main()
| 2,092
| -9
| 126
|
d247f5cc81772dbae25e41899a09b6f15408c0a8
| 1,084
|
py
|
Python
|
wunderpy3/task_comments_endpoint.py
|
lwedwards3/dhp_sync
|
71527142509189915ed39f13975594dd9f837735
|
[
"MIT"
] | null | null | null |
wunderpy3/task_comments_endpoint.py
|
lwedwards3/dhp_sync
|
71527142509189915ed39f13975594dd9f837735
|
[
"MIT"
] | null | null | null |
wunderpy3/task_comments_endpoint.py
|
lwedwards3/dhp_sync
|
71527142509189915ed39f13975594dd9f837735
|
[
"MIT"
] | null | null | null |
'''
Encapsulates all tasks that can be run against the 'notes' endpoint
'''
| 32.848485
| 105
| 0.675277
|
'''
Encapsulates all tasks that can be run against the 'notes' endpoint
'''
def get_task_comments(client, task_id):
params = {
'task_id' : int(task_id)
}
response = client.authenticated_request(client.api.Endpoints.TASK_COMMENTS, params=params)
assert response.status_code == 200
return response.json()
def get_list_comments(client, list_id):
params = {
'list_id' : int(list_id)
}
response = client.authenticated_request(client.api.Endpoints.TASK_COMMENTS, params=params)
assert response.status_code == 200
return response.json()
def get_task_comment(client, comment_id):
endpoint = '/'.join([client.api.Endpoints.TASK_COMMENTS, str(comment_id)])
response = client.authenticated_request(endpoint)
return response.json()
def create_comment(client, task_id, text):
data = {
'task_id' : int(task_id),
'text' : text,
}
response = client.authenticated_request(client.api.Endpoints.TASK_COMMENTS, method='POST', data=data)
return response.json()
| 916
| 0
| 91
|
10dad59116c1f790a9057cde007a47ef33d45728
| 603
|
py
|
Python
|
blogApp/migrations/0007_auto_20190703_1905.py
|
blogdevteam/Blog
|
93a4343261063fedd7a0ef2c60d4adb50c48e4ae
|
[
"MIT"
] | 4
|
2019-07-07T15:09:54.000Z
|
2020-05-24T11:02:31.000Z
|
blogApp/migrations/0007_auto_20190703_1905.py
|
blogdevteam/Blog
|
93a4343261063fedd7a0ef2c60d4adb50c48e4ae
|
[
"MIT"
] | null | null | null |
blogApp/migrations/0007_auto_20190703_1905.py
|
blogdevteam/Blog
|
93a4343261063fedd7a0ef2c60d4adb50c48e4ae
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.2 on 2019-07-03 11:05
from django.db import migrations, models
| 22.333333
| 62
| 0.552239
|
# Generated by Django 2.2.2 on 2019-07-03 11:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blogApp', '0006_delete_admin'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='image_height',
),
migrations.RemoveField(
model_name='user',
name='image_width',
),
migrations.AlterField(
model_name='user',
name='avatar',
field=models.CharField(max_length=100, null=True),
),
]
| 0
| 489
| 23
|
aa2b5855608326fd65f8812d1bf3493579925c53
| 1,010
|
py
|
Python
|
Patterns/NeilSayok.py
|
sanchit781/HACKTOBERFEST2021_PATTERN
|
c457eb2a1c7b729bdaa26ade7d4c7eb4092291e2
|
[
"MIT"
] | 229
|
2021-09-10T13:24:47.000Z
|
2022-03-18T16:54:29.000Z
|
Patterns/NeilSayok.py
|
swapnilnarad2000/HACKTOBERFEST2021_PATTERN
|
567f99bbc3d3bb9d03333422f5e1392251a41439
|
[
"MIT"
] | 164
|
2021-09-10T12:04:39.000Z
|
2021-10-29T21:20:42.000Z
|
Patterns/NeilSayok.py
|
swapnilnarad2000/HACKTOBERFEST2021_PATTERN
|
567f99bbc3d3bb9d03333422f5e1392251a41439
|
[
"MIT"
] | 567
|
2021-09-10T17:35:27.000Z
|
2021-12-11T12:45:43.000Z
|
'''
Program to convert any Image to Pattern
Input your Image URL or just press enter to see the default image.
Created by Sayok Dey Majumder.
Github User handle : NeilSayok
Link to profile: https://github.com/NeilSayok
'''
import cv2
import imutils
import urllib.request
import numpy as np
url = input("Enter Url to your Image or Press Enter if you want to use default image\n")
if(url == ""):
url = 'https://avatars.githubusercontent.com/u/21328143?v=4'
req = urllib.request.urlopen(url)
arr = np.asarray(bytearray(req.read()), dtype=np.uint8)
img = cv2.imdecode(arr, -1) # 'Load it as it is'
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = imutils.resize(img, width=300,height=300)
thresh = 127
im_bw = 255 - cv2.threshold(img, thresh, 255, cv2.THRESH_BINARY)[1]
print(im_bw.shape)
with open("Out.txt", "w") as fh:
for a in im_bw:
for b in a:
if(b == 255):
fh.write("*")
else:
fh.write(" ")
fh.write("\n")
| 20.612245
| 88
| 0.646535
|
'''
Program to convert any Image to Pattern
Input your Image URL or just press enter to see the default image.
Created by Sayok Dey Majumder.
Github User handle : NeilSayok
Link to profile: https://github.com/NeilSayok
'''
import cv2
import imutils
import urllib.request
import numpy as np
url = input("Enter Url to your Image or Press Enter if you want to use default image\n")
if(url == ""):
url = 'https://avatars.githubusercontent.com/u/21328143?v=4'
req = urllib.request.urlopen(url)
arr = np.asarray(bytearray(req.read()), dtype=np.uint8)
img = cv2.imdecode(arr, -1) # 'Load it as it is'
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = imutils.resize(img, width=300,height=300)
thresh = 127
im_bw = 255 - cv2.threshold(img, thresh, 255, cv2.THRESH_BINARY)[1]
print(im_bw.shape)
with open("Out.txt", "w") as fh:
for a in im_bw:
for b in a:
if(b == 255):
fh.write("*")
else:
fh.write(" ")
fh.write("\n")
| 0
| 0
| 0
|
d9aadf3cb80ac0f1700d00242c3a7ee13aeffba4
| 12,727
|
py
|
Python
|
poky/scripts/lib/buildstats.py
|
buildlinux/unityos
|
dcbe232d0589013d77a62c33959d6a69f9bfbc5e
|
[
"Apache-2.0"
] | 53
|
2018-02-28T08:51:32.000Z
|
2022-02-28T06:49:23.000Z
|
scripts/lib/buildstats.py
|
nareshgbhat/luv-yocto
|
48976c54238dda0791e274927371265d259c0e5a
|
[
"MIT"
] | 27
|
2018-01-25T00:26:53.000Z
|
2020-08-09T05:20:04.000Z
|
scripts/lib/buildstats.py
|
nareshgbhat/luv-yocto
|
48976c54238dda0791e274927371265d259c0e5a
|
[
"MIT"
] | 51
|
2018-02-21T04:46:08.000Z
|
2022-03-02T04:20:41.000Z
|
#
# Copyright (c) 2017, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
"""Functionality for analyzing buildstats"""
import json
import logging
import os
import re
from collections import namedtuple,OrderedDict
from statistics import mean
log = logging.getLogger()
taskdiff_fields = ('pkg', 'pkg_op', 'task', 'task_op', 'value1', 'value2',
'absdiff', 'reldiff')
TaskDiff = namedtuple('TaskDiff', ' '.join(taskdiff_fields))
class BSError(Exception):
"""Error handling of buildstats"""
pass
class BSTaskAggregate(object):
"""Class representing multiple runs of the same task"""
properties = ('cputime', 'walltime', 'read_bytes', 'write_bytes',
'read_ops', 'write_ops')
def append(self, task):
"""Append new task"""
# Reset pre-calculated properties
assert isinstance(task, BSTask), "Type is '{}' instead of 'BSTask'".format(type(task))
self._properties = {}
self._tasks.append(task)
class BSRecipe(object):
"""Class representing buildstats of one recipe"""
def aggregate(self, bsrecipe):
"""Aggregate data of another recipe buildstats"""
if self.nevr != bsrecipe.nevr:
raise ValueError("Refusing to aggregate buildstats, recipe version "
"differs: {} vs. {}".format(self.nevr, bsrecipe.nevr))
if set(self.tasks.keys()) != set(bsrecipe.tasks.keys()):
raise ValueError("Refusing to aggregate buildstats, set of tasks "
"in {} differ".format(self.name))
for taskname, taskdata in bsrecipe.tasks.items():
if not isinstance(self.tasks[taskname], BSTaskAggregate):
self.tasks[taskname] = BSTaskAggregate([self.tasks[taskname]])
self.tasks[taskname].append(taskdata)
@property
class BuildStats(dict):
"""Class representing buildstats of one build"""
@property
def num_tasks(self):
"""Get number of tasks"""
num = 0
for recipe in self.values():
num += len(recipe.tasks)
return num
@classmethod
def from_json(cls, bs_json):
"""Create new BuildStats object from JSON object"""
buildstats = cls()
for recipe in bs_json:
if recipe['name'] in buildstats:
raise BSError("Cannot handle multiple versions of the same "
"package ({})".format(recipe['name']))
bsrecipe = BSRecipe(recipe['name'], recipe['epoch'],
recipe['version'], recipe['revision'])
for task, data in recipe['tasks'].items():
bsrecipe.tasks[task] = BSTask(data)
buildstats[recipe['name']] = bsrecipe
return buildstats
@staticmethod
def from_file_json(path):
"""Load buildstats from a JSON file"""
with open(path) as fobj:
bs_json = json.load(fobj)
return BuildStats.from_json(bs_json)
@staticmethod
def split_nevr(nevr):
"""Split name and version information from recipe "nevr" string"""
n_e_v, revision = nevr.rsplit('-', 1)
match = re.match(r'^(?P<name>\S+)-((?P<epoch>[0-9]{1,5})_)?(?P<version>[0-9]\S*)$',
n_e_v)
if not match:
# If we're not able to parse a version starting with a number, just
# take the part after last dash
match = re.match(r'^(?P<name>\S+)-((?P<epoch>[0-9]{1,5})_)?(?P<version>[^-]+)$',
n_e_v)
name = match.group('name')
version = match.group('version')
epoch = match.group('epoch')
return name, epoch, version, revision
@classmethod
def from_dir(cls, path):
"""Load buildstats from a buildstats directory"""
if not os.path.isfile(os.path.join(path, 'build_stats')):
raise BSError("{} does not look like a buildstats directory".format(path))
log.debug("Reading buildstats directory %s", path)
buildstats = cls()
subdirs = os.listdir(path)
for dirname in subdirs:
recipe_dir = os.path.join(path, dirname)
if not os.path.isdir(recipe_dir):
continue
name, epoch, version, revision = cls.split_nevr(dirname)
bsrecipe = BSRecipe(name, epoch, version, revision)
for task in os.listdir(recipe_dir):
bsrecipe.tasks[task] = BSTask.from_file(
os.path.join(recipe_dir, task))
if name in buildstats:
raise BSError("Cannot handle multiple versions of the same "
"package ({})".format(name))
buildstats[name] = bsrecipe
return buildstats
def aggregate(self, buildstats):
"""Aggregate other buildstats into this"""
if set(self.keys()) != set(buildstats.keys()):
raise ValueError("Refusing to aggregate buildstats, set of "
"recipes is different")
for pkg, data in buildstats.items():
self[pkg].aggregate(data)
def diff_buildstats(bs1, bs2, stat_attr, min_val=None, min_absdiff=None):
"""Compare the tasks of two buildstats"""
tasks_diff = []
pkgs = set(bs1.keys()).union(set(bs2.keys()))
for pkg in pkgs:
tasks1 = bs1[pkg].tasks if pkg in bs1 else {}
tasks2 = bs2[pkg].tasks if pkg in bs2 else {}
if not tasks1:
pkg_op = '+'
elif not tasks2:
pkg_op = '-'
else:
pkg_op = ' '
for task in set(tasks1.keys()).union(set(tasks2.keys())):
task_op = ' '
if task in tasks1:
val1 = getattr(bs1[pkg].tasks[task], stat_attr)
else:
task_op = '+'
val1 = 0
if task in tasks2:
val2 = getattr(bs2[pkg].tasks[task], stat_attr)
else:
val2 = 0
task_op = '-'
if val1 == 0:
reldiff = float('inf')
else:
reldiff = 100 * (val2 - val1) / val1
if min_val and max(val1, val2) < min_val:
log.debug("Filtering out %s:%s (%s)", pkg, task,
max(val1, val2))
continue
if min_absdiff and abs(val2 - val1) < min_absdiff:
log.debug("Filtering out %s:%s (difference of %s)", pkg, task,
val2-val1)
continue
tasks_diff.append(TaskDiff(pkg, pkg_op, task, task_op, val1, val2,
val2-val1, reldiff))
return tasks_diff
class BSVerDiff(object):
"""Class representing recipe version differences between two buildstats"""
| 36.362857
| 95
| 0.552055
|
#
# Copyright (c) 2017, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
"""Functionality for analyzing buildstats"""
import json
import logging
import os
import re
from collections import namedtuple,OrderedDict
from statistics import mean
log = logging.getLogger()
taskdiff_fields = ('pkg', 'pkg_op', 'task', 'task_op', 'value1', 'value2',
'absdiff', 'reldiff')
TaskDiff = namedtuple('TaskDiff', ' '.join(taskdiff_fields))
class BSError(Exception):
"""Error handling of buildstats"""
pass
class BSTask(dict):
def __init__(self, *args, **kwargs):
self['start_time'] = None
self['elapsed_time'] = None
self['status'] = None
self['iostat'] = {}
self['rusage'] = {}
self['child_rusage'] = {}
super(BSTask, self).__init__(*args, **kwargs)
@property
def cputime(self):
"""Sum of user and system time taken by the task"""
rusage = self['rusage']['ru_stime'] + self['rusage']['ru_utime']
if self['child_rusage']:
# Child rusage may have been optimized out
return rusage + self['child_rusage']['ru_stime'] + self['child_rusage']['ru_utime']
else:
return rusage
@property
def walltime(self):
"""Elapsed wall clock time"""
return self['elapsed_time']
@property
def read_bytes(self):
"""Bytes read from the block layer"""
return self['iostat']['read_bytes']
@property
def write_bytes(self):
"""Bytes written to the block layer"""
return self['iostat']['write_bytes']
@property
def read_ops(self):
"""Number of read operations on the block layer"""
if self['child_rusage']:
# Child rusage may have been optimized out
return self['rusage']['ru_inblock'] + self['child_rusage']['ru_inblock']
else:
return self['rusage']['ru_inblock']
@property
def write_ops(self):
"""Number of write operations on the block layer"""
if self['child_rusage']:
# Child rusage may have been optimized out
return self['rusage']['ru_oublock'] + self['child_rusage']['ru_oublock']
else:
return self['rusage']['ru_oublock']
@classmethod
def from_file(cls, buildstat_file):
"""Read buildstat text file"""
bs_task = cls()
log.debug("Reading task buildstats from %s", buildstat_file)
end_time = None
with open(buildstat_file) as fobj:
for line in fobj.readlines():
key, val = line.split(':', 1)
val = val.strip()
if key == 'Started':
start_time = float(val)
bs_task['start_time'] = start_time
elif key == 'Ended':
end_time = float(val)
elif key.startswith('IO '):
split = key.split()
bs_task['iostat'][split[1]] = int(val)
elif key.find('rusage') >= 0:
split = key.split()
ru_key = split[-1]
if ru_key in ('ru_stime', 'ru_utime'):
val = float(val)
else:
val = int(val)
ru_type = 'rusage' if split[0] == 'rusage' else \
'child_rusage'
bs_task[ru_type][ru_key] = val
elif key == 'Status':
bs_task['status'] = val
if end_time is not None and start_time is not None:
bs_task['elapsed_time'] = end_time - start_time
else:
raise BSError("{} looks like a invalid buildstats file".format(buildstat_file))
return bs_task
class BSTaskAggregate(object):
"""Class representing multiple runs of the same task"""
properties = ('cputime', 'walltime', 'read_bytes', 'write_bytes',
'read_ops', 'write_ops')
def __init__(self, tasks=None):
self._tasks = tasks or []
self._properties = {}
def __getattr__(self, name):
if name in self.properties:
if name not in self._properties:
# Calculate properties on demand only. We only provide mean
# value, so far
self._properties[name] = mean([getattr(t, name) for t in self._tasks])
return self._properties[name]
else:
raise AttributeError("'BSTaskAggregate' has no attribute '{}'".format(name))
def append(self, task):
"""Append new task"""
# Reset pre-calculated properties
assert isinstance(task, BSTask), "Type is '{}' instead of 'BSTask'".format(type(task))
self._properties = {}
self._tasks.append(task)
class BSRecipe(object):
"""Class representing buildstats of one recipe"""
def __init__(self, name, epoch, version, revision):
self.name = name
self.epoch = epoch
self.version = version
self.revision = revision
if epoch is None:
self.evr = "{}-{}".format(version, revision)
else:
self.evr = "{}_{}-{}".format(epoch, version, revision)
self.tasks = {}
def aggregate(self, bsrecipe):
"""Aggregate data of another recipe buildstats"""
if self.nevr != bsrecipe.nevr:
raise ValueError("Refusing to aggregate buildstats, recipe version "
"differs: {} vs. {}".format(self.nevr, bsrecipe.nevr))
if set(self.tasks.keys()) != set(bsrecipe.tasks.keys()):
raise ValueError("Refusing to aggregate buildstats, set of tasks "
"in {} differ".format(self.name))
for taskname, taskdata in bsrecipe.tasks.items():
if not isinstance(self.tasks[taskname], BSTaskAggregate):
self.tasks[taskname] = BSTaskAggregate([self.tasks[taskname]])
self.tasks[taskname].append(taskdata)
@property
def nevr(self):
return self.name + '-' + self.evr
class BuildStats(dict):
"""Class representing buildstats of one build"""
@property
def num_tasks(self):
"""Get number of tasks"""
num = 0
for recipe in self.values():
num += len(recipe.tasks)
return num
@classmethod
def from_json(cls, bs_json):
"""Create new BuildStats object from JSON object"""
buildstats = cls()
for recipe in bs_json:
if recipe['name'] in buildstats:
raise BSError("Cannot handle multiple versions of the same "
"package ({})".format(recipe['name']))
bsrecipe = BSRecipe(recipe['name'], recipe['epoch'],
recipe['version'], recipe['revision'])
for task, data in recipe['tasks'].items():
bsrecipe.tasks[task] = BSTask(data)
buildstats[recipe['name']] = bsrecipe
return buildstats
@staticmethod
def from_file_json(path):
"""Load buildstats from a JSON file"""
with open(path) as fobj:
bs_json = json.load(fobj)
return BuildStats.from_json(bs_json)
@staticmethod
def split_nevr(nevr):
"""Split name and version information from recipe "nevr" string"""
n_e_v, revision = nevr.rsplit('-', 1)
match = re.match(r'^(?P<name>\S+)-((?P<epoch>[0-9]{1,5})_)?(?P<version>[0-9]\S*)$',
n_e_v)
if not match:
# If we're not able to parse a version starting with a number, just
# take the part after last dash
match = re.match(r'^(?P<name>\S+)-((?P<epoch>[0-9]{1,5})_)?(?P<version>[^-]+)$',
n_e_v)
name = match.group('name')
version = match.group('version')
epoch = match.group('epoch')
return name, epoch, version, revision
@classmethod
def from_dir(cls, path):
"""Load buildstats from a buildstats directory"""
if not os.path.isfile(os.path.join(path, 'build_stats')):
raise BSError("{} does not look like a buildstats directory".format(path))
log.debug("Reading buildstats directory %s", path)
buildstats = cls()
subdirs = os.listdir(path)
for dirname in subdirs:
recipe_dir = os.path.join(path, dirname)
if not os.path.isdir(recipe_dir):
continue
name, epoch, version, revision = cls.split_nevr(dirname)
bsrecipe = BSRecipe(name, epoch, version, revision)
for task in os.listdir(recipe_dir):
bsrecipe.tasks[task] = BSTask.from_file(
os.path.join(recipe_dir, task))
if name in buildstats:
raise BSError("Cannot handle multiple versions of the same "
"package ({})".format(name))
buildstats[name] = bsrecipe
return buildstats
def aggregate(self, buildstats):
"""Aggregate other buildstats into this"""
if set(self.keys()) != set(buildstats.keys()):
raise ValueError("Refusing to aggregate buildstats, set of "
"recipes is different")
for pkg, data in buildstats.items():
self[pkg].aggregate(data)
def diff_buildstats(bs1, bs2, stat_attr, min_val=None, min_absdiff=None):
"""Compare the tasks of two buildstats"""
tasks_diff = []
pkgs = set(bs1.keys()).union(set(bs2.keys()))
for pkg in pkgs:
tasks1 = bs1[pkg].tasks if pkg in bs1 else {}
tasks2 = bs2[pkg].tasks if pkg in bs2 else {}
if not tasks1:
pkg_op = '+'
elif not tasks2:
pkg_op = '-'
else:
pkg_op = ' '
for task in set(tasks1.keys()).union(set(tasks2.keys())):
task_op = ' '
if task in tasks1:
val1 = getattr(bs1[pkg].tasks[task], stat_attr)
else:
task_op = '+'
val1 = 0
if task in tasks2:
val2 = getattr(bs2[pkg].tasks[task], stat_attr)
else:
val2 = 0
task_op = '-'
if val1 == 0:
reldiff = float('inf')
else:
reldiff = 100 * (val2 - val1) / val1
if min_val and max(val1, val2) < min_val:
log.debug("Filtering out %s:%s (%s)", pkg, task,
max(val1, val2))
continue
if min_absdiff and abs(val2 - val1) < min_absdiff:
log.debug("Filtering out %s:%s (difference of %s)", pkg, task,
val2-val1)
continue
tasks_diff.append(TaskDiff(pkg, pkg_op, task, task_op, val1, val2,
val2-val1, reldiff))
return tasks_diff
class BSVerDiff(object):
"""Class representing recipe version differences between two buildstats"""
def __init__(self, bs1, bs2):
RecipeVerDiff = namedtuple('RecipeVerDiff', 'left right')
recipes1 = set(bs1.keys())
recipes2 = set(bs2.keys())
self.new = dict([(r, bs2[r]) for r in sorted(recipes2 - recipes1)])
self.dropped = dict([(r, bs1[r]) for r in sorted(recipes1 - recipes2)])
self.echanged = {}
self.vchanged = {}
self.rchanged = {}
self.unchanged = {}
self.empty_diff = False
common = recipes2.intersection(recipes1)
if common:
for recipe in common:
rdiff = RecipeVerDiff(bs1[recipe], bs2[recipe])
if bs1[recipe].epoch != bs2[recipe].epoch:
self.echanged[recipe] = rdiff
elif bs1[recipe].version != bs2[recipe].version:
self.vchanged[recipe] = rdiff
elif bs1[recipe].revision != bs2[recipe].revision:
self.rchanged[recipe] = rdiff
else:
self.unchanged[recipe] = rdiff
if len(recipes1) == len(recipes2) == len(self.unchanged):
self.empty_diff = True
def __bool__(self):
return not self.empty_diff
| 2,290
| 3,013
| 182
|
43f48d073e76e3341579b198e6e0cf6ab06adbf3
| 9,011
|
py
|
Python
|
pretrain_style_enc/style_enc_train.py
|
ShusenTang/WriteLikeYou
|
a335b02f806d846ba85a6f37f990eb8c2d57b3b6
|
[
"Apache-2.0"
] | 9
|
2021-01-20T02:52:21.000Z
|
2022-01-12T02:55:06.000Z
|
pretrain_style_enc/style_enc_train.py
|
ShusenTang/WriteLikeYou
|
a335b02f806d846ba85a6f37f990eb8c2d57b3b6
|
[
"Apache-2.0"
] | 2
|
2021-06-30T03:40:28.000Z
|
2022-02-10T09:20:48.000Z
|
pretrain_style_enc/style_enc_train.py
|
ShusenTang/WriteLikeYou
|
a335b02f806d846ba85a6f37f990eb8c2d57b3b6
|
[
"Apache-2.0"
] | 2
|
2021-09-05T15:10:49.000Z
|
2021-11-02T11:08:22.000Z
|
import os
import time
import random
import ast
import json
import argparse
import numpy as np
import tensorflow as tf
from style_enc_model import Style_Enc_Model
from data import CHN_Style_DataLoader
from model_utils import save_model, reset_graph
NPZ_DIR = "../WriteLikeYouData/npz_relative_dist/CASIA_rdp4.0"
assert os.path.exists(NPZ_DIR)
print("NPZ_DIR:", NPZ_DIR)
def train(sess, model, valid_model, train_dataloader, valid_dataloader, log_root):
"""Train a model."""
# Setup summary writer.
os.makedirs(log_root, exist_ok=True)
train_summary_writer = tf.summary.FileWriter(log_root + "/train_log", sess.graph)
valid_summary_writer = tf.summary.FileWriter(log_root + "/valid_log")
saver = tf.train.Saver(tf.global_variables(), max_to_keep=5)
t_vars = tf.trainable_variables()
count_t_vars = 0
for var in t_vars:
num_param = np.prod(var.get_shape().as_list())
count_t_vars += num_param
print('%s %s %d' % (var.name, str(var.get_shape()), num_param))
print('Total trainable variables %d.' % count_t_vars)
best_valid_loss = 999999.0
MIN_LR, DECAY = 0.0000001, 0.9999
for step in range(1000000):
start = time.time()
batch_zi_array, batch_len_array = train_dataloader.get_random_batch_data()
curr_learning_rate = (model.init_lr - MIN_LR) * (DECAY ** step) + MIN_LR
feed = {
model.input_data: batch_zi_array,
model.seq_lens: batch_len_array,
model.lr: curr_learning_rate,
}
# training
(_, loss, ac_loss, mhe_loss, summ_str) = sess.run([model.train_op, model.loss, model.ac_loss, model.mhe_loss, model.summ], feed)
train_summary_writer.add_summary(summ_str, step)
train_summary_writer.flush()
# log
if step % 50 == 0 and step > 0:
print("Train step %d, lr:%.6f, loss: %.4f, ac_loss: %.4f, mhe_loss: %.4f; train time: %.2fs" % (
step, curr_learning_rate, loss, ac_loss, mhe_loss, time.time() - start))
# validation
if step % 500 == 0 and step > 0:
start = time.time()
print("validating...", log_root)
valid_loss, valid_ac_loss, valid_mhe_loss = evaluate_model(sess, valid_model, valid_dataloader)
valid_loss_summ = tf.summary.Summary()
valid_loss_summ.value.add(tag='valid_loss', simple_value=float(valid_loss))
valid_loss_summ.value.add(tag='valid_ac_loss', simple_value=float(valid_ac_loss))
valid_loss_summ.value.add(tag='valid_mhe_loss', simple_value=float(valid_mhe_loss))
print("Best valid loss: %.4f, loss: %.4f, ac_loss: %.4f, mhe_loss: %.4f; valid time: %.2fs" % (
best_valid_loss, valid_loss, valid_ac_loss, valid_mhe_loss, time.time() - start))
valid_summary_writer.add_summary(valid_loss_summ, step)
valid_summary_writer.flush()
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
print("Better model, best_valid_loss: %.4f" % best_valid_loss)
save_model(sess, saver, log_root, step)
if __name__ == '__main__':
main()
| 37.861345
| 136
| 0.642215
|
import os
import time
import random
import ast
import json
import argparse
import numpy as np
import tensorflow as tf
from style_enc_model import Style_Enc_Model
from data import CHN_Style_DataLoader
from model_utils import save_model, reset_graph
NPZ_DIR = "../WriteLikeYouData/npz_relative_dist/CASIA_rdp4.0"
assert os.path.exists(NPZ_DIR)
print("NPZ_DIR:", NPZ_DIR)
def print_save_args(args, save_dir, readme=""):
os.makedirs(save_dir, exist_ok=True)
args_dict = vars(args)
for key in args_dict.keys():
print(key, "=", args_dict[key])
with open(os.path.join(save_dir, 'model_config.json'), 'w') as f:
json.dump(args_dict, f, indent=True)
if readme != "":
with open(os.path.join(save_dir, 'readme.txt'),"w") as f:
f.write(readme)
def evaluate_model(sess, model, dataloader):
avg_loss = 0.0
avg_ac_loss = 0.0
avg_mhe_loss = 0.0
bn = dataloader.size // dataloader.batch_size
for i in range(bn):
batch_zi_array, batch_lens = dataloader.get_random_batch_data()
feed = {
model.input_data: batch_zi_array,
model.seq_lens: batch_lens,
}
(loss, ac_loss, mhe_loss) = sess.run([model.loss, model.ac_loss, model.mhe_loss], feed)
avg_loss += loss
avg_ac_loss += ac_loss
avg_mhe_loss += mhe_loss
bn += 1
avg_loss /= bn
avg_ac_loss /= bn
avg_mhe_loss /= bn
return avg_loss, avg_ac_loss, avg_mhe_loss
def train(sess, model, valid_model, train_dataloader, valid_dataloader, log_root):
"""Train a model."""
# Setup summary writer.
os.makedirs(log_root, exist_ok=True)
train_summary_writer = tf.summary.FileWriter(log_root + "/train_log", sess.graph)
valid_summary_writer = tf.summary.FileWriter(log_root + "/valid_log")
saver = tf.train.Saver(tf.global_variables(), max_to_keep=5)
t_vars = tf.trainable_variables()
count_t_vars = 0
for var in t_vars:
num_param = np.prod(var.get_shape().as_list())
count_t_vars += num_param
print('%s %s %d' % (var.name, str(var.get_shape()), num_param))
print('Total trainable variables %d.' % count_t_vars)
best_valid_loss = 999999.0
MIN_LR, DECAY = 0.0000001, 0.9999
for step in range(1000000):
start = time.time()
batch_zi_array, batch_len_array = train_dataloader.get_random_batch_data()
curr_learning_rate = (model.init_lr - MIN_LR) * (DECAY ** step) + MIN_LR
feed = {
model.input_data: batch_zi_array,
model.seq_lens: batch_len_array,
model.lr: curr_learning_rate,
}
# training
(_, loss, ac_loss, mhe_loss, summ_str) = sess.run([model.train_op, model.loss, model.ac_loss, model.mhe_loss, model.summ], feed)
train_summary_writer.add_summary(summ_str, step)
train_summary_writer.flush()
# log
if step % 50 == 0 and step > 0:
print("Train step %d, lr:%.6f, loss: %.4f, ac_loss: %.4f, mhe_loss: %.4f; train time: %.2fs" % (
step, curr_learning_rate, loss, ac_loss, mhe_loss, time.time() - start))
# validation
if step % 500 == 0 and step > 0:
start = time.time()
print("validating...", log_root)
valid_loss, valid_ac_loss, valid_mhe_loss = evaluate_model(sess, valid_model, valid_dataloader)
valid_loss_summ = tf.summary.Summary()
valid_loss_summ.value.add(tag='valid_loss', simple_value=float(valid_loss))
valid_loss_summ.value.add(tag='valid_ac_loss', simple_value=float(valid_ac_loss))
valid_loss_summ.value.add(tag='valid_mhe_loss', simple_value=float(valid_mhe_loss))
print("Best valid loss: %.4f, loss: %.4f, ac_loss: %.4f, mhe_loss: %.4f; valid time: %.2fs" % (
best_valid_loss, valid_loss, valid_ac_loss, valid_mhe_loss, time.time() - start))
valid_summary_writer.add_summary(valid_loss_summ, step)
valid_summary_writer.flush()
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
print("Better model, best_valid_loss: %.4f" % best_valid_loss)
save_model(sess, saver, log_root, step)
def trainer(npzs,
args,
train_GB_range,
valid_GB_range):
reset_graph()
model = Style_Enc_Model(
init_lr = args.init_lr,
num_writer_per_batch=args.num_writer_per_batch,
num_entry_per_writer=args.num_entry_per_writer,
max_seq_len=args.max_seq_len,
rnn_size=args.rnn_size,
rnn_layers=args.rnn_layers,
embedding_dim = args.embedding_dim,
ac_softmax_m=args.ac_softmax_m,
ac_softmax_s=args.ac_softmax_s,
mhe_lambda=args.mhe_lambda,
input_keep_prob = args.input_keep_prob,
output_keep_prob = args.output_keep_prob,
state_keep_prob = args.state_keep_prob)
valid_model = Style_Enc_Model(
init_lr = args.init_lr,
num_writer_per_batch=args.num_writer_per_batch,
num_entry_per_writer=args.num_entry_per_writer,
max_seq_len=args.max_seq_len,
rnn_size=args.rnn_size,
rnn_layers=args.rnn_layers,
embedding_dim = args.embedding_dim,
ac_softmax_m=args.ac_softmax_m,
ac_softmax_s=args.ac_softmax_s,
mhe_lambda=args.mhe_lambda,
input_keep_prob = 1.0,
output_keep_prob = 1.0,
state_keep_prob = 1.0,
reuse=True,
is_training=False)
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
print("train data:")
train_dataloader = CHN_Style_DataLoader(
npzs,
GB_range = train_GB_range,
is_traing=True,
num_writer_per_batch=args.num_writer_per_batch,
num_entry_per_writer=args.num_entry_per_writer,
max_seq_len=args.max_seq_len,
scale_factor=200.0, # for whole data set
random_scale_factor=args.random_scale_factor, # only for training
augment_stroke_prob=args.augment_stroke_prob
)
print("valid data:")
valid_dataloader = CHN_Style_DataLoader(
npzs,
GB_range = valid_GB_range,
is_traing=False,
num_writer_per_batch=args.num_writer_per_batch,
num_entry_per_writer=args.num_entry_per_writer,
max_seq_len=args.max_seq_len,
scale_factor=200.0, # for whole data set
random_scale_factor=0.0,
augment_stroke_prob=0.0
)
train(sess, model, valid_model, train_dataloader, valid_dataloader, args.log_root)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--init_lr", type=float, default=0.001, help="init learning rate")
parser.add_argument("--num_writer_per_batch", type=int, default=64)
parser.add_argument("--num_entry_per_writer", type=int, default=16)
parser.add_argument("--max_seq_len", type=int, default=110)
parser.add_argument("--random_scale_factor", type=float, default=0.1)
parser.add_argument("--augment_stroke_prob", type=float, default=0.1)
parser.add_argument("--rnn_size", type=int, default=256)
parser.add_argument("--rnn_layers", type=int, default=1)
parser.add_argument("--embedding_dim", type=int, default=256)
parser.add_argument("--ac_softmax_m", type=float, default=0.35)
parser.add_argument("--ac_softmax_s", type=int, default=30)
parser.add_argument("--mhe_lambda", type=float, default=0.0)
parser.add_argument("--input_keep_prob", type=float, default=1.0)
parser.add_argument("--output_keep_prob", type=float, default=1.0)
parser.add_argument("--state_keep_prob", type=float, default=1.0)
parser.add_argument("--log_root", type=str, default="./models/demo")
args = parser.parse_args()
# CUDA_VISIBLE_DEVICES=3 python style_enc_train.py --rnn_layers=4 --ac_softmax_m=0.1 --log_root=./models/0107_layer4_m010
npz_10 = [os.path.join(NPZ_DIR, "%03d.npz" % n) for n in range(1, 421)]
npz_11 = [os.path.join(NPZ_DIR, "%d.npz" % n) for n in range(1001, 1301)]
npz_12 = [os.path.join(NPZ_DIR, "%d.npz" % n) for n in range(501, 801)]
# npz_C = [os.path.join(NPZ_DIR, "C%03d-f.npz" % n) for n in range(1, 61)]
npzs = npz_10 + npz_11 + npz_12
for npz in npzs:
assert os.path.exists(npz)
readme = "%d writers: 1.0 %d, 1.1 %d, 1.2 %d." % (len(npzs), len(npz_10), len(npz_11), len(npz_12))
print(readme)
readme += "\nGB range: train(201-6763), valid(1-200)"
print_save_args(args, args.log_root, readme=readme)
trainer(npzs,
args = args,
train_GB_range=(201, 6763),
valid_GB_range=(1, 200))
if __name__ == '__main__':
main()
| 5,715
| 0
| 92
|
dfa14b2ec00f0ef5059bf740c1c8705161a50cb8
| 225
|
py
|
Python
|
LivroCap1/PagarAluguel.py
|
Lucas-py/Python-Basico02
|
802187be3dc948a743c5883b1d56f52163f7450d
|
[
"MIT"
] | null | null | null |
LivroCap1/PagarAluguel.py
|
Lucas-py/Python-Basico02
|
802187be3dc948a743c5883b1d56f52163f7450d
|
[
"MIT"
] | null | null | null |
LivroCap1/PagarAluguel.py
|
Lucas-py/Python-Basico02
|
802187be3dc948a743c5883b1d56f52163f7450d
|
[
"MIT"
] | null | null | null |
km = float(input('digite km rodados: '))
dias = float(input('total de dias: '))
total_de_km = km * 0.15
total_de_dias = dias * 60
total_a_pagar = total_de_dias + total_de_km
print(f'total a pagar e: {total_a_pagar} R$')
| 18.75
| 45
| 0.693333
|
km = float(input('digite km rodados: '))
dias = float(input('total de dias: '))
total_de_km = km * 0.15
total_de_dias = dias * 60
total_a_pagar = total_de_dias + total_de_km
print(f'total a pagar e: {total_a_pagar} R$')
| 0
| 0
| 0
|
8a84843266300b75a474714ae72939c9085e1f51
| 31,763
|
py
|
Python
|
msgraph-cli-extensions/beta/usersfunctions_beta/azext_usersfunctions_beta/generated/custom.py
|
thewahome/msgraph-cli
|
33127d9efa23a0e5f5303c93242fbdbb73348671
|
[
"MIT"
] | null | null | null |
msgraph-cli-extensions/beta/usersfunctions_beta/azext_usersfunctions_beta/generated/custom.py
|
thewahome/msgraph-cli
|
33127d9efa23a0e5f5303c93242fbdbb73348671
|
[
"MIT"
] | null | null | null |
msgraph-cli-extensions/beta/usersfunctions_beta/azext_usersfunctions_beta/generated/custom.py
|
thewahome/msgraph-cli
|
33127d9efa23a0e5f5303c93242fbdbb73348671
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines
| 50.337559
| 120
| 0.412681
|
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines
def usersfunctions_user_activity_recent(client,
user_id):
return client.recent(user_id=user_id)
def usersfunctions_user_calendar_calendar_view_calendar_allowed_calendar_sharing_role(client,
user_id,
event_id,
user):
return client.allowed_calendar_sharing_roles(user_id=user_id,
event_id=event_id,
user=user)
def usersfunctions_user_calendar_calendar_view_exception_occurrence_delta(client,
user_id,
event_id):
return client.delta(user_id=user_id,
event_id=event_id)
def usersfunctions_user_calendar_calendar_view_instance_delta(client,
user_id,
event_id):
return client.delta(user_id=user_id,
event_id=event_id)
def usersfunctions_user_calendar_calendar_view_delta(client,
user_id):
return client.delta(user_id=user_id)
def usersfunctions_user_calendar_event_calendar_allowed_calendar_sharing_role(client,
user_id,
event_id,
user):
return client.allowed_calendar_sharing_roles(user_id=user_id,
event_id=event_id,
user=user)
def usersfunctions_user_calendar_event_exception_occurrence_delta(client,
user_id,
event_id):
return client.delta(user_id=user_id,
event_id=event_id)
def usersfunctions_user_calendar_event_instance_delta(client,
user_id,
event_id):
return client.delta(user_id=user_id,
event_id=event_id)
def usersfunctions_user_calendar_event_delta(client,
user_id):
return client.delta(user_id=user_id)
def usersfunctions_user_calendar_allowed_calendar_sharing_role(client,
user_id,
user):
return client.allowed_calendar_sharing_roles(user_id=user_id,
user=user)
def usersfunctions_user_calendar_group_calendar_calendar_view_calendar_allowed_calendar_sharing_role(client,
user_id,
calendar_group_id,
calendar_id,
event_id,
user):
return client.allowed_calendar_sharing_roles(user_id=user_id,
calendar_group_id=calendar_group_id,
calendar_id=calendar_id,
event_id=event_id,
user=user)
def usersfunctions_user_calendar_group_calendar_calendar_view_exception_occurrence_delta(client,
user_id,
calendar_group_id,
calendar_id,
event_id):
return client.delta(user_id=user_id,
calendar_group_id=calendar_group_id,
calendar_id=calendar_id,
event_id=event_id)
def usersfunctions_user_calendar_group_calendar_calendar_view_instance_delta(client,
user_id,
calendar_group_id,
calendar_id,
event_id):
return client.delta(user_id=user_id,
calendar_group_id=calendar_group_id,
calendar_id=calendar_id,
event_id=event_id)
def usersfunctions_user_calendar_group_calendar_calendar_view_delta(client,
user_id,
calendar_group_id,
calendar_id):
return client.delta(user_id=user_id,
calendar_group_id=calendar_group_id,
calendar_id=calendar_id)
def usersfunctions_user_calendar_group_calendar_event_calendar_allowed_calendar_sharing_role(client,
user_id,
calendar_group_id,
calendar_id,
event_id,
user):
return client.allowed_calendar_sharing_roles(user_id=user_id,
calendar_group_id=calendar_group_id,
calendar_id=calendar_id,
event_id=event_id,
user=user)
def usersfunctions_user_calendar_group_calendar_event_exception_occurrence_delta(client,
user_id,
calendar_group_id,
calendar_id,
event_id):
return client.delta(user_id=user_id,
calendar_group_id=calendar_group_id,
calendar_id=calendar_id,
event_id=event_id)
def usersfunctions_user_calendar_group_calendar_event_instance_delta(client,
user_id,
calendar_group_id,
calendar_id,
event_id):
return client.delta(user_id=user_id,
calendar_group_id=calendar_group_id,
calendar_id=calendar_id,
event_id=event_id)
def usersfunctions_user_calendar_group_calendar_event_delta(client,
user_id,
calendar_group_id,
calendar_id):
return client.delta(user_id=user_id,
calendar_group_id=calendar_group_id,
calendar_id=calendar_id)
def usersfunctions_user_calendar_group_calendar_allowed_calendar_sharing_role(client,
user_id,
calendar_group_id,
calendar_id,
user):
return client.allowed_calendar_sharing_roles(user_id=user_id,
calendar_group_id=calendar_group_id,
calendar_id=calendar_id,
user=user)
def usersfunctions_user_calendar_calendar_view_calendar_allowed_calendar_sharing_role(client,
user_id,
calendar_id,
event_id,
user):
return client.allowed_calendar_sharing_roles(user_id=user_id,
calendar_id=calendar_id,
event_id=event_id,
user=user)
def usersfunctions_user_calendar_calendar_view_exception_occurrence_delta(client,
user_id,
calendar_id,
event_id):
return client.delta(user_id=user_id,
calendar_id=calendar_id,
event_id=event_id)
def usersfunctions_user_calendar_calendar_view_instance_delta(client,
user_id,
calendar_id,
event_id):
return client.delta(user_id=user_id,
calendar_id=calendar_id,
event_id=event_id)
def usersfunctions_user_calendar_calendar_view_delta(client,
user_id,
calendar_id):
return client.delta(user_id=user_id,
calendar_id=calendar_id)
def usersfunctions_user_calendar_event_calendar_allowed_calendar_sharing_role(client,
user_id,
calendar_id,
event_id,
user):
return client.allowed_calendar_sharing_roles(user_id=user_id,
calendar_id=calendar_id,
event_id=event_id,
user=user)
def usersfunctions_user_calendar_event_exception_occurrence_delta(client,
user_id,
calendar_id,
event_id):
return client.delta(user_id=user_id,
calendar_id=calendar_id,
event_id=event_id)
def usersfunctions_user_calendar_event_instance_delta(client,
user_id,
calendar_id,
event_id):
return client.delta(user_id=user_id,
calendar_id=calendar_id,
event_id=event_id)
def usersfunctions_user_calendar_event_delta(client,
user_id,
calendar_id):
return client.delta(user_id=user_id,
calendar_id=calendar_id)
def usersfunctions_user_calendar_allowed_calendar_sharing_role(client,
user_id,
calendar_id,
user):
return client.allowed_calendar_sharing_roles(user_id=user_id,
calendar_id=calendar_id,
user=user)
def usersfunctions_user_calendar_view_calendar_calendar_view_delta(client,
user_id,
event_id):
return client.delta(user_id=user_id,
event_id=event_id)
def usersfunctions_user_calendar_view_calendar_event_delta(client,
user_id,
event_id):
return client.delta(user_id=user_id,
event_id=event_id)
def usersfunctions_user_calendar_view_calendar_allowed_calendar_sharing_role(client,
user_id,
event_id,
user):
return client.allowed_calendar_sharing_roles(user_id=user_id,
event_id=event_id,
user=user)
def usersfunctions_user_calendar_view_exception_occurrence_delta(client,
user_id,
event_id):
return client.delta(user_id=user_id,
event_id=event_id)
def usersfunctions_user_calendar_view_instance_delta(client,
user_id,
event_id):
return client.delta(user_id=user_id,
event_id=event_id)
def usersfunctions_user_calendar_view_delta(client,
user_id):
return client.delta(user_id=user_id)
def usersfunctions_user_contact_folder_child_folder_delta(client,
user_id,
contact_folder_id):
return client.delta(user_id=user_id,
contact_folder_id=contact_folder_id)
def usersfunctions_user_contact_folder_contact_delta(client,
user_id,
contact_folder_id):
return client.delta(user_id=user_id,
contact_folder_id=contact_folder_id)
def usersfunctions_user_contact_folder_delta(client,
user_id):
return client.delta(user_id=user_id)
def usersfunctions_user_contact_delta(client,
user_id):
return client.delta(user_id=user_id)
def usersfunctions_user_event_calendar_calendar_view_delta(client,
user_id,
event_id):
return client.delta(user_id=user_id,
event_id=event_id)
def usersfunctions_user_event_calendar_event_delta(client,
user_id,
event_id):
return client.delta(user_id=user_id,
event_id=event_id)
def usersfunctions_user_event_calendar_allowed_calendar_sharing_role(client,
user_id,
event_id,
user):
return client.allowed_calendar_sharing_roles(user_id=user_id,
event_id=event_id,
user=user)
def usersfunctions_user_event_exception_occurrence_delta(client,
user_id,
event_id):
return client.delta(user_id=user_id,
event_id=event_id)
def usersfunctions_user_event_instance_delta(client,
user_id,
event_id):
return client.delta(user_id=user_id,
event_id=event_id)
def usersfunctions_user_event_delta(client,
user_id):
return client.delta(user_id=user_id)
def usersfunctions_user_mail_folder_child_folder_delta(client,
user_id,
mail_folder_id):
return client.delta(user_id=user_id,
mail_folder_id=mail_folder_id)
def usersfunctions_user_mail_folder_message_delta(client,
user_id,
mail_folder_id):
return client.delta(user_id=user_id,
mail_folder_id=mail_folder_id)
def usersfunctions_user_mail_folder_delta(client,
user_id):
return client.delta(user_id=user_id)
def usersfunctions_user_managed_app_registration_show_user_id_with_flagged_app_registration(client,
user_id):
return client.get_user_ids_with_flagged_app_registration(user_id=user_id)
def usersfunctions_user_managed_device_show_file_vault_key(client,
user_id,
managed_device_id):
return client.get_file_vault_key(user_id=user_id,
managed_device_id=managed_device_id)
def usersfunctions_user_managed_device_show_non_compliant_setting(client,
user_id,
managed_device_id):
return client.get_non_compliant_settings(user_id=user_id,
managed_device_id=managed_device_id)
def usersfunctions_user_message_delta(client,
user_id):
return client.delta(user_id=user_id)
def usersfunctions_user_delta(client):
return client.delta()
def usersfunctions_user_export_device_and_app_management_data_d390(client,
user_id):
return client.export_device_and_app_management_data_d390(user_id=user_id)
def usersfunctions_user_export_device_and_app_management_data623_c(client,
user_id,
skip,
top):
return client.export_device_and_app_management_data623_c(user_id=user_id,
skip=skip,
top=top)
def usersfunctions_user_find_room_ac49(client,
user_id,
room_list):
return client.find_rooms_ac49(user_id=user_id,
room_list=room_list)
def usersfunctions_user_find_room_d266(client,
user_id):
return client.find_rooms_d266(user_id=user_id)
def usersfunctions_user_find_room_list(client,
user_id):
return client.find_room_lists(user_id=user_id)
def usersfunctions_user_is_managed_app_user_blocked(client,
user_id):
return client.is_managed_app_user_blocked(user_id=user_id)
def usersfunctions_user_reminder_view(client,
user_id,
start_date_time,
end_date_time):
return client.reminder_view(user_id=user_id,
start_date_time=start_date_time,
end_date_time=end_date_time)
def usersfunctions_user_show_effective_device_enrollment_configuration(client,
user_id):
return client.get_effective_device_enrollment_configurations(user_id=user_id)
def usersfunctions_user_show_logged_on_managed_device(client,
user_id):
return client.get_logged_on_managed_devices(user_id=user_id)
def usersfunctions_user_show_managed_app_blocked_user(client):
return client.get_managed_app_blocked_users()
def usersfunctions_user_show_managed_app_diagnostic_statuses(client,
user_id):
return client.get_managed_app_diagnostic_statuses(user_id=user_id)
def usersfunctions_user_show_managed_app_policy(client,
user_id):
return client.get_managed_app_policies(user_id=user_id)
def usersfunctions_user_show_managed_device_with_app_failure(client,
user_id):
return client.get_managed_devices_with_app_failures(user_id=user_id)
def usersfunctions_user_show_managed_device_with_failed_or_pending_app(client,
user_id):
return client.get_managed_devices_with_failed_or_pending_apps(user_id=user_id)
def usersfunctions_user_onenote_notebook_section_group_section_page_preview(client,
user_id,
notebook_id,
section_group_id,
onenote_section_id,
onenote_page_id):
return client.preview(user_id=user_id,
notebook_id=notebook_id,
section_group_id=section_group_id,
onenote_section_id=onenote_section_id,
onenote_page_id=onenote_page_id)
def usersfunctions_user_onenote_notebook_section_page_preview(client,
user_id,
notebook_id,
onenote_section_id,
onenote_page_id):
return client.preview(user_id=user_id,
notebook_id=notebook_id,
onenote_section_id=onenote_section_id,
onenote_page_id=onenote_page_id)
def usersfunctions_user_onenote_notebook_show_recent_notebook(client,
user_id,
include_personal_notebooks):
if include_personal_notebooks is None:
include_personal_notebooks = False
return client.get_recent_notebooks(user_id=user_id,
include_personal_notebooks=include_personal_notebooks)
def usersfunctions_user_onenote_page_preview(client,
user_id,
onenote_page_id):
return client.preview(user_id=user_id,
onenote_page_id=onenote_page_id)
def usersfunctions_user_onenote_page_parent_notebook_section_group_section_page_preview(client,
user_id,
onenote_page_id,
section_group_id,
onenote_section_id,
onenote_page_id1):
return client.preview(user_id=user_id,
onenote_page_id=onenote_page_id,
section_group_id=section_group_id,
onenote_section_id=onenote_section_id,
onenote_page_id1=onenote_page_id1)
def usersfunctions_user_onenote_page_parent_notebook_section_page_preview(client,
user_id,
onenote_page_id,
onenote_section_id,
onenote_page_id1):
return client.preview(user_id=user_id,
onenote_page_id=onenote_page_id,
onenote_section_id=onenote_section_id,
onenote_page_id1=onenote_page_id1)
def usersfunctions_user_onenote_page_parent_section_page_preview(client,
user_id,
onenote_page_id,
onenote_page_id1):
return client.preview(user_id=user_id,
onenote_page_id=onenote_page_id,
onenote_page_id1=onenote_page_id1)
def usersfunctions_user_onenote_section_group_parent_notebook_section_page_preview(client,
user_id,
section_group_id,
onenote_section_id,
onenote_page_id):
return client.preview(user_id=user_id,
section_group_id=section_group_id,
onenote_section_id=onenote_section_id,
onenote_page_id=onenote_page_id)
def usersfunctions_user_onenote_section_group_section_page_preview(client,
user_id,
section_group_id,
onenote_section_id,
onenote_page_id):
return client.preview(user_id=user_id,
section_group_id=section_group_id,
onenote_section_id=onenote_section_id,
onenote_page_id=onenote_page_id)
def usersfunctions_user_onenote_section_page_preview(client,
user_id,
onenote_section_id,
onenote_page_id):
return client.preview(user_id=user_id,
onenote_section_id=onenote_section_id,
onenote_page_id=onenote_page_id)
def usersfunctions_user_outlook_supported_language(client,
user_id):
return client.supported_languages(user_id=user_id)
def usersfunctions_user_outlook_supported_time_zone_ee48(client,
user_id):
return client.supported_time_zones_ee48(user_id=user_id)
def usersfunctions_user_outlook_supported_time_zones51_c6(client,
user_id,
time_zone_standard):
return client.supported_time_zones51_c6(user_id=user_id,
time_zone_standard=time_zone_standard)
def usersfunctions_user_planner_all_delta(client,
user_id):
return client.delta(user_id=user_id)
def usersfunctions_user_todo_list_task_delta(client,
user_id,
todo_task_list_id):
return client.delta(user_id=user_id,
todo_task_list_id=todo_task_list_id)
def usersfunctions_user_todo_list_delta(client,
user_id):
return client.delta(user_id=user_id)
| 29,048
| 0
| 2,050
|
2205b1d3c57e5bd6fc3104e3b2ab599b5cfe916e
| 12,357
|
py
|
Python
|
PyISY/Climate.py
|
sneelco/PyISY
|
f1f916cd7951b1b6a5235bb36444c695fe3294e1
|
[
"Apache-2.0"
] | 2
|
2015-08-04T14:02:58.000Z
|
2016-12-15T05:54:00.000Z
|
PyISY/Climate.py
|
sneelco/PyISY
|
f1f916cd7951b1b6a5235bb36444c695fe3294e1
|
[
"Apache-2.0"
] | null | null | null |
PyISY/Climate.py
|
sneelco/PyISY
|
f1f916cd7951b1b6a5235bb36444c695fe3294e1
|
[
"Apache-2.0"
] | null | null | null |
from VarEvents import Property
from VarEvents import Var
from time import sleep
from xml.dom import minidom
class Climate(object):
"""
This class handles the ISY climate module.
| parent: ISY class
| xml: String of xml data containing the climate data
:ivar Gust_Speed: Watched Variable representing the gust speed.
:ivar Temperature: Watched Variable representing the temperature.
:ivar Temperature_Rate: Watched Variable representing the temperature rate.
:ivar Rain_Rate: Watched Variable representing the rain rate.
:ivar Max_Rain_Rate: Watched Variable representing the rain rate.
:ivar Temperature_High: Watched Variable representing the high temperature.
:ivar Pressure_Rate: Watched variable representing the pressure rate.
:ivar Wind_Speed: Watched Variable representing the wind speed.
:ivar Elevation: Watched Variable representing the elevation.
:ivar Dew_Point: Watched Variable representing the dew point.
:ivar Wind_Average_Speed: Watched Variable representing the avg wind speed.
:ivar Pressure: Watched Variable representing the pressure.
:ivar Gust_Direction: Watched Variable representing the gust direction.
:ivar Wind_Average_Direction: Watched Variable representing the average wind
direction.
:ivar Light: Watched Variable representing the amount of light.
:ivar Wind_Direction: Watched Variable representing the wind direction.
:ivar Humidity: Watched Variable representing the humidity.
:ivar Humidity_Rate: Watched Variable representing the humidity rate.
:ivar Rain_Today: Watched Variable representing the forcast rain today.
:ivar Light_Rate: Watched Variable representing the light rate.
:ivar Water_Deficit_Yesterday: Watched Variable representing the water
deficit yesterday.
:ivar Irrigation_Requirement: Watched Variable representing the irrigation
requirement.
:ivar Feels_Like: Watched Variable representing the feels like temperature.
:ivar Temperature_Low: Watched Variable representing the low temperature.
:ivar Evapotranspiration: Watched Variable representing the
evapotranspiration amount.
:ivar Gust_Speed_units: Gust speed units.
:ivar Temperature_units: Temperature units.
:ivar Temperature_Rate_units: Temperature rate units.
:ivar Rain_Rate_units: Rain rate units.
:ivar Max_Rain_Rate_units: Max rain rate units.
:ivar Temperature_High_units: High temperature units.
:ivar Pressure_Rate_units: Pressure rate units.
:ivar Wind_Speed_units: Wind speed units.
:ivar Elevation_units: Elevation units.
:ivar Dew_Point_units: Dew point units.
:ivar Wind_Average_Speed_units: Average wind speed units.
:ivar Pressure_units: Pressure units.
:ivar Gust_Direction_units: Gust direction units.
:ivar Wind_Average_Direction_units: Average wind direction units.
:ivar Light_units: Light amount units.
:ivar Wind_Direction_units: Wind direction units.
:ivar Humidity_units: Humidity units.
:ivar Humidity_Rate_units: Humidity rate units.
:ivar Rain_Today_units: Rain forecast units.
:ivar Light_Rate_units: Light rate units.
:ivar Water_Deficit_Yesterday_units: Water deficit units.
:ivar Irrigation_Requirement_units: Irrigation requirement units.
:ivar Feels_Like_units: Feels like temperature units.
:ivar Temperature_Low_units: Low temperature units.
:ivar Evapotranspiration_units: Evapotranspiration units.
"""
# Values
_id2name = ['Temperature', 'Temperature_High', 'Temperature_Low',
'Feels_Like', 'Temperature_Average', 'Humidity', None,
'Pressure', None, 'Dew_Point', 'Wind_Speed', None,
'Wind_Direction', None, 'Gust_Speed', None, 'Total_Rain_Today',
'Light', None, None, None, 'Evapotranspiration',
'Irrigation_Requirement', 'Water_Deficit_Yesterday',
'Elevation', None, None, None, None,
'Average_Temperature_Tomorrow', 'High_Temperature_Tomorrow',
'Low_Temperature_Tomorrow', 'Humidity_Tomorrow',
'Wind_Speed_Tomorrow', 'Gust_Speed_Tomorrow', 'Rain_Tomorrow',
'Snow_Tomorrow', None, None, None, None,
'Forecast_Average_Temperature', 'Forecast_High_Temperature',
'Forecast_Low_Temperature', 'Forecast_Humidity',
'Forecast_Rain', 'Forecast_Snow', None, None, None, None]
# value properties
Temperature = Property(0, readonly=True)
Temperature_High = Property(0, readonly=True)
Temperature_Low = Property(0, readonly=True)
Feels_Like = Property(0, readonly=True)
Temperature_Average = Property(0, readonly=True)
Humidity = Property(0, readonly=True)
Pressure = Property(0, readonly=True)
Dew_Point = Property(0, readonly=True)
Wind_Speed = Property(0, readonly=True)
Wind_Direction = Property(0, readonly=True)
Gust_Speed = Property(0, readonly=True)
Total_Rain_Today = Property(0, readonly=True)
Light = Property(0, readonly=True)
Evapotranspiration = Property(0, readonly=True)
Irrigation_Requirement = Property(0, readonly=True)
Water_Deficit_Yesterday = Property(0, readonly=True)
Elevation = Property(0, readonly=True)
# Coverage = Property(0, readonly=True)
# Intensity = Property(0, readonly=True)
# Weather_Condition = Property(0, readonly=True)
# Cloud_Condition = Property(0, readonly=True)
Average_Temperature_Tomorrow = Property(0, readonly=True)
High_Temperature_Tomorrow = Property(0, readonly=True)
Low_Temperature_Tomorrow = Property(0, readonly=True)
Humidity_Tomorrow = Property(0, readonly=True)
Wind_Speed_Tomorrow = Property(0, readonly=True)
Gust_Speed_Tomorrow = Property(0, readonly=True)
Rain_Tomorrow = Property(0, readonly=True)
Snow_Tomorrow = Property(0, readonly=True)
# Coverage_Tomorrow = Property(0, readonly=True)
# Intensity_Tomorrow = Property(0, readonly=True)
# Weather_Condition_Tomorrow = Property(0, readonly=True)
# Cloud_Condition_Tomorrow = Property(0, readonly=True)
Forecast_Average_Temperature = Property(0, readonly=True)
Forecast_High_Temperature = Property(0, readonly=True)
Forecast_Low_Temperature = Property(0, readonly=True)
Forecast_Humidity = Property(0, readonly=True)
Forecast_Rain = Property(0, readonly=True)
Forecast_Snow = Property(0, readonly=True)
# Forecast_Coverage = Property(0, readonly=True)
# Forecast_Intensity = Property(0, readonly=True)
# Forecast_Weather_Condition = Property(0, readonly=True)
# Forecast_Cloud_Condition = Property(0, readonly=True)
# unit properties
Temperature_units = ''
Temperature_High_units = ''
Temperature_Low_units = ''
Feels_Like_units = ''
Temperature_Average_units = ''
Humidity_units = ''
Pressure_units = ''
Dew_Point_units = ''
Wind_Speed_units = ''
Wind_Direction_units = ''
Gust_Speed_units = ''
Total_Rain_Today_units = ''
Light_units = ''
Evapotranspiration_units = ''
Irrigation_Requirement_units = ''
Water_Deficit_Yesterday_units = ''
Elevation_units = ''
# Coverage_units = ''
# Intensity_units = ''
# Weather_Condition_units = ''
# Cloud_Condition_units = ''
Average_Temperature_Tomorrow_units = ''
High_Temperature_Tomorrow_units = ''
Low_Temperature_Tomorrow_units = ''
Humidity_Tomorrow_units = ''
Wind_Speed_Tomorrow_units = ''
Gust_Speed_Tomorrow_units = ''
Rain_Tomorrow_units = ''
Snow_Tomorrow_units = ''
# Coverage_Tomorrow_units = ''
# Intensity_Tomorrow_units = ''
# Weather_Condition_Tomorrow_units = ''
# Cloud_Condition_Tomorrow_units = ''
Forecast_Average_Temperature_units = ''
Forecast_High_Temperature_units = ''
Forecast_Low_Temperature_units = ''
Forecast_Humidity_units = ''
Forecast_Rain_units = ''
Forecast_Snow_units = ''
# Forecast_Coverage_units = ''
# Forecast_Intensity_units = ''
# Forecast_Weather_Condition_units = ''
# Forecast_Cloud_Condition_units = ''
def __str__(self):
""" Returns a string representing the climate manager. """
return 'Climate Module'
def __repr__(self):
""" Returns a long string showing all the climate values. """
out = 'Climate Module\n'
for attr_name in dir(self):
attr = getattr(self, attr_name)
if isinstance(attr, Var):
units = getattr(self, attr_name + '_units')
out += ' ' + attr_name + ' = ' + str(attr) \
+ ' ' + units + '\n'
return out
def parse(self, xml):
"""
Parses the xml data.
xml: String of the xml data
"""
try:
xmldoc = minidom.parseString(xml)
except:
self.parent.log.error('ISY Could not parse climate, poorly '
+ 'formatted XML.')
else:
# parse definitions
feature = xmldoc.getElementsByTagName('climate')[0]
for node in feature.childNodes:
(val, unit) = self._parse_val(node.firstChild.toxml())
name = node.nodeName
try:
prop = getattr(self, name)
prop.update(val, force=True, silent=True)
setattr(self, name + '_units', unit)
except:
pass
self.parent.log.info('ISY Loaded Environment Data')
def update(self, waitTime=0):
"""
Updates the contents of the climate class
waitTime: [optional] Amount of seconds to wait before updating
"""
sleep(waitTime)
xml = self.parent.conn.getClimate()
self.parse(xml)
| 42.463918
| 81
| 0.62936
|
from VarEvents import Property
from VarEvents import Var
from time import sleep
from xml.dom import minidom
class Climate(object):
"""
This class handles the ISY climate module.
| parent: ISY class
| xml: String of xml data containing the climate data
:ivar Gust_Speed: Watched Variable representing the gust speed.
:ivar Temperature: Watched Variable representing the temperature.
:ivar Temperature_Rate: Watched Variable representing the temperature rate.
:ivar Rain_Rate: Watched Variable representing the rain rate.
:ivar Max_Rain_Rate: Watched Variable representing the rain rate.
:ivar Temperature_High: Watched Variable representing the high temperature.
:ivar Pressure_Rate: Watched variable representing the pressure rate.
:ivar Wind_Speed: Watched Variable representing the wind speed.
:ivar Elevation: Watched Variable representing the elevation.
:ivar Dew_Point: Watched Variable representing the dew point.
:ivar Wind_Average_Speed: Watched Variable representing the avg wind speed.
:ivar Pressure: Watched Variable representing the pressure.
:ivar Gust_Direction: Watched Variable representing the gust direction.
:ivar Wind_Average_Direction: Watched Variable representing the average wind
direction.
:ivar Light: Watched Variable representing the amount of light.
:ivar Wind_Direction: Watched Variable representing the wind direction.
:ivar Humidity: Watched Variable representing the humidity.
:ivar Humidity_Rate: Watched Variable representing the humidity rate.
:ivar Rain_Today: Watched Variable representing the forcast rain today.
:ivar Light_Rate: Watched Variable representing the light rate.
:ivar Water_Deficit_Yesterday: Watched Variable representing the water
deficit yesterday.
:ivar Irrigation_Requirement: Watched Variable representing the irrigation
requirement.
:ivar Feels_Like: Watched Variable representing the feels like temperature.
:ivar Temperature_Low: Watched Variable representing the low temperature.
:ivar Evapotranspiration: Watched Variable representing the
evapotranspiration amount.
:ivar Gust_Speed_units: Gust speed units.
:ivar Temperature_units: Temperature units.
:ivar Temperature_Rate_units: Temperature rate units.
:ivar Rain_Rate_units: Rain rate units.
:ivar Max_Rain_Rate_units: Max rain rate units.
:ivar Temperature_High_units: High temperature units.
:ivar Pressure_Rate_units: Pressure rate units.
:ivar Wind_Speed_units: Wind speed units.
:ivar Elevation_units: Elevation units.
:ivar Dew_Point_units: Dew point units.
:ivar Wind_Average_Speed_units: Average wind speed units.
:ivar Pressure_units: Pressure units.
:ivar Gust_Direction_units: Gust direction units.
:ivar Wind_Average_Direction_units: Average wind direction units.
:ivar Light_units: Light amount units.
:ivar Wind_Direction_units: Wind direction units.
:ivar Humidity_units: Humidity units.
:ivar Humidity_Rate_units: Humidity rate units.
:ivar Rain_Today_units: Rain forecast units.
:ivar Light_Rate_units: Light rate units.
:ivar Water_Deficit_Yesterday_units: Water deficit units.
:ivar Irrigation_Requirement_units: Irrigation requirement units.
:ivar Feels_Like_units: Feels like temperature units.
:ivar Temperature_Low_units: Low temperature units.
:ivar Evapotranspiration_units: Evapotranspiration units.
"""
# Values
_id2name = ['Temperature', 'Temperature_High', 'Temperature_Low',
'Feels_Like', 'Temperature_Average', 'Humidity', None,
'Pressure', None, 'Dew_Point', 'Wind_Speed', None,
'Wind_Direction', None, 'Gust_Speed', None, 'Total_Rain_Today',
'Light', None, None, None, 'Evapotranspiration',
'Irrigation_Requirement', 'Water_Deficit_Yesterday',
'Elevation', None, None, None, None,
'Average_Temperature_Tomorrow', 'High_Temperature_Tomorrow',
'Low_Temperature_Tomorrow', 'Humidity_Tomorrow',
'Wind_Speed_Tomorrow', 'Gust_Speed_Tomorrow', 'Rain_Tomorrow',
'Snow_Tomorrow', None, None, None, None,
'Forecast_Average_Temperature', 'Forecast_High_Temperature',
'Forecast_Low_Temperature', 'Forecast_Humidity',
'Forecast_Rain', 'Forecast_Snow', None, None, None, None]
# value properties
Temperature = Property(0, readonly=True)
Temperature_High = Property(0, readonly=True)
Temperature_Low = Property(0, readonly=True)
Feels_Like = Property(0, readonly=True)
Temperature_Average = Property(0, readonly=True)
Humidity = Property(0, readonly=True)
Pressure = Property(0, readonly=True)
Dew_Point = Property(0, readonly=True)
Wind_Speed = Property(0, readonly=True)
Wind_Direction = Property(0, readonly=True)
Gust_Speed = Property(0, readonly=True)
Total_Rain_Today = Property(0, readonly=True)
Light = Property(0, readonly=True)
Evapotranspiration = Property(0, readonly=True)
Irrigation_Requirement = Property(0, readonly=True)
Water_Deficit_Yesterday = Property(0, readonly=True)
Elevation = Property(0, readonly=True)
# Coverage = Property(0, readonly=True)
# Intensity = Property(0, readonly=True)
# Weather_Condition = Property(0, readonly=True)
# Cloud_Condition = Property(0, readonly=True)
Average_Temperature_Tomorrow = Property(0, readonly=True)
High_Temperature_Tomorrow = Property(0, readonly=True)
Low_Temperature_Tomorrow = Property(0, readonly=True)
Humidity_Tomorrow = Property(0, readonly=True)
Wind_Speed_Tomorrow = Property(0, readonly=True)
Gust_Speed_Tomorrow = Property(0, readonly=True)
Rain_Tomorrow = Property(0, readonly=True)
Snow_Tomorrow = Property(0, readonly=True)
# Coverage_Tomorrow = Property(0, readonly=True)
# Intensity_Tomorrow = Property(0, readonly=True)
# Weather_Condition_Tomorrow = Property(0, readonly=True)
# Cloud_Condition_Tomorrow = Property(0, readonly=True)
Forecast_Average_Temperature = Property(0, readonly=True)
Forecast_High_Temperature = Property(0, readonly=True)
Forecast_Low_Temperature = Property(0, readonly=True)
Forecast_Humidity = Property(0, readonly=True)
Forecast_Rain = Property(0, readonly=True)
Forecast_Snow = Property(0, readonly=True)
# Forecast_Coverage = Property(0, readonly=True)
# Forecast_Intensity = Property(0, readonly=True)
# Forecast_Weather_Condition = Property(0, readonly=True)
# Forecast_Cloud_Condition = Property(0, readonly=True)
# unit properties
Temperature_units = ''
Temperature_High_units = ''
Temperature_Low_units = ''
Feels_Like_units = ''
Temperature_Average_units = ''
Humidity_units = ''
Pressure_units = ''
Dew_Point_units = ''
Wind_Speed_units = ''
Wind_Direction_units = ''
Gust_Speed_units = ''
Total_Rain_Today_units = ''
Light_units = ''
Evapotranspiration_units = ''
Irrigation_Requirement_units = ''
Water_Deficit_Yesterday_units = ''
Elevation_units = ''
# Coverage_units = ''
# Intensity_units = ''
# Weather_Condition_units = ''
# Cloud_Condition_units = ''
Average_Temperature_Tomorrow_units = ''
High_Temperature_Tomorrow_units = ''
Low_Temperature_Tomorrow_units = ''
Humidity_Tomorrow_units = ''
Wind_Speed_Tomorrow_units = ''
Gust_Speed_Tomorrow_units = ''
Rain_Tomorrow_units = ''
Snow_Tomorrow_units = ''
# Coverage_Tomorrow_units = ''
# Intensity_Tomorrow_units = ''
# Weather_Condition_Tomorrow_units = ''
# Cloud_Condition_Tomorrow_units = ''
Forecast_Average_Temperature_units = ''
Forecast_High_Temperature_units = ''
Forecast_Low_Temperature_units = ''
Forecast_Humidity_units = ''
Forecast_Rain_units = ''
Forecast_Snow_units = ''
# Forecast_Coverage_units = ''
# Forecast_Intensity_units = ''
# Forecast_Weather_Condition_units = ''
# Forecast_Cloud_Condition_units = ''
def __init__(self, parent, xml=None):
super(Climate, self).__init__()
self.parent = parent
self.parse(xml)
def __str__(self):
""" Returns a string representing the climate manager. """
return 'Climate Module'
def __repr__(self):
""" Returns a long string showing all the climate values. """
out = 'Climate Module\n'
for attr_name in dir(self):
attr = getattr(self, attr_name)
if isinstance(attr, Var):
units = getattr(self, attr_name + '_units')
out += ' ' + attr_name + ' = ' + str(attr) \
+ ' ' + units + '\n'
return out
def parse(self, xml):
"""
Parses the xml data.
xml: String of the xml data
"""
try:
xmldoc = minidom.parseString(xml)
except:
self.parent.log.error('ISY Could not parse climate, poorly '
+ 'formatted XML.')
else:
# parse definitions
feature = xmldoc.getElementsByTagName('climate')[0]
for node in feature.childNodes:
(val, unit) = self._parse_val(node.firstChild.toxml())
name = node.nodeName
try:
prop = getattr(self, name)
prop.update(val, force=True, silent=True)
setattr(self, name + '_units', unit)
except:
pass
self.parent.log.info('ISY Loaded Environment Data')
def _parse_val(self, val):
try:
# assume standard val unit combination
(val, unit) = self._parse_val_num(val)
except ValueError:
# assume direction
(val, unit) = self._parse_val_dir(val)
return (val, unit)
def _parse_val_num(self, val):
split_val = val.split()
if len(split_val) == 2:
return (float(split_val[0]), split_val[1])
else:
# probably elevation, assume feet
return (float(split_val[0]), 'feet')
def _parse_val_dir(self, val):
dirs = {'N': 0.,
'NNE': 22.5,
'NE': 45.,
'ENE': 67.5,
'E': 90.,
'ESE': 112.5,
'SE': 135.,
'SSE': 157.5,
'S': 180.,
'SSW': 202.5,
'SW': 225.,
'WSW': 247.5,
'W': 270.,
'WNW': 292.5,
'NW': 315.,
'NNW': 337.5,
'N/A': None}
return (dirs[val], 'deg')
def update(self, waitTime=0):
"""
Updates the contents of the climate class
waitTime: [optional] Amount of seconds to wait before updating
"""
sleep(waitTime)
xml = self.parent.conn.getClimate()
self.parse(xml)
def _upmsg(self, xmldoc):
cid = int(xmldoc.getElementsByTagName('action')[0]
.firstChild.toxml()) - 1
val_raw = xmldoc.getElementsByTagName('value')[0] \
.firstChild.toxml().strip()
unit_raw = xmldoc.getElementsByTagName('unit')[0].firstChild
if unit_raw is not None:
unit_raw = unit_raw.toxml().strip()
else:
unit_raw = ''
if cid < len(self._id2name):
(val, unit) = self._parse_val((val_raw + ' ' + unit_raw).strip())
cname = self._id2name[cid]
if cname is not None:
attr = getattr(self, cname)
attr.update(val, force=True, silent=True)
setattr(self, cname + '_units', unit)
self.parent.log.info('ISY Updated Climate Value: ' + cname)
| 1,999
| 0
| 145
|
d5d2d48c33750918747531b040829ac393f0e6e7
| 4,203
|
py
|
Python
|
WTFUtils.py
|
Nierhain/WTFUtils
|
879eb5d9a8aea4f39524e3172f93e65f452d8819
|
[
"MIT"
] | null | null | null |
WTFUtils.py
|
Nierhain/WTFUtils
|
879eb5d9a8aea4f39524e3172f93e65f452d8819
|
[
"MIT"
] | null | null | null |
WTFUtils.py
|
Nierhain/WTFUtils
|
879eb5d9a8aea4f39524e3172f93e65f452d8819
|
[
"MIT"
] | null | null | null |
from PySide2.QtCore import Qt, Slot, QDir, QSettings
from PySide2.QtWidgets import QApplication, QMainWindow, QWidget, QAction, QPushButton, QFileDialog, QInputDialog, \
QLineEdit, QHBoxLayout, QVBoxLayout, QLabel
import sys
if __name__ == "__main__":
app = QApplication(sys.argv)
widget = Application()
window = MainWindow(widget)
window.resize(800, 600)
window.show()
sys.exit(app.exec_())
| 34.170732
| 119
| 0.678563
|
from PySide2.QtCore import Qt, Slot, QDir, QSettings
from PySide2.QtWidgets import QApplication, QMainWindow, QWidget, QAction, QPushButton, QFileDialog, QInputDialog, \
QLineEdit, QHBoxLayout, QVBoxLayout, QLabel
import sys
class Application(QWidget):
pathToWTF = "_retail_/WTF/Account"
def __init__(self):
QWidget.__init__(self)
self.config = QSettings("Nierhain", "WTFUtils")
self.layout = QHBoxLayout()
self.quit = QPushButton("Quit")
# Options
self.accountname = QLineEdit(self.config.value("accountname"))
self.realm = QLineEdit(self.config.value("realm"))
self.character = QLineEdit(self.config.value("character"))
self.wowPath = QLineEdit(self.config.value("wowPath"))
self.wowPath.picker = QPushButton('Select')
self.copyPath = QLineEdit(self.config.value("copyPath"))
self.copyPath.picker = QPushButton('Select')
self.backupPath = QLineEdit(self.config.value("backupPath"))
self.backupPath.picker = QPushButton('Select')
self.options = QVBoxLayout()
self.options.setMargin(10)
self.options.addWidget(QLabel("WoW path"))
self.options.addWidget(self.wowPath)
self.options.addWidget(self.wowPath.picker)
self.options.addWidget(QLabel("Character settings path"))
self.options.addWidget(self.copyPath)
self.options.addWidget(self.copyPath.picker)
self.options.addWidget(QLabel("Account name"))
self.options.addWidget(self.accountname)
self.options.addWidget(QLabel("Realm"))
self.options.addWidget(self.realm)
self.options.addWidget(QLabel("Character name"))
self.options.addWidget(self.character)
self.options.addWidget(QLabel("Backup folder path"))
self.options.addWidget(self.backupPath)
self.options.addWidget(self.backupPath.picker)
self.options.addStretch()
self.options.addWidget(self.quit)
self.layout.addLayout(self.options)
self.setLayout(self.layout)
self.realm.editingFinished.connect(self.setRealm)
self.quit.clicked.connect(self.quit_application)
self.copyPath.picker.clicked.connect(self.setCopyPath)
self.wowPath.picker.clicked.connect(self.setWowPath)
self.backupPath.picker.clicked.connect(self.setBackupPath)
self.character.editingFinished.connect(self.setCharacter)
@Slot()
def setRealm(self):
self.config.setValue("realm", self.realm.text())
@Slot()
def setCharacter(self):
self.config.setValue("character", self.character.text())
@Slot()
def setAccountname(self):
self.config.setValue("accountname", self.accountname.text())
@Slot()
def setCopyPath(self):
self.config.setValue("copyPath", QFileDialog.getExistingDirectory(self, "select character settings directory"))
self.copyPath.setText(self.config.value("copyPath"))
@Slot()
def setWowPath(self):
self.config.setValue("wowPath", QFileDialog.getExistingDirectory(self, "select wow directory"))
self.wowPath.setText(self.config.value("wowPath"))
@Slot()
def setBackupPath(self):
self.config.setValue("backupPath", QFileDialog.getExistingDirectory(self, "select backup directory"))
self.backupPath.setText(self.config.value("backupPath"))
@Slot()
def quit_application(self):
QApplication.quit()
class MainWindow(QMainWindow):
def __init__(self, widget):
QMainWindow.__init__(self)
self.setWindowTitle("WTFUtils - by Nierhain")
self.menu = self.menuBar()
self.file_menu = self.menu.addMenu("File")
exit_action = QAction("Exit", self)
exit_action.setShortcut("Ctrl+Q")
exit_action.triggered.connect(self.exit_app)
self.file_menu.addAction(exit_action)
self.setCentralWidget(widget)
@Slot()
def exit_app(self, checked):
QApplication.quit()
if __name__ == "__main__":
app = QApplication(sys.argv)
widget = Application()
window = MainWindow(widget)
window.resize(800, 600)
window.show()
sys.exit(app.exec_())
| 3,313
| 419
| 46
|
a7846b0d6899761dcc014e34e156d53e1610b615
| 4,461
|
py
|
Python
|
populationsim/steps/expand_households.py
|
bstabler/populationsim
|
872f7d26992e566c6fe75aed1cc14c8ad5f71d3a
|
[
"BSD-3-Clause"
] | null | null | null |
populationsim/steps/expand_households.py
|
bstabler/populationsim
|
872f7d26992e566c6fe75aed1cc14c8ad5f71d3a
|
[
"BSD-3-Clause"
] | null | null | null |
populationsim/steps/expand_households.py
|
bstabler/populationsim
|
872f7d26992e566c6fe75aed1cc14c8ad5f71d3a
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import division
from __future__ import absolute_import
# PopulationSim
# See full license in LICENSE.txt.
import logging
import pandas as pd
import numpy as np
from activitysim.core import pipeline
from activitysim.core import inject
from activitysim.core.config import setting
from .helper import get_control_table
from .helper import get_weight_table
from .helper import weight_table_name
logger = logging.getLogger(__name__)
@inject.step()
def expand_households():
"""
Create a complete expanded synthetic household list with their assigned geographic zone ids.
This is the skeleton synthetic household id list with no household or person attributes,
one row per household, with geography columns and seed household table household_id.
Creates pipeline table expanded_household_ids
"""
if setting('NO_INTEGERIZATION_EVER', False):
logger.warning("skipping expand_households: NO_INTEGERIZATION_EVER")
inject.add_table('expanded_household_ids', pd.DataFrame())
return
geographies = setting('geographies')
household_id_col = setting('household_id_col')
low_geography = geographies[-1]
# only one we really need is low_geography
seed_geography = setting('seed_geography')
geography_cols = geographies[geographies.index(seed_geography):]
weights = get_weight_table(low_geography, sparse=True)
weights = weights[geography_cols + [household_id_col, 'integer_weight']]
# - expand weights table by integer_weight, so there is one row per desired hh
weight_cols = weights.columns.values
weights_np = np.repeat(weights.values, weights.integer_weight.values, axis=0)
expanded_weights = pd.DataFrame(data=weights_np, columns=weight_cols)
if setting('GROUP_BY_INCIDENCE_SIGNATURE'):
# the household_id_col is really the group_id
expanded_weights.rename(columns={household_id_col: 'group_id'}, inplace=True)
# the original incidence table with one row per hh, with index hh_id
household_groups = pipeline.get_table('household_groups')
household_groups = household_groups[[household_id_col, 'group_id', 'sample_weight']]
# for each group, lists of hh_ids and their sample_weights (as relative probabiliities)
# [ [ [<group_0_hh_id_list>], [<group_0_hh_prob_list>] ],
# [ [<group_1_hh_id_list>], [<group_1_hh_prob_list>] ], ... ]
HH_IDS = 0
HH_PROBS = 1
grouper = household_groups.groupby('group_id')
group_hh_probs = [0] * len(grouper)
for group_id, df in grouper:
hh_ids = list(df[household_id_col])
probs = list(df.sample_weight / df.sample_weight.sum())
group_hh_probs[group_id] = [hh_ids, probs]
# FIXME - should sample without replacement?
# now make a hh_id choice for each group_id in expanded_weights
expanded_weights[household_id_col] = \
expanded_weights.group_id.apply(chooser, convert_dtype=True,)
# FIXME - omit in production?
del expanded_weights['group_id']
del expanded_weights['integer_weight']
append = inject.get_step_arg('append', False)
replace = inject.get_step_arg('replace', False)
assert not (append and replace), "can't specify both append and replace for expand_households"
if append or replace:
t = inject.get_table('expanded_household_ids').to_frame()
prev_hhs = len(t.index)
added_hhs = len(expanded_weights.index)
if replace:
# FIXME - should really get from crosswalk table?
low_ids_to_replace = expanded_weights[low_geography].unique()
t = t[~t[low_geography].isin(low_ids_to_replace)]
expanded_weights = pd.concat([t, expanded_weights], ignore_index=True)
dropped_hhs = prev_hhs - len(t.index)
final_hhs = len(expanded_weights.index)
op = 'append' if append else 'replace'
logger.info("expand_households op: %s prev hh count %s dropped %s added %s final %s" %
(op, prev_hhs, dropped_hhs, added_hhs, final_hhs))
repop = inject.get_step_arg('repop', default=False)
inject.add_table('expanded_household_ids', expanded_weights, replace=repop)
| 39.131579
| 98
| 0.705447
|
from __future__ import division
from __future__ import absolute_import
# PopulationSim
# See full license in LICENSE.txt.
import logging
import pandas as pd
import numpy as np
from activitysim.core import pipeline
from activitysim.core import inject
from activitysim.core.config import setting
from .helper import get_control_table
from .helper import get_weight_table
from .helper import weight_table_name
logger = logging.getLogger(__name__)
@inject.step()
def expand_households():
"""
Create a complete expanded synthetic household list with their assigned geographic zone ids.
This is the skeleton synthetic household id list with no household or person attributes,
one row per household, with geography columns and seed household table household_id.
Creates pipeline table expanded_household_ids
"""
if setting('NO_INTEGERIZATION_EVER', False):
logger.warning("skipping expand_households: NO_INTEGERIZATION_EVER")
inject.add_table('expanded_household_ids', pd.DataFrame())
return
geographies = setting('geographies')
household_id_col = setting('household_id_col')
low_geography = geographies[-1]
# only one we really need is low_geography
seed_geography = setting('seed_geography')
geography_cols = geographies[geographies.index(seed_geography):]
weights = get_weight_table(low_geography, sparse=True)
weights = weights[geography_cols + [household_id_col, 'integer_weight']]
# - expand weights table by integer_weight, so there is one row per desired hh
weight_cols = weights.columns.values
weights_np = np.repeat(weights.values, weights.integer_weight.values, axis=0)
expanded_weights = pd.DataFrame(data=weights_np, columns=weight_cols)
if setting('GROUP_BY_INCIDENCE_SIGNATURE'):
# the household_id_col is really the group_id
expanded_weights.rename(columns={household_id_col: 'group_id'}, inplace=True)
# the original incidence table with one row per hh, with index hh_id
household_groups = pipeline.get_table('household_groups')
household_groups = household_groups[[household_id_col, 'group_id', 'sample_weight']]
# for each group, lists of hh_ids and their sample_weights (as relative probabiliities)
# [ [ [<group_0_hh_id_list>], [<group_0_hh_prob_list>] ],
# [ [<group_1_hh_id_list>], [<group_1_hh_prob_list>] ], ... ]
HH_IDS = 0
HH_PROBS = 1
grouper = household_groups.groupby('group_id')
group_hh_probs = [0] * len(grouper)
for group_id, df in grouper:
hh_ids = list(df[household_id_col])
probs = list(df.sample_weight / df.sample_weight.sum())
group_hh_probs[group_id] = [hh_ids, probs]
# FIXME - should sample without replacement?
# now make a hh_id choice for each group_id in expanded_weights
def chooser(group_id):
hh_ids = group_hh_probs[group_id][HH_IDS]
hh_probs = group_hh_probs[group_id][HH_PROBS]
return np.random.choice(hh_ids, p=hh_probs)
expanded_weights[household_id_col] = \
expanded_weights.group_id.apply(chooser, convert_dtype=True,)
# FIXME - omit in production?
del expanded_weights['group_id']
del expanded_weights['integer_weight']
append = inject.get_step_arg('append', False)
replace = inject.get_step_arg('replace', False)
assert not (append and replace), "can't specify both append and replace for expand_households"
if append or replace:
t = inject.get_table('expanded_household_ids').to_frame()
prev_hhs = len(t.index)
added_hhs = len(expanded_weights.index)
if replace:
# FIXME - should really get from crosswalk table?
low_ids_to_replace = expanded_weights[low_geography].unique()
t = t[~t[low_geography].isin(low_ids_to_replace)]
expanded_weights = pd.concat([t, expanded_weights], ignore_index=True)
dropped_hhs = prev_hhs - len(t.index)
final_hhs = len(expanded_weights.index)
op = 'append' if append else 'replace'
logger.info("expand_households op: %s prev hh count %s dropped %s added %s final %s" %
(op, prev_hhs, dropped_hhs, added_hhs, final_hhs))
repop = inject.get_step_arg('repop', default=False)
inject.add_table('expanded_household_ids', expanded_weights, replace=repop)
| 169
| 0
| 30
|
489733bd7746a78851184e57f552e8596df27f96
| 15,828
|
py
|
Python
|
lib/dataset/jacquard.py
|
pia32/ssds.pytorch
|
e596ecea37942153d82122fb85ad9de3feeb2363
|
[
"MIT"
] | null | null | null |
lib/dataset/jacquard.py
|
pia32/ssds.pytorch
|
e596ecea37942153d82122fb85ad9de3feeb2363
|
[
"MIT"
] | null | null | null |
lib/dataset/jacquard.py
|
pia32/ssds.pytorch
|
e596ecea37942153d82122fb85ad9de3feeb2363
|
[
"MIT"
] | null | null | null |
import os
import pickle
import os.path
import sys
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
from PIL import Image, ImageDraw, ImageFont
import cv2
import numpy as np
from operator import itemgetter
from skimage.draw import polygon
if sys.version_info[0] == 2:
import xml.etree.cElementTree as ET
else:
import xml.etree.ElementTree as ET
VOC_CLASSES = ( '__background__', # always index 0
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
VOC_CLASSES = ( '__background__', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20')
# for making bounding boxes pretty
COLORS = ((255, 0, 0, 128), (0, 255, 0, 128), (0, 0, 255, 128),
(0, 255, 255, 128), (255, 0, 255, 128), (255, 255, 0, 128))
class AnnotationTransform(object):
"""Transforms a VOC annotation into a Tensor of bbox coords and label index
Initilized with a dictionary lookup of classnames to indexes
Arguments:
class_to_ind (dict, optional): dictionary lookup of classnames -> indexes
(default: alphabetic indexing of VOC's 20 classes)
keep_difficult (bool, optional): keep difficult instances or not
(default: False)
height (int): height
width (int): width
"""
def __call__(self, target):
"""
Arguments:
target (annotation) : the target annotation to be made usable
will be an ET.Element
Returns:
a list containing lists of bounding boxes [bbox coords, class name]
"""
res = np.empty((0,5))
for obj in target.iter('object'):
difficult = int(obj.find('difficult').text) == 1
if not self.keep_difficult and difficult:
continue
name = obj.find('name').text.lower().strip()
bbox = obj.find('bndbox')
pts = ['xmin', 'ymin', 'xmax', 'ymax']
bndbox = []
for i, pt in enumerate(pts):
cur_pt = int(bbox.find(pt).text) - 1
# scale height or width
#cur_pt = cur_pt / width if i % 2 == 0 else cur_pt / height
bndbox.append(cur_pt)
label_idx = self.class_to_ind[name]
bndbox.append(label_idx)
res = np.vstack((res,bndbox)) # [xmin, ymin, xmax, ymax, label_ind]
# img_id = target.find('filename').text[:-4]
return res # [[xmin, ymin, xmax, ymax, label_ind], ... ]
class JACQUARDDetection(data.Dataset):
"""VOC Detection Dataset Object
input is image, target is annotation
Arguments:
root (string): filepath to VOCdevkit folder.
image_set (string): imageset to use (eg. 'train', 'val', 'test')
transform (callable, optional): transformation to perform on the
input image
target_transform (callable, optional): transformation to perform on the
target `annotation`
(eg: take in caption string, return tensor of word indices)
dataset_name (string, optional): which dataset to load
(default: 'VOC2007')
"""
def pull_image(self, index):
'''Returns the original image object at index in PIL form
Note: not using self.__getitem__(), as any transformations passed in
could mess up this functionality.
Argument:
index (int): index of img to show
Return:
PIL img
'''
img_id = self.ids[index]
return cv2.imread(self._imgpath % img_id, cv2.IMREAD_COLOR)
def pull_anno(self, index):
'''Returns the original annotation of image at index
Note: not using self.__getitem__(), as any transformations passed in
could mess up this functionality.
Argument:
index (int): index of img to get annotation of
Return:
list: [img_id, [(label, bbox coords),...]]
eg: ('001718', [('dog', (96, 13, 438, 332))])
'''
img_id = self.ids[index]
anno = ET.parse(self._annopath % img_id).getroot()
# gt = self.target_transform(anno, 1, 1)
# gt = self.target_transform(anno)
# return img_id[1], gt
if self.target_transform is not None:
anno = self.target_transform(anno)
return anno
def pull_img_anno(self, index):
'''Returns the original annotation of image at index
Note: not using self.__getitem__(), as any transformations passed in
could mess up this functionality.
Argument:
index (int): index of img to get annotation of
Return:
list: [img_id, [(label, bbox coords),...]]
eg: ('001718', [('dog', (96, 13, 438, 332))])
'''
img_id = self.ids[index]
img = cv2.imread(self._imgpath % img_id, cv2.IMREAD_COLOR)
anno = ET.parse(self._annopath % img_id).getroot()
gt = self.target_transform(anno)
height, width, _ = img.shape
boxes = gt[:,:-1]
labels = gt[:,-1]
boxes[:, 0::2] /= width
boxes[:, 1::2] /= height
labels = np.expand_dims(labels,1)
targets = np.hstack((boxes,labels))
return img, targets
def pull_tensor(self, index):
'''Returns the original image at an index in tensor form
Note: not using self.__getitem__(), as any transformations passed in
could mess up this functionality.
Argument:
index (int): index of img to show
Return:
tensorized version of img, squeezed
'''
to_tensor = transforms.ToTensor()
return torch.Tensor(self.pull_image(index)).unsqueeze_(0)
def evaluate_detections(self, all_boxes, output_dir=None):
"""
all_boxes is a list of length number-of-classes.
Each list element is a list of length number-of-images.
Each of those list elements is either an empty list []
or a numpy array of detection.
all_boxes[class][image] = [] or np.array of shape #dets x 5
"""
self._write_voc_results_file(all_boxes)
aps,map = self._do_python_eval(output_dir)
return aps,map
def parse_rec(self, filename):
""" Parse a PASCAL VOC xml file """
tree = ET.parse(filename)
objects = []
for obj in tree.findall('object'):
obj_struct = {}
obj_struct['name'] = obj.find('name').text
# obj_struct['pose'] = obj.find('pose').text
obj_struct['truncated'] = int(obj.find('truncated').text)
obj_struct['difficult'] = int(obj.find('difficult').text)
bbox = obj.find('bndbox')
obj_struct['bbox'] = [int(bbox.find('xmin').text),
int(bbox.find('ymin').text),
int(bbox.find('xmax').text),
int(bbox.find('ymax').text)]
objects.append(obj_struct)
return objects
## test
# if __name__ == '__main__':
# ds = VOCDetection('../../../../../dataset/VOCdevkit/', [('2012', 'train')],
# None, AnnotationTransform())
# print(len(ds))
# img, target = ds[0]
# print(target)
# ds.show(1)
| 34.940397
| 144
| 0.538287
|
import os
import pickle
import os.path
import sys
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
from PIL import Image, ImageDraw, ImageFont
import cv2
import numpy as np
from operator import itemgetter
from skimage.draw import polygon
if sys.version_info[0] == 2:
import xml.etree.cElementTree as ET
else:
import xml.etree.ElementTree as ET
VOC_CLASSES = ( '__background__', # always index 0
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
VOC_CLASSES = ( '__background__', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20')
# for making bounding boxes pretty
COLORS = ((255, 0, 0, 128), (0, 255, 0, 128), (0, 0, 255, 128),
(0, 255, 255, 128), (255, 0, 255, 128), (255, 255, 0, 128))
class AnnotationTransform(object):
"""Transforms a VOC annotation into a Tensor of bbox coords and label index
Initilized with a dictionary lookup of classnames to indexes
Arguments:
class_to_ind (dict, optional): dictionary lookup of classnames -> indexes
(default: alphabetic indexing of VOC's 20 classes)
keep_difficult (bool, optional): keep difficult instances or not
(default: False)
height (int): height
width (int): width
"""
def __init__(self, class_to_ind=None, keep_difficult=True):
self.class_to_ind = class_to_ind or dict(
zip(VOC_CLASSES, range(len(VOC_CLASSES))))
self.keep_difficult = keep_difficult
def __call__(self, target):
"""
Arguments:
target (annotation) : the target annotation to be made usable
will be an ET.Element
Returns:
a list containing lists of bounding boxes [bbox coords, class name]
"""
res = np.empty((0,5))
for obj in target.iter('object'):
difficult = int(obj.find('difficult').text) == 1
if not self.keep_difficult and difficult:
continue
name = obj.find('name').text.lower().strip()
bbox = obj.find('bndbox')
pts = ['xmin', 'ymin', 'xmax', 'ymax']
bndbox = []
for i, pt in enumerate(pts):
cur_pt = int(bbox.find(pt).text) - 1
# scale height or width
#cur_pt = cur_pt / width if i % 2 == 0 else cur_pt / height
bndbox.append(cur_pt)
label_idx = self.class_to_ind[name]
bndbox.append(label_idx)
res = np.vstack((res,bndbox)) # [xmin, ymin, xmax, ymax, label_ind]
# img_id = target.find('filename').text[:-4]
return res # [[xmin, ymin, xmax, ymax, label_ind], ... ]
class JACQUARDDetection(data.Dataset):
"""VOC Detection Dataset Object
input is image, target is annotation
Arguments:
root (string): filepath to VOCdevkit folder.
image_set (string): imageset to use (eg. 'train', 'val', 'test')
transform (callable, optional): transformation to perform on the
input image
target_transform (callable, optional): transformation to perform on the
target `annotation`
(eg: take in caption string, return tensor of word indices)
dataset_name (string, optional): which dataset to load
(default: 'VOC2007')
"""
def __init__(self, root, image_sets, preproc=None, target_transform=AnnotationTransform(),
dataset_name='VOC0712'):
self.root = root
self.image_set = image_sets
self.preproc = preproc
self.target_transform = target_transform
self.name = dataset_name
self._annopath = os.path.join('%s', 'Annotations', '%s.xml')
self._imgpath = os.path.join('%s', 'JPEGImages', '%s.jpg')
self.ids = list()
for (year, name) in image_sets:
self._year = year
rootpath = os.path.join(self.root, 'VOC' + year)
for line in open(os.path.join(rootpath, 'ImageSets', 'Main', name + '.txt')):
self.ids.append((rootpath, line.strip()))
def __getitem__(self, index):
img_id = self.ids[index]
target = ET.parse(self._annopath % img_id).getroot()
img = cv2.imread(self._imgpath % img_id, cv2.IMREAD_COLOR)
height, width, _ = img.shape
if self.target_transform is not None:
target = self.target_transform(target)
if self.preproc is not None:
img, target = self.preproc(img, target)
#print(img.size())
# target = self.target_transform(target, width, height)
#print(target.shape)
return img, target
def __len__(self):
return len(self.ids)
def pull_image(self, index):
'''Returns the original image object at index in PIL form
Note: not using self.__getitem__(), as any transformations passed in
could mess up this functionality.
Argument:
index (int): index of img to show
Return:
PIL img
'''
img_id = self.ids[index]
return cv2.imread(self._imgpath % img_id, cv2.IMREAD_COLOR)
def pull_anno(self, index):
'''Returns the original annotation of image at index
Note: not using self.__getitem__(), as any transformations passed in
could mess up this functionality.
Argument:
index (int): index of img to get annotation of
Return:
list: [img_id, [(label, bbox coords),...]]
eg: ('001718', [('dog', (96, 13, 438, 332))])
'''
img_id = self.ids[index]
anno = ET.parse(self._annopath % img_id).getroot()
# gt = self.target_transform(anno, 1, 1)
# gt = self.target_transform(anno)
# return img_id[1], gt
if self.target_transform is not None:
anno = self.target_transform(anno)
return anno
def pull_img_anno(self, index):
'''Returns the original annotation of image at index
Note: not using self.__getitem__(), as any transformations passed in
could mess up this functionality.
Argument:
index (int): index of img to get annotation of
Return:
list: [img_id, [(label, bbox coords),...]]
eg: ('001718', [('dog', (96, 13, 438, 332))])
'''
img_id = self.ids[index]
img = cv2.imread(self._imgpath % img_id, cv2.IMREAD_COLOR)
anno = ET.parse(self._annopath % img_id).getroot()
gt = self.target_transform(anno)
height, width, _ = img.shape
boxes = gt[:,:-1]
labels = gt[:,-1]
boxes[:, 0::2] /= width
boxes[:, 1::2] /= height
labels = np.expand_dims(labels,1)
targets = np.hstack((boxes,labels))
return img, targets
def pull_tensor(self, index):
'''Returns the original image at an index in tensor form
Note: not using self.__getitem__(), as any transformations passed in
could mess up this functionality.
Argument:
index (int): index of img to show
Return:
tensorized version of img, squeezed
'''
to_tensor = transforms.ToTensor()
return torch.Tensor(self.pull_image(index)).unsqueeze_(0)
def evaluate_detections(self, all_boxes, output_dir=None):
"""
all_boxes is a list of length number-of-classes.
Each list element is a list of length number-of-images.
Each of those list elements is either an empty list []
or a numpy array of detection.
all_boxes[class][image] = [] or np.array of shape #dets x 5
"""
self._write_voc_results_file(all_boxes)
aps,map = self._do_python_eval(output_dir)
return aps,map
def _get_voc_results_file_template(self):
filename = 'comp4_det_test' + '_{:s}.txt'
filedir = os.path.join(
self.root, 'results', 'VOC' + self._year, 'Main')
if not os.path.exists(filedir):
os.makedirs(filedir)
path = os.path.join(filedir, filename)
return path
def _write_voc_results_file(self, all_boxes):
for cls_ind, cls in enumerate(VOC_CLASSES):
cls_ind = cls_ind
if cls == '__background__':
continue
print('Writing {} VOC results file'.format(cls))
filename = self._get_voc_results_file_template().format(cls)
with open(filename, 'wt') as f:
for im_ind, index in enumerate(self.ids):
index = index[1]
dets = all_boxes[cls_ind][im_ind]
if dets == []:
continue
for k in range(dets.shape[0]):
f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
format(index, dets[k, -1],
dets[k, 0] + 1, dets[k, 1] + 1,
dets[k, 2] + 1, dets[k, 3] + 1))
def _do_python_eval(self, output_dir='output'):
rootpath = os.path.join(self.root, 'VOC' + self._year)
name = self.image_set[0][1]
annopath = os.path.join(
rootpath,
'Annotations',
'{:s}.xml')
imagesetfile = os.path.join(
rootpath,
'ImageSets',
'Main',
name+'.txt')
cachedir = os.path.join(self.root, 'annotations_cache')
aps = []
# The PASCAL VOC metric changed in 2010
use_07_metric = True if int(self._year) < 2010 else False
print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
if output_dir is not None and not os.path.isdir(output_dir):
os.mkdir(output_dir)
detDB = {}
for i, cls in enumerate(VOC_CLASSES):
if cls == '__background__':
continue
filename = self._get_voc_results_file_template().format(cls)
detfile = filename.format(cls)
with open(detfile, 'r') as f:
lines = f.readlines()
splitlines = [x.strip().split(' ') for x in lines]
image_ids = [x[0] for x in splitlines]
confidence = np.array([float(x[1]) for x in splitlines])
BB = np.array([[float(z) for z in x[2:]] for x in splitlines])
for j in range(len(image_ids)):
im_loc = image_ids[j]
conf_loc = confidence[j]
bb_loc = BB[j, :]
if im_loc not in detDB:
detDB[im_loc] = []
bb_entry = [conf_loc, int(cls), bb_loc[0], bb_loc[1], bb_loc[2], bb_loc[3]] #confidence, class, xmin, ymin, xmax, ymax
detDB[im_loc].append(bb_entry)
with open(imagesetfile, 'r') as f:
lines = f.readlines()
imagenames = [x.strip() for x in lines]
total = 0
suc = 0
for im in imagenames:#foreach image
if im not in detDB:
print("No detections for image", im)
continue
bbDB = sorted(detDB[im], key=itemgetter(0), reverse=True)
bestBB = bbDB[0]
gtbbs = self.parse_rec(annopath.format(im))
max_iou = self.calc_max_iou(bestBB, gtbbs)
total += 1
if max_iou > 0.25:
suc += 1
if total % 100 == 0:
print(suc, total, suc/total)
acc = suc / total
print("FINAL ACCURACY", acc)
return acc, acc
def bb_to_corners(self, bb, angle_classes = 19):
corners = np.zeros((4, 2))
x = (bb[4] + bb[2]) / 2.0
y = (bb[5] + bb[3]) / 2.0
width = bb[4] - bb[2]
height = bb[5] - bb[3]
angle = (bb[1] - 1) / angle_classes * np.pi
corners = np.zeros((4, 2));
corners[0, 0] = -width / 2;
corners[0, 1] = height / 2;
corners[1, 0] = width / 2;
corners[1, 1] = height / 2;
corners[2, 0] = width / 2;
corners[2, 1] = -height / 2;
corners[3, 0] = -width / 2;
corners[3, 1] = -height / 2;
rot = [[np.cos(angle), np.sin(angle)], [-np.sin(angle), np.cos(angle)]]
corners = np.dot(corners, rot)
corners = corners + np.array([x, y])
return corners, angle
def calc_max_iou(self, bb, gtbbs, visualize=False):
max_iou = 0
corners1, angle1 = self.bb_to_corners(bb)
if visualize:
img = np.zeros((1024, 1024, 3), np.uint8)
self.cv2corners(img, corners1, color=(0, 255, 0))
for i in range(len(gtbbs)):
gtbb = gtbbs[i]
gtbb = [1, int(gtbb['name']), gtbb['bbox'][0], gtbb['bbox'][1], gtbb['bbox'][2], gtbb['bbox'][3]]
corners2, angle2 = self.bb_to_corners(gtbb)
if visualize:
self.cv2corners(img, corners2)
if abs(angle2 - angle1) > np.pi / 6:
continue
iou = self.calc_iou(corners1, corners2)
max_iou = max(iou, max_iou)
if visualize:
print(max_iou)
cv2.imshow('result', img)
cv2.waitKey(0)
return max_iou
def calc_iou(self, corners1, corners2):
rr1, cc1 = polygon(corners1[:, 0], corners1[:, 1])
rr2, cc2 = polygon(corners2[:, 0], corners2[:, 1])
try:
r_max = max(rr1.max(), rr2.max()) + 1
c_max = max(cc1.max(), cc2.max()) + 1
except:
return 0
canvas = np.zeros((r_max, c_max))
canvas[rr1, cc1] += 1
canvas[rr2, cc2] += 1
union = np.sum(canvas > 0)
if union == 0:
return 0
intersection = np.sum(canvas == 2)
return intersection * 1.0 / union
def cv2corners(self, img, corners, color=(255, 0, 0)):
for i in range(4):
nextI = (i + 1) % 4
c1 = (int(corners[i, 0]), int(corners[i, 1]))
c2 = (int(corners[nextI, 0]), int(corners[nextI, 1]))
cv2.line(img, c1, c2, color, 3)
def parse_rec(self, filename):
""" Parse a PASCAL VOC xml file """
tree = ET.parse(filename)
objects = []
for obj in tree.findall('object'):
obj_struct = {}
obj_struct['name'] = obj.find('name').text
# obj_struct['pose'] = obj.find('pose').text
obj_struct['truncated'] = int(obj.find('truncated').text)
obj_struct['difficult'] = int(obj.find('difficult').text)
bbox = obj.find('bndbox')
obj_struct['bbox'] = [int(bbox.find('xmin').text),
int(bbox.find('ymin').text),
int(bbox.find('xmax').text),
int(bbox.find('ymax').text)]
objects.append(obj_struct)
return objects
def show(self, index):
img, target = self.__getitem__(index)
for obj in target:
obj = obj.astype(np.int)
cv2.rectangle(img, (obj[0], obj[1]), (obj[2], obj[3]), (255,0,0), 3)
cv2.imwrite('./image.jpg', img)
## test
# if __name__ == '__main__':
# ds = VOCDetection('../../../../../dataset/VOCdevkit/', [('2012', 'train')],
# None, AnnotationTransform())
# print(len(ds))
# img, target = ds[0]
# print(target)
# ds.show(1)
| 8,002
| 0
| 324
|
de9e357b7df8fea8daf14e222029818e3e784ecd
| 1,254
|
py
|
Python
|
recognition_plate_licenses.py
|
mezgoodle/opencv-tutorial
|
76f4e7f9002eb8f8e4879167aee670faa32c7b7c
|
[
"MIT"
] | 1
|
2021-11-11T17:03:21.000Z
|
2021-11-11T17:03:21.000Z
|
recognition_plate_licenses.py
|
mezgoodle/opencv-tutorial
|
76f4e7f9002eb8f8e4879167aee670faa32c7b7c
|
[
"MIT"
] | 1
|
2021-08-16T13:32:59.000Z
|
2021-08-17T09:25:13.000Z
|
recognition_plate_licenses.py
|
mezgoodle/opencv-tutorial
|
76f4e7f9002eb8f8e4879167aee670faa32c7b7c
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
import imutils
import easyocr
from matplotlib import pyplot as plt
image = cv2.imread('images/license.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
image_filter = cv2.bilateralFilter(gray, 11, 15, 15)
edges = cv2.Canny(image_filter, 30, 200)
contours = cv2.findContours(edges.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = imutils.grab_contours(contours)
contours = sorted(contours, key=cv2.contourArea, reverse=True)
position = None
for contour in contours:
approx = cv2.approxPolyDP(contour, 10, True)
number_of_edges = 4
if len(approx) == number_of_edges:
position = approx
break
mask = np.zeros(gray.shape, np.uint8)
new_image = cv2.drawContours(mask, [position], 0, 255, -1)
bitwise_image = cv2.bitwise_and(image, image, mask=mask)
x, y = np.where(mask == 255)
x1, y1 = np.min(x), np.min(y)
x2, y2 = np.max(x), np.max(y)
crop = gray[x1:x2, y1:y2]
text = easyocr.Reader(['en'])
text = text.readtext(crop)
result = text[0][-2]
final_image = cv2.putText(image, result, (x1, y2 + 60), cv2.FONT_HERSHEY_PLAIN, 3, (0, 0, 255), 1)
final_image = cv2.rectangle(image, (x1, x2), (y1, y2), (0, 255, 0), 1)
plt.imshow(cv2.cvtColor(final_image, cv2.COLOR_BGR2RGB))
plt.show()
| 29.162791
| 98
| 0.707337
|
import cv2
import numpy as np
import imutils
import easyocr
from matplotlib import pyplot as plt
image = cv2.imread('images/license.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
image_filter = cv2.bilateralFilter(gray, 11, 15, 15)
edges = cv2.Canny(image_filter, 30, 200)
contours = cv2.findContours(edges.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = imutils.grab_contours(contours)
contours = sorted(contours, key=cv2.contourArea, reverse=True)
position = None
for contour in contours:
approx = cv2.approxPolyDP(contour, 10, True)
number_of_edges = 4
if len(approx) == number_of_edges:
position = approx
break
mask = np.zeros(gray.shape, np.uint8)
new_image = cv2.drawContours(mask, [position], 0, 255, -1)
bitwise_image = cv2.bitwise_and(image, image, mask=mask)
x, y = np.where(mask == 255)
x1, y1 = np.min(x), np.min(y)
x2, y2 = np.max(x), np.max(y)
crop = gray[x1:x2, y1:y2]
text = easyocr.Reader(['en'])
text = text.readtext(crop)
result = text[0][-2]
final_image = cv2.putText(image, result, (x1, y2 + 60), cv2.FONT_HERSHEY_PLAIN, 3, (0, 0, 255), 1)
final_image = cv2.rectangle(image, (x1, x2), (y1, y2), (0, 255, 0), 1)
plt.imshow(cv2.cvtColor(final_image, cv2.COLOR_BGR2RGB))
plt.show()
| 0
| 0
| 0
|
4bef42c0e917472e68a30cc758cdbfa6221dd7f3
| 1,108
|
py
|
Python
|
Fibonacci.py
|
Carloselrecharlie/Fiboncacci_sequence
|
a28cc61b35775dee2489fd07d676348200377660
|
[
"Apache-2.0"
] | null | null | null |
Fibonacci.py
|
Carloselrecharlie/Fiboncacci_sequence
|
a28cc61b35775dee2489fd07d676348200377660
|
[
"Apache-2.0"
] | null | null | null |
Fibonacci.py
|
Carloselrecharlie/Fiboncacci_sequence
|
a28cc61b35775dee2489fd07d676348200377660
|
[
"Apache-2.0"
] | null | null | null |
# Carlos Montes Parra
# A program that displays a number from the Fibonacci sequence. The position
# in the sequence is linked to the first and last letter of the user's name
# and the addition of their Unicode values.
# Adapted from one of Ian McLoughlin's lectures https://github.com/ianmcloughlin/python-fib/blob/master/fibname.py
name = "Montes"
first = name[0]
last = name[-1]
firstN = ord(first)
lastN = ord(last)
x = firstN + lastN
ans = fib(x)
print("My surname is", name)
print("The first letter", first, "is number", firstN)
print("The last letter", last, "is number", lastN)
print("Fibonacci number", x, "is", ans)
# SOLUTION
# My surname is Montes
# The first letter M is number 77
# The last letter s is number 115
# Fibonacci number 192 is 5972304273877744135569338397692020533504
# ord () is a python built-in function which returns the Unicode value linked to a one-character string. It's the opposite of chr() or unichr()
| 24.622222
| 143
| 0.700361
|
# Carlos Montes Parra
# A program that displays a number from the Fibonacci sequence. The position
# in the sequence is linked to the first and last letter of the user's name
# and the addition of their Unicode values.
# Adapted from one of Ian McLoughlin's lectures https://github.com/ianmcloughlin/python-fib/blob/master/fibname.py
def fib(n):
# This function returns the nth Fibonacci number
i = 0
j = 1
n = n - 1
while n >= 0:
i, j = j, i + j
n = n - 1
return i
name = "Montes"
first = name[0]
last = name[-1]
firstN = ord(first)
lastN = ord(last)
x = firstN + lastN
ans = fib(x)
print("My surname is", name)
print("The first letter", first, "is number", firstN)
print("The last letter", last, "is number", lastN)
print("Fibonacci number", x, "is", ans)
# SOLUTION
# My surname is Montes
# The first letter M is number 77
# The last letter s is number 115
# Fibonacci number 192 is 5972304273877744135569338397692020533504
# ord () is a python built-in function which returns the Unicode value linked to a one-character string. It's the opposite of chr() or unichr()
| 134
| 0
| 23
|
2255ee9719fac77ef535168206a055deff41e616
| 1,991
|
py
|
Python
|
results/get_xflickrco_many.py
|
danoneata/iglue
|
c4db972bc67f83f325a847093906b924503ccfc4
|
[
"MIT"
] | 15
|
2022-01-28T03:08:46.000Z
|
2022-03-24T09:24:07.000Z
|
results/get_xflickrco_many.py
|
danoneata/iglue
|
c4db972bc67f83f325a847093906b924503ccfc4
|
[
"MIT"
] | 4
|
2022-03-01T19:13:39.000Z
|
2022-03-27T08:21:46.000Z
|
results/get_xflickrco_many.py
|
danoneata/iglue
|
c4db972bc67f83f325a847093906b924503ccfc4
|
[
"MIT"
] | 2
|
2022-02-05T17:04:42.000Z
|
2022-03-11T14:52:47.000Z
|
import os
import argparse
import numpy as np
import pandas as pd
TASK = "xflickrco"
LANGS = ['de', 'ja']
SHOTS = [200, 500, 1000, 1500, '100x5', '200x5']
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--models', type=str)
parser.add_argument('--exp_dir', type=str)
parser.add_argument('--version', type=str, default=None)
args = parser.parse_args()
version = f".{args.version}" if args.version is not None else ""
exp_dir = args.exp_dir + version
models = args.models.split(",")
for shot in SHOTS:
res_ir_df = pd.DataFrame(columns=['model']+LANGS)
res_tr_df = pd.DataFrame(columns=['model']+LANGS)
for model in models:
res_ir = dict()
res_tr = dict()
res_ir['model'] = model
res_tr['model'] = model
for lang in LANGS:
fn = os.path.join(exp_dir, model, TASK, lang, str(shot), f"test.out")
try:
lines = [l.strip() for l in open(fn).readlines() if l.startswith("Final")] #[-1]
ir1 = float(lines[-2].split()[1].split(",")[0].split(':')[1])
tr1 = float(lines[-1].split()[1].split(",")[0].split(':')[1])
res_ir[lang] = ir1
res_tr[lang] = tr1
except:
print(fn)
res_ir[lang] = -1.0
res_tr[lang] = -1.0
# avg_ir = np.mean([res_ir[lang] for lang in LANGS])
# avg_tr = np.mean([res_tr[lang] for lang in LANGS])
# res_ir['avg'] = avg_ir
# res_tr['avg'] = avg_tr
res_ir_df = res_ir_df.append(res_ir, ignore_index=True)
res_tr_df = res_tr_df.append(res_tr, ignore_index=True)
res_ir_df.to_csv(f"{TASK}/xFlickrCO-many_ir_{shot}{version}.csv", index=False)
res_tr_df.to_csv(f"{TASK}/xFlickrCO-many_tr_{shot}{version}.csv", index=False)
| 38.288462
| 100
| 0.542943
|
import os
import argparse
import numpy as np
import pandas as pd
TASK = "xflickrco"
LANGS = ['de', 'ja']
SHOTS = [200, 500, 1000, 1500, '100x5', '200x5']
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--models', type=str)
parser.add_argument('--exp_dir', type=str)
parser.add_argument('--version', type=str, default=None)
args = parser.parse_args()
version = f".{args.version}" if args.version is not None else ""
exp_dir = args.exp_dir + version
models = args.models.split(",")
for shot in SHOTS:
res_ir_df = pd.DataFrame(columns=['model']+LANGS)
res_tr_df = pd.DataFrame(columns=['model']+LANGS)
for model in models:
res_ir = dict()
res_tr = dict()
res_ir['model'] = model
res_tr['model'] = model
for lang in LANGS:
fn = os.path.join(exp_dir, model, TASK, lang, str(shot), f"test.out")
try:
lines = [l.strip() for l in open(fn).readlines() if l.startswith("Final")] #[-1]
ir1 = float(lines[-2].split()[1].split(",")[0].split(':')[1])
tr1 = float(lines[-1].split()[1].split(",")[0].split(':')[1])
res_ir[lang] = ir1
res_tr[lang] = tr1
except:
print(fn)
res_ir[lang] = -1.0
res_tr[lang] = -1.0
# avg_ir = np.mean([res_ir[lang] for lang in LANGS])
# avg_tr = np.mean([res_tr[lang] for lang in LANGS])
# res_ir['avg'] = avg_ir
# res_tr['avg'] = avg_tr
res_ir_df = res_ir_df.append(res_ir, ignore_index=True)
res_tr_df = res_tr_df.append(res_tr, ignore_index=True)
res_ir_df.to_csv(f"{TASK}/xFlickrCO-many_ir_{shot}{version}.csv", index=False)
res_tr_df.to_csv(f"{TASK}/xFlickrCO-many_tr_{shot}{version}.csv", index=False)
| 0
| 0
| 0
|
4d2480f5a7dd624715e75d6e800820d479ea0649
| 241
|
py
|
Python
|
line_graph_2.py
|
kethan1/Scipy-Python
|
1f260ed12f0923dc044ee74a28d35cc1e6f6f821
|
[
"MIT"
] | null | null | null |
line_graph_2.py
|
kethan1/Scipy-Python
|
1f260ed12f0923dc044ee74a28d35cc1e6f6f821
|
[
"MIT"
] | null | null | null |
line_graph_2.py
|
kethan1/Scipy-Python
|
1f260ed12f0923dc044ee74a28d35cc1e6f6f821
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import numpy as np
plt.plot(list(index for index, y in enumerate(get_coords())), list(y for y in get_coords()), "y-.s")
plt.show()
| 20.083333
| 100
| 0.643154
|
import matplotlib.pyplot as plt
import numpy as np
def get_coords():
for x in range(1, 11):
yield x**2 - 3*x + 9
plt.plot(list(index for index, y in enumerate(get_coords())), list(y for y in get_coords()), "y-.s")
plt.show()
| 52
| 0
| 23
|
7b6ad7b68b1168b3c953cf0cc2a54b22b5266807
| 538
|
py
|
Python
|
quati/dataset/fields/affixes.py
|
onenoc/quati
|
ba372b2ad14076294af62cbcbc27e1b3ca8421c1
|
[
"MIT"
] | 2
|
2021-01-30T21:20:36.000Z
|
2021-01-30T22:15:07.000Z
|
quati/dataset/fields/affixes.py
|
onenoc/quati
|
ba372b2ad14076294af62cbcbc27e1b3ca8421c1
|
[
"MIT"
] | null | null | null |
quati/dataset/fields/affixes.py
|
onenoc/quati
|
ba372b2ad14076294af62cbcbc27e1b3ca8421c1
|
[
"MIT"
] | 1
|
2021-01-18T23:12:18.000Z
|
2021-01-18T23:12:18.000Z
|
from torchtext.data import Field
from quati import constants
from quati.dataset.vocabulary import Vocabulary
class AffixesField(Field):
"""
Defines a field for affixes (prefixes and suffixes) by setting only
unk_token and pad_token to their default constant value.
"""
| 26.9
| 71
| 0.630112
|
from torchtext.data import Field
from quati import constants
from quati.dataset.vocabulary import Vocabulary
class AffixesField(Field):
"""
Defines a field for affixes (prefixes and suffixes) by setting only
unk_token and pad_token to their default constant value.
"""
def __init__(self, **kwargs):
super().__init__(unk_token=constants.UNK,
pad_token=constants.PAD,
batch_first=True,
**kwargs)
self.vocab_cls = Vocabulary
| 222
| 0
| 26
|
8cb03c84a3e95c4b0475a50e2e10afbcec7f2753
| 7,452
|
py
|
Python
|
afka/afk.py
|
FlorianLudwig/afka
|
4efaf7f461e0f79827b3c138059ff01cb7088ff6
|
[
"Apache-2.0"
] | 3
|
2020-12-23T21:46:38.000Z
|
2021-01-11T19:23:57.000Z
|
afka/afk.py
|
FlorianLudwig/afka
|
4efaf7f461e0f79827b3c138059ff01cb7088ff6
|
[
"Apache-2.0"
] | null | null | null |
afka/afk.py
|
FlorianLudwig/afka
|
4efaf7f461e0f79827b3c138059ff01cb7088ff6
|
[
"Apache-2.0"
] | null | null | null |
"""Interface to the game"""
import anre
import time
import enum
import logging
import pkg_resources
logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger(__name__)
AVOID_DOUBLE_TAB_DELAY = 0.1
| 34.183486
| 97
| 0.621981
|
"""Interface to the game"""
import anre
import time
import enum
import logging
import pkg_resources
class MainScreen(enum.Enum):
RANHORN = "ranhorn"
DARK_FOREST = "dark_forest"
CAMPAIGN = "campaign"
HEROS = "heros"
CHAT = "chat"
class Screen(enum.Enum):
MAIL = "mail"
BAG = "bag"
FRIENDS = "friends"
QUESTS_DAILIES = "quests_dailies"
QUESTS_WEEKLY = "quests_weekly"
QUESTS_CAMPAIGN = "quests_campaign"
BOUNTY_QUESTS = "bounty_quests"
BOUNTY_TEAM = "bounty_team"
logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger(__name__)
AVOID_DOUBLE_TAB_DELAY = 0.1
class AFKArena:
def __init__(self, ar: anre.Anre) -> None:
self.ar = ar
self.activity_name = "com.lilithgame.hgame.gp"
self.current_screen = MainScreen.CAMPAIGN
def start_app(self):
self.ar.start_app(self.activity_name)
def close_app(self):
self.ar.close_app(self.activity_name)
def wait_until_loaded(self):
LOG.info("waiting till app is loaded")
while self.ar.get_pixel("50%", -1, update=True) != (242, 225, 145):
time.sleep(0.1)
LOG.info("afk arena done loading")
def switch_to(self, target):
LOG.debug("switch from %s to %s", self.current_screen, target)
if self.current_screen == target:
return
main_screens = {
MainScreen.RANHORN: "10%",
MainScreen.DARK_FOREST: "30%",
MainScreen.CAMPAIGN: "50%",
MainScreen.HEROS: "70%",
MainScreen.CHAT: "90%",
}
if target == Screen.FRIENDS:
self.switch_to(MainScreen.CAMPAIGN)
self.click_all_image("menu_arrow")
self.tap_image("friends")
elif target == Screen.MAIL:
self.switch_to(MainScreen.CAMPAIGN)
self.click_all_image("menu_arrow", timeout=3)
self.tap_image("mail")
elif target == "guild":
self.switch_to(MainScreen.RANHORN)
self.ar.tap("30%", "15%")
time.sleep(AVOID_DOUBLE_TAB_DELAY)
elif target == Screen.QUESTS_DAILIES:
self.switch_to(MainScreen.CAMPAIGN)
self.tap_image("quests")
time.sleep(AVOID_DOUBLE_TAB_DELAY)
elif target == Screen.BOUNTY_QUESTS:
self.switch_to(MainScreen.DARK_FOREST)
self.tap_image("bounty_board")
time.sleep(AVOID_DOUBLE_TAB_DELAY)
elif target == "guild_hunting":
self.switch_to("guild")
self.tap_image("guild_hunting")
elif target in main_screens:
# we want to go to one of the main screens, ensure there
# is no back button visible (meaning we are in one of the)
# subscreens
self.click_all_image("back", timeout=5)
x = main_screens[target]
self.ar.tap(x, -10)
else:
raise AttributeError(f"Unkown screen '{target}'")
# TODO verify we successfully reached desired screen
self.current_screen = target
def tap_image(self, image_name, scale=1.0, threshold=0.9, timeout=60):
collect = pkg_resources.resource_filename("afka", f"res/{image_name}.png")
return self.ar.tap_image(collect, scale=scale, threshold=threshold, timeout=timeout)
def find_image(self, image_name, scale=1.0):
collect = pkg_resources.resource_filename("afka", f"res/{image_name}.png")
return self.ar.find_image(collect, scale=scale)
def wait_for_image(self, image_name, scale=1.0, timeout=60, threshold=0.9):
collect = pkg_resources.resource_filename("afka", f"res/{image_name}.png")
return self.ar.wait_for_image(collect, timeout=timeout, threshold=threshold, scale=scale)
def loot_afk_chest(self):
LOG.info("starting loot_afk_chest")
self.switch_to(MainScreen.CAMPAIGN)
self.ar.tap("50%", -450) # tap on pile of loot
self.tap_image("blue_button") # tap collect button
time.sleep(AVOID_DOUBLE_TAB_DELAY)
LOG.info("done loot_afk_chest")
def loot_fast_rewards(self, spend_diamonds=False):
LOG.info("starting loot_fast_rewards")
self.switch_to(MainScreen.CAMPAIGN)
self.tap_image("fast_rewards")
self.wait_for_image("popup")
conf, x, y = self.find_image("collect_for_50")
if conf >= 0.8:
if spend_diamonds:
self.ar.tap(x, y)
else:
LOG.info("fast rewards only available for diamonds and spend_diamonds=False")
self.ar.tap(10, 10)
time.sleep(AVOID_DOUBLE_TAB_DELAY)
LOG.info("done loot_fast_rewards")
return
else:
self.tap_image("collect_yellow")
time.sleep(AVOID_DOUBLE_TAB_DELAY)
self.ar.tap(10, 10)
time.sleep(AVOID_DOUBLE_TAB_DELAY)
self.ar.tap(10, 10)
time.sleep(AVOID_DOUBLE_TAB_DELAY)
LOG.info("done loot_fast_rewards")
def friends_send_and_receive(self):
LOG.info("starting friends_send_and_receive")
self.switch_to(Screen.FRIENDS)
# tap Send & Receive button
self.tap_image("and")
LOG.info("done friends_send_and_receive")
def collect_mail(self):
LOG.info("starting collect_mail")
self.switch_to(Screen.MAIL)
self.tap_image("blue_button")
LOG.info("done collect_mail")
def guild_hunt(self):
LOG.info("starting guild_hunt")
self.switch_to("guild_hunting")
for _ in range(2):
try:
self.tap_image("guild_hunt_challenge", timeout=10) # start challenge
except ValueError:
print("Looks like guild hunting already done")
break
time.sleep(0.1)
self.tap_image("guild_hunt_challenge") # begin battle (confirms formation)
time.sleep(60)
self.tap_image("tap_to_close")
self.tap_image("tap_to_close")
LOG.info("done guild_hunt")
def click_all_image(self, image_name, scale=1.0, threshold=0.9, timeout=5):
LOG.debug("Click all %s images", image_name)
while True:
self.ar.update_screencap()
try:
self.tap_image(image_name, scale=scale, threshold=threshold, timeout=timeout)
except ValueError:
return
def collect_quest_rewards(self):
LOG.info("starting collect_quest_rewards")
self.switch_to(Screen.QUESTS_DAILIES)
self.click_all_image("blue_button", threshold=0.7, scale=0.85)
self.click_all_image("quest_reward", threshold=0.7, scale=0.85)
# self.switch_to(Screen.QUESTS_DAILIES)
# self.switch_to("quests_campaign")
LOG.info("done collect_quest_rewards")
def fight_campaign(self):
LOG.info("starting fight_campaign")
self.switch_to(MainScreen.CAMPAIGN)
self.tap_image("guild_hunt_challenge") # begin battle
time.sleep(1)
self.ar.update_screencap()
self.tap_image("guild_hunt_challenge") # begin battle (confirms formation)
self.tap_image("touch_screen_to_continue", timeout=160)
self.tap_image("touch_screen_to_continue", timeout=10)
LOG.info("done collect_quest_rewards")
def close(self):
self.ar.close()
| 6,333
| 364
| 535
|
b15752d7594b80ee0f099a7a48c0f2d982a29ae8
| 1,379
|
py
|
Python
|
glazier/lib/file_util_test.py
|
ofek/glazier
|
9f2a7ef98594fb61b5f3d368ea3c97294ed3a54b
|
[
"Apache-2.0"
] | null | null | null |
glazier/lib/file_util_test.py
|
ofek/glazier
|
9f2a7ef98594fb61b5f3d368ea3c97294ed3a54b
|
[
"Apache-2.0"
] | null | null | null |
glazier/lib/file_util_test.py
|
ofek/glazier
|
9f2a7ef98594fb61b5f3d368ea3c97294ed3a54b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for glazier.lib.file_util."""
from pyfakefs import fake_filesystem
from glazier.lib import file_util
from absl.testing import absltest
if __name__ == '__main__':
absltest.main()
| 34.475
| 74
| 0.750544
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for glazier.lib.file_util."""
from pyfakefs import fake_filesystem
from glazier.lib import file_util
from absl.testing import absltest
class FileUtilTest(absltest.TestCase):
def setUp(self):
super(FileUtilTest, self).setUp()
self.filesystem = fake_filesystem.FakeFilesystem()
file_util.os = fake_filesystem.FakeOsModule(self.filesystem)
file_util.open = fake_filesystem.FakeFileOpen(self.filesystem)
def testCreateDirectories(self):
self.filesystem.CreateFile('/test')
self.assertRaises(file_util.Error, file_util.CreateDirectories,
'/test/file.txt')
file_util.CreateDirectories('/tmp/test/path/file.log')
self.assertTrue(self.filesystem.Exists('/tmp/test/path'))
if __name__ == '__main__':
absltest.main()
| 500
| 17
| 73
|
8539f395aacbb82d340e074717bfd772bb0e27e1
| 16,779
|
py
|
Python
|
py/test/testing/acceptance_test.py
|
woodrow/pyoac
|
b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7
|
[
"MIT"
] | 1
|
2019-05-27T00:58:46.000Z
|
2019-05-27T00:58:46.000Z
|
py/test/testing/acceptance_test.py
|
woodrow/pyoac
|
b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7
|
[
"MIT"
] | null | null | null |
py/test/testing/acceptance_test.py
|
woodrow/pyoac
|
b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7
|
[
"MIT"
] | null | null | null |
import py
pydir = py.path.local(py.__file__).dirpath()
pytestpath = pydir.join("bin", "py.test")
EXPECTTIMEOUT=10.0
| 33.558
| 92
| 0.484236
|
import py
pydir = py.path.local(py.__file__).dirpath()
pytestpath = pydir.join("bin", "py.test")
EXPECTTIMEOUT=10.0
class TestGeneralUsage:
def test_config_error(self, testdir):
testdir.makeconftest("""
class ConftestPlugin:
def pytest_configure(self, config):
raise config.Error("hello")
""")
result = testdir.runpytest(testdir.tmpdir)
assert result.ret != 0
assert result.stderr.fnmatch_lines([
'*ERROR: hello'
])
def test_basetemp(self, testdir):
mytemp = testdir.tmpdir.mkdir("mytemp")
p = testdir.makepyfile("""
import py
def test_1():
py.test.ensuretemp('xyz')
""")
result = testdir.runpytest(p, '--basetemp=%s' %mytemp)
assert result.ret == 0
assert mytemp.join('xyz').check(dir=1)
def test_assertion_magic(self, testdir):
p = testdir.makepyfile("""
def test_this():
x = 0
assert x
""")
result = testdir.runpytest(p)
extra = result.stdout.fnmatch_lines([
"> assert x",
"E assert 0",
])
assert result.ret == 1
def test_collectonly_simple(self, testdir):
p = testdir.makepyfile("""
def test_func1():
pass
class TestClass:
def test_method(self):
pass
""")
result = testdir.runpytest("--collectonly", p)
stderr = result.stderr.str().strip()
assert stderr.startswith("inserting into sys.path")
assert result.ret == 0
extra = result.stdout.fnmatch_lines(py.code.Source("""
<Module '*.py'>
<Function 'test_func1'*>
<Class 'TestClass'>
<Instance '()'>
<Function 'test_method'*>
""").strip())
def test_collectonly_error(self, testdir):
p = testdir.makepyfile("import Errlkjqweqwe")
result = testdir.runpytest("--collectonly", p)
stderr = result.stderr.str().strip()
assert stderr.startswith("inserting into sys.path")
assert result.ret == 1
extra = result.stdout.fnmatch_lines(py.code.Source("""
<Module '*.py'>
*ImportError*
!!!*failures*!!!
*test_collectonly_error.py:1*
""").strip())
def test_nested_import_error(self, testdir):
p = testdir.makepyfile("""
import import_fails
def test_this():
assert import_fails.a == 1
""")
testdir.makepyfile(import_fails="import does_not_work")
result = testdir.runpytest(p)
extra = result.stdout.fnmatch_lines([
"> import import_fails",
"E ImportError: No module named does_not_work",
])
assert result.ret == 1
def test_skipped_reasons(self, testdir):
testdir.makepyfile(
test_one="""
from conftest import doskip
def setup_function(func):
doskip()
def test_func():
pass
class TestClass:
def test_method(self):
doskip()
""",
test_two = """
from conftest import doskip
doskip()
""",
conftest = """
import py
def doskip():
py.test.skip('test')
"""
)
result = testdir.runpytest()
extra = result.stdout.fnmatch_lines([
"*test_one.py ss",
"*test_two.py S",
"___* skipped test summary *_",
"*conftest.py:3: *3* Skipped: 'test'",
])
assert result.ret == 0
def test_deselected(self, testdir):
testpath = testdir.makepyfile("""
def test_one():
pass
def test_two():
pass
def test_three():
pass
"""
)
result = testdir.runpytest("-k", "test_two:", testpath)
extra = result.stdout.fnmatch_lines([
"*test_deselected.py ..",
"=* 1 test*deselected by 'test_two:'*=",
])
assert result.ret == 0
def test_no_skip_summary_if_failure(self, testdir):
testdir.makepyfile("""
import py
def test_ok():
pass
def test_fail():
assert 0
def test_skip():
py.test.skip("dontshow")
""")
result = testdir.runpytest()
assert result.stdout.str().find("skip test summary") == -1
assert result.ret == 1
def test_passes(self, testdir):
p1 = testdir.makepyfile("""
def test_passes():
pass
class TestClass:
def test_method(self):
pass
""")
old = p1.dirpath().chdir()
try:
result = testdir.runpytest()
finally:
old.chdir()
extra = result.stdout.fnmatch_lines([
"test_passes.py ..",
"* 2 pass*",
])
assert result.ret == 0
def test_header_trailer_info(self, testdir):
p1 = testdir.makepyfile("""
def test_passes():
pass
""")
result = testdir.runpytest()
verinfo = ".".join(map(str, py.std.sys.version_info[:3]))
extra = result.stdout.fnmatch_lines([
"*===== test session starts ====*",
"python: platform %s -- Python %s*" %(
py.std.sys.platform, verinfo), # , py.std.sys.executable),
"*test_header_trailer_info.py .",
"=* 1 passed in *.[0-9][0-9] seconds *=",
])
def test_traceback_failure(self, testdir):
p1 = testdir.makepyfile("""
def g():
return 2
def f(x):
assert x == g()
def test_onefails():
f(3)
""")
result = testdir.runpytest(p1)
result.stdout.fnmatch_lines([
"*test_traceback_failure.py F",
"====* FAILURES *====",
"____*____",
"",
" def test_onefails():",
"> f(3)",
"",
"*test_*.py:6: ",
"_ _ _ *",
#"",
" def f(x):",
"> assert x == g()",
"E assert 3 == 2",
"E + where 2 = g()",
"",
"*test_traceback_failure.py:4: AssertionError"
])
def test_capturing_outerr(self, testdir):
p1 = testdir.makepyfile("""
import sys
def test_capturing():
print 42
print >>sys.stderr, 23
def test_capturing_error():
print 1
print >>sys.stderr, 2
raise ValueError
""")
result = testdir.runpytest(p1)
result.stdout.fnmatch_lines([
"*test_capturing_outerr.py .F",
"====* FAILURES *====",
"____*____",
"*test_capturing_outerr.py:8: ValueError",
"*--- Captured stdout ---*",
"1",
"*--- Captured stderr ---*",
"2",
])
def test_showlocals(self, testdir):
p1 = testdir.makepyfile("""
def test_showlocals():
x = 3
y = "x" * 5000
assert 0
""")
result = testdir.runpytest(p1, '-l')
result.stdout.fnmatch_lines([
#"_ _ * Locals *",
"x* = 3",
"y* = 'xxxxxx*"
])
def test_verbose_reporting(self, testdir):
p1 = testdir.makepyfile("""
import py
def test_fail():
raise ValueError()
def test_pass():
pass
class TestClass:
def test_skip(self):
py.test.skip("hello")
def test_gen():
def check(x):
assert x == 1
yield check, 0
""")
result = testdir.runpytest(p1, '-v')
result.stdout.fnmatch_lines([
"*test_verbose_reporting.py:2: test_fail*FAIL*",
"*test_verbose_reporting.py:4: test_pass*PASS*",
"*test_verbose_reporting.py:7: TestClass.test_skip*SKIP*",
"*test_verbose_reporting.py:10: test_gen*FAIL*",
])
assert result.ret == 1
result = testdir.runpytest(p1, '-v', '-n 1')
result.stdout.fnmatch_lines([
"*FAIL*test_verbose_reporting.py:2: test_fail*",
])
assert result.ret == 1
class TestDistribution:
def test_dist_conftest_options(self, testdir):
p1 = testdir.tmpdir.ensure("dir", 'p1.py')
p1.dirpath("__init__.py").write("")
p1.dirpath("conftest.py").write(py.code.Source("""
print "importing conftest", __file__
import py
Option = py.test.config.Option
option = py.test.config.addoptions("someopt",
Option('--someopt', action="store_true", dest="someopt", default=False))
dist_rsync_roots = ['../dir']
print "added options", option
print "config file seen from conftest", py.test.config
"""))
p1.write(py.code.Source("""
import py, conftest
def test_1():
print "config from test_1", py.test.config
print "conftest from test_1", conftest.__file__
print "test_1: py.test.config.option.someopt", py.test.config.option.someopt
print "test_1: conftest", conftest
print "test_1: conftest.option.someopt", conftest.option.someopt
assert conftest.option.someopt
"""))
result = testdir.runpytest('-d', '--tx=popen', p1, '--someopt')
assert result.ret == 0
extra = result.stdout.fnmatch_lines([
"*1 passed*",
])
def test_manytests_to_one_popen(self, testdir):
p1 = testdir.makepyfile("""
import py
def test_fail0():
assert 0
def test_fail1():
raise ValueError()
def test_ok():
pass
def test_skip():
py.test.skip("hello")
""",
)
result = testdir.runpytest(p1, '-d', '--tx=popen', '--tx=popen')
result.stdout.fnmatch_lines([
"*1*popen*Python*",
"*2*popen*Python*",
"*2 failed, 1 passed, 1 skipped*",
])
assert result.ret == 1
def test_dist_conftest_specified(self, testdir):
p1 = testdir.makepyfile("""
import py
def test_fail0():
assert 0
def test_fail1():
raise ValueError()
def test_ok():
pass
def test_skip():
py.test.skip("hello")
""",
)
testdir.makeconftest("""
pytest_option_tx = 'popen popen popen'.split()
""")
result = testdir.runpytest(p1, '-d')
result.stdout.fnmatch_lines([
"*1*popen*Python*",
"*2*popen*Python*",
"*3*popen*Python*",
"*2 failed, 1 passed, 1 skipped*",
])
assert result.ret == 1
def test_dist_tests_with_crash(self, testdir):
if not hasattr(py.std.os, 'kill'):
py.test.skip("no os.kill")
p1 = testdir.makepyfile("""
import py
def test_fail0():
assert 0
def test_fail1():
raise ValueError()
def test_ok():
pass
def test_skip():
py.test.skip("hello")
def test_crash():
import time
import os
time.sleep(0.5)
os.kill(os.getpid(), 15)
"""
)
result = testdir.runpytest(p1, '-d', '--tx=3*popen')
result.stdout.fnmatch_lines([
"*popen*Python*",
"*popen*Python*",
"*popen*Python*",
"*node down*",
"*3 failed, 1 passed, 1 skipped*"
])
assert result.ret == 1
def test_distribution_rsyncdirs_example(self, testdir):
source = testdir.mkdir("source")
dest = testdir.mkdir("dest")
subdir = source.mkdir("example_pkg")
subdir.ensure("__init__.py")
p = subdir.join("test_one.py")
p.write("def test_5(): assert not __file__.startswith(%r)" % str(p))
result = testdir.runpytest("-d", "--rsyncdir=%(subdir)s" % locals(),
"--tx=popen//chdir=%(dest)s" % locals(), p)
assert result.ret == 0
result.stdout.fnmatch_lines([
"*1* *popen*platform*",
#"RSyncStart: [G1]",
#"RSyncFinished: [G1]",
"*1 passed*"
])
assert dest.join(subdir.basename).check(dir=1)
def test_dist_each(self, testdir):
interpreters = []
for name in ("python2.4", "python2.5"):
interp = py.path.local.sysfind(name)
if interp is None:
py.test.skip("%s not found" % name)
interpreters.append(interp)
testdir.makepyfile(__init__="", test_one="""
import sys
def test_hello():
print "%s...%s" % sys.version_info[:2]
assert 0
""")
args = ["--dist=each"]
args += ["--tx", "popen//python=%s" % interpreters[0]]
args += ["--tx", "popen//python=%s" % interpreters[1]]
result = testdir.runpytest(*args)
result.stdout.fnmatch_lines(["2...4"])
result.stdout.fnmatch_lines(["2...5"])
class TestInteractive:
def getspawn(self, tmpdir):
pexpect = py.test.importorskip("pexpect")
basetemp = tmpdir.mkdir("basetemp")
def spawn(cmd):
cmd = cmd + " --basetemp=" + str(basetemp)
return pexpect.spawn(cmd, logfile=tmpdir.join("spawn.out").open("w"))
return spawn
def requirespexpect(self, version_needed):
pexpect = py.test.importorskip("pexpect")
ver = tuple(map(int, pexpect.__version__.split(".")))
if ver < version_needed:
py.test.skip("pexpect version %s needed" %(".".join(map(str, version_needed))))
def test_pdb_interaction(self, testdir):
self.requirespexpect((2,3))
spawn = self.getspawn(testdir.tmpdir)
p1 = testdir.makepyfile("""
def test_1():
i = 0
assert i == 1
""")
child = spawn("%s %s --pdb %s" % (py.std.sys.executable, pytestpath, p1))
child.timeout = EXPECTTIMEOUT
#child.expect(".*def test_1.*")
child.expect(".*i = 0.*")
child.expect("(Pdb)")
child.sendeof()
child.expect("1 failed")
if child.isalive():
child.wait()
def test_simple_looponfail_interaction(self, testdir):
spawn = self.getspawn(testdir.tmpdir)
p1 = testdir.makepyfile("""
def test_1():
assert 1 == 0
""")
p1.setmtime(p1.mtime() - 50.0)
child = spawn("%s %s --looponfail %s" % (py.std.sys.executable, pytestpath, p1))
child.timeout = EXPECTTIMEOUT
child.expect("assert 1 == 0")
child.expect("test_simple_looponfail_interaction.py:")
child.expect("1 failed")
child.expect("waiting for changes")
p1.write(py.code.Source("""
def test_1():
assert 1 == 1
"""))
child.expect("MODIFIED.*test_simple_looponfail_interaction.py", timeout=4.0)
child.expect("1 passed", timeout=5.0)
child.kill(15)
class TestKeyboardInterrupt:
def test_raised_in_testfunction(self, testdir):
p1 = testdir.makepyfile("""
import py
def test_fail():
raise ValueError()
def test_inter():
raise KeyboardInterrupt()
""")
result = testdir.runpytest(p1)
result.stdout.fnmatch_lines([
#"*test_inter() INTERRUPTED",
"*KEYBOARD INTERRUPT*",
"*1 failed*",
])
| 15,826
| 12
| 821
|
8e9bc7828805b397069b874d1e34c619691420e9
| 438
|
py
|
Python
|
0019.remove_nth_from_end/solution.py
|
WZMJ/Algorithms
|
07f648541d38e24df38bda469665c12df6a50637
|
[
"MIT"
] | 5
|
2020-05-23T02:18:26.000Z
|
2021-07-05T05:36:01.000Z
|
0019.remove_nth_from_end/solution.py
|
WZMJ/Algorithms
|
07f648541d38e24df38bda469665c12df6a50637
|
[
"MIT"
] | 1
|
2020-06-10T07:17:24.000Z
|
2020-07-20T02:21:24.000Z
|
0019.remove_nth_from_end/solution.py
|
WZMJ/Algorithms
|
07f648541d38e24df38bda469665c12df6a50637
|
[
"MIT"
] | 1
|
2019-04-23T13:01:50.000Z
|
2019-04-23T13:01:50.000Z
|
from utils import ListNode
| 24.333333
| 70
| 0.531963
|
from utils import ListNode
class Solution:
def remove_nth_from_end(self, head: ListNode, n: int) -> ListNode:
if not head:
return
ans = ListNode(0)
ans.next = head
fast, slow = ans, ans
for _ in range(n + 1):
fast = fast.next
while fast.next:
fast = fast.next
slow = slow.next
slow.next = slow.next.next
return ans.next
| 367
| -6
| 49
|
f8c6f04cdc8ad4766b5acd9b10988424b0f57589
| 698
|
py
|
Python
|
services/backend/thiamsu/migrations/0011_auto_20171124_1827.py
|
LKKTGB/thiamsu
|
f08d453c6b35c801c57f2501e42565da56900814
|
[
"MIT"
] | 10
|
2020-08-25T08:57:36.000Z
|
2021-12-31T01:04:18.000Z
|
services/backend/thiamsu/migrations/0011_auto_20171124_1827.py
|
LKKTGB/thiamsu
|
f08d453c6b35c801c57f2501e42565da56900814
|
[
"MIT"
] | 13
|
2020-04-26T08:41:30.000Z
|
2021-06-10T17:34:25.000Z
|
services/backend/thiamsu/migrations/0011_auto_20171124_1827.py
|
LKKTGB/thiamsu
|
f08d453c6b35c801c57f2501e42565da56900814
|
[
"MIT"
] | 1
|
2020-09-06T17:54:13.000Z
|
2020-09-06T17:54:13.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-11-24 18:27
from __future__ import unicode_literals
from django.db import migrations
| 33.238095
| 85
| 0.706304
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-11-24 18:27
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("thiamsu", "0010_auto_20171124_1825")]
operations = [
migrations.AlterUniqueTogether(
name="approvedtranslation", unique_together=set([])
),
migrations.RemoveField(model_name="approvedtranslation", name="reviewer"),
migrations.RemoveField(model_name="approvedtranslation", name="song"),
migrations.RemoveField(model_name="approvedtranslation", name="translation"),
migrations.DeleteModel(name="ApprovedTranslation"),
]
| 0
| 527
| 23
|
1194c5f3a7614e6cd136e63d1b858bc694041838
| 2,655
|
py
|
Python
|
main.py
|
lperepol/PhylumNematoda
|
7d173f5d4dd8ecd0707af0e05333bdce9a873348
|
[
"CC0-1.0"
] | null | null | null |
main.py
|
lperepol/PhylumNematoda
|
7d173f5d4dd8ecd0707af0e05333bdce9a873348
|
[
"CC0-1.0"
] | null | null | null |
main.py
|
lperepol/PhylumNematoda
|
7d173f5d4dd8ecd0707af0e05333bdce9a873348
|
[
"CC0-1.0"
] | null | null | null |
# This is a sample Python script.
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
from graphviz import Digraph
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
main()
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
| 45
| 131
| 0.695669
|
# This is a sample Python script.
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
from graphviz import Digraph
def draw():
g = Digraph('Nematoda Key', comment="FOO",filename = 'NematodaKey.gv')#, node_attr={'color': 'lightblue2', 'style': 'filled'} )
g.graph_attr['rankdir'] = 'LR'
g.node('001', label='Phylum\nNematoda',fillcolor='red',style="filled")
g.node('Class\nChromadorea', shape='doubleoctagon', color='blue', fillcolor='aquamarine3',style="rounded,filled")
g.node('Class\nEnoplea', shape='doubleoctagon', color='blue', fillcolor='aquamarine3',style="rounded,filled")
g.edge('001','Amphids')
g.edge('001','Cuticle')
g.edge('001','Phasmids')
g.edge('001','Esophagus')
g.edge('001','Excretory\nSystem')
g.edge('001','Females')
g.edge('001','Caudal\nAlae')
g.edge('001','Males')
g.edge('Amphids','Class\nChromadorea', label='Labial\npost-labial\npore-like\nslit-like\nelaborate coils\nspirals')
g.edge('Cuticle','Class\nChromadorea', label='annulated\nornamented with\nprojections and setae.')
g.edge('Phasmids','Class\nChromadorea', label='Distinct\nor indistinct\nposterior')
g.edge('Esophagus','Class\nChromadorea', label='Divided into bulbs\n3 to 5 esophageal glands')
g.edge('Excretory\nSystem','Class\nChromadorea', label='Glandular\nor tubular.')
g.edge('Females','Class\nChromadorea', label='One or\n two ovaries.')
g.edge('Caudal\nAlae','Class\nChromadorea', label='Present or\nabsent')
g.edge('Amphids','Class\nEnoplea', label='Pocket like\nnot spiral\npost-labial')
g.edge('Cuticle','Class\nEnoplea', label='Smooth or\nfinely striated')
g.edge('Phasmids','Class\nEnoplea', label='Present or\nabsent')
g.edge('Esophagus','Class\nEnoplea', label='Cylindrical or bottle-shaped\n3 to 5 esophageal glands\nstichosome\nor trophosome')
g.edge('Excretory\nSystem','Class\nEnoplea', label='Simple non-tubular\nSingle cell')
g.edge('Females','Class\nEnoplea', label='Two ovaries')
g.edge('Males','Class\nEnoplea', label='Two testes.')
g.edge('Caudal\nAlae','Class\nEnoplea', label='Rare')
g.render('NematodaKey.gv', format='svg', view=True)
g.render('NematodaKey.gv', format='jpg', view=False)
g.render('NematodaKey.gv', format='pdf', view=False)
def main():
# Use a breakpoint in the code line below to debug your script.
draw()
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
main()
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
| 2,224
| 0
| 45
|
83dd62b4bbf11a4bce1f327715899650e6158812
| 9,611
|
py
|
Python
|
gen2-mjpeg-streaming/main.py
|
joey1442/depthai-experiments
|
de2843812c4232a462eb8ab1703e7fd904f6f7b8
|
[
"MIT"
] | 1
|
2022-01-14T13:59:55.000Z
|
2022-01-14T13:59:55.000Z
|
gen2-mjpeg-streaming/main.py
|
joey1442/depthai-experiments
|
de2843812c4232a462eb8ab1703e7fd904f6f7b8
|
[
"MIT"
] | null | null | null |
gen2-mjpeg-streaming/main.py
|
joey1442/depthai-experiments
|
de2843812c4232a462eb8ab1703e7fd904f6f7b8
|
[
"MIT"
] | 2
|
2022-02-14T13:41:09.000Z
|
2022-03-03T00:46:21.000Z
|
import json
import socketserver
import threading
import time
from http.server import BaseHTTPRequestHandler, HTTPServer
from io import BytesIO
from pathlib import Path
import sys
from socketserver import ThreadingMixIn
from time import sleep
import depthai as dai
import numpy as np
import cv2
from PIL import Image
import blobconverter
HTTP_SERVER_PORT = 8090
# HTTPServer MJPEG
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
"""Handle requests in a separate thread."""
pass
# start TCP data server
server_TCP = socketserver.TCPServer(('localhost', 8070), TCPServerRequest)
th = threading.Thread(target=server_TCP.serve_forever)
th.daemon = True
th.start()
# start MJPEG HTTP Server
server_HTTP = ThreadedHTTPServer(('localhost', HTTP_SERVER_PORT), VideoStreamHandler)
th2 = threading.Thread(target=server_HTTP.serve_forever)
th2.daemon = True
th2.start()
# MobilenetSSD label texts
labelMap = ["background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow",
"diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]
syncNN = True
# Pipeline is defined, now we can connect to the device
with dai.Device(dai.OpenVINO.Version.VERSION_2021_2) as device:
cams = device.getConnectedCameras()
depth_enabled = dai.CameraBoardSocket.LEFT in cams and dai.CameraBoardSocket.RIGHT in cams
# Start pipeline
device.startPipeline(create_pipeline(depth_enabled))
print(f"DepthAI is up & running. Navigate to 'localhost:{str(HTTP_SERVER_PORT)}' with Chrome to see the mjpeg stream")
# Output queues will be used to get the rgb frames and nn data from the outputs defined above
previewQueue = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)
detectionNNQueue = device.getOutputQueue(name="detections", maxSize=4, blocking=False)
if depth_enabled:
xoutBoundingBoxDepthMapping = device.getOutputQueue(name="boundingBoxDepthMapping", maxSize=4, blocking=False)
depthQueue = device.getOutputQueue(name="depth", maxSize=4, blocking=False)
frame = None
detections = []
startTime = time.monotonic()
counter = 0
fps = 0
color = (255, 255, 255)
while True:
inPreview = previewQueue.get()
frame = inPreview.getCvFrame()
inNN = detectionNNQueue.get()
detections = inNN.detections
counter+=1
current_time = time.monotonic()
if (current_time - startTime) > 1 :
fps = counter / (current_time - startTime)
counter = 0
startTime = current_time
if depth_enabled:
depth = depthQueue.get()
depthFrame = depth.getFrame()
depthFrameColor = cv2.normalize(depthFrame, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1)
depthFrameColor = cv2.equalizeHist(depthFrameColor)
depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_HOT)
if len(detections) != 0:
boundingBoxMapping = xoutBoundingBoxDepthMapping.get()
roiDatas = boundingBoxMapping.getConfigData()
for roiData in roiDatas:
roi = roiData.roi
roi = roi.denormalize(depthFrameColor.shape[1], depthFrameColor.shape[0])
topLeft = roi.topLeft()
bottomRight = roi.bottomRight()
xmin = int(topLeft.x)
ymin = int(topLeft.y)
xmax = int(bottomRight.x)
ymax = int(bottomRight.y)
cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, cv2.FONT_HERSHEY_SCRIPT_SIMPLEX)
# If the frame is available, draw bounding boxes on it and show the frame
height = frame.shape[0]
width = frame.shape[1]
for detection in detections:
# Denormalize bounding box
x1 = int(detection.xmin * width)
x2 = int(detection.xmax * width)
y1 = int(detection.ymin * height)
y2 = int(detection.ymax * height)
try:
label = labelMap[detection.label]
except:
label = detection.label
cv2.putText(frame, str(label), (x1 + 10, y1 + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
cv2.putText(frame, "{:.2f}".format(detection.confidence*100), (x1 + 10, y1 + 35), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
if depth_enabled:
cv2.putText(frame, f"X: {int(detection.spatialCoordinates.x)} mm", (x1 + 10, y1 + 50), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
cv2.putText(frame, f"Y: {int(detection.spatialCoordinates.y)} mm", (x1 + 10, y1 + 65), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
cv2.putText(frame, f"Z: {int(detection.spatialCoordinates.z)} mm", (x1 + 10, y1 + 80), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
cv2.rectangle(frame, (x1, y1), (x2, y2), color, cv2.FONT_HERSHEY_SIMPLEX)
server_TCP.datatosend = str(label) + "," + f"{int(detection.confidence * 100)}%"
cv2.putText(frame, "NN fps: {:.2f}".format(fps), (2, frame.shape[0] - 4), cv2.FONT_HERSHEY_TRIPLEX, 0.4, color)
if depth_enabled:
new_width = int(depthFrameColor.shape[1] * (frame.shape[0] / depthFrameColor.shape[0]))
stacked = np.hstack([frame, cv2.resize(depthFrameColor, (new_width, frame.shape[0]))])
cv2.imshow("stacked", stacked)
server_HTTP.frametosend = stacked
else:
cv2.imshow("frame", frame)
server_HTTP.frametosend = frame
if cv2.waitKey(1) == ord('q'):
break
| 39.879668
| 229
| 0.663511
|
import json
import socketserver
import threading
import time
from http.server import BaseHTTPRequestHandler, HTTPServer
from io import BytesIO
from pathlib import Path
import sys
from socketserver import ThreadingMixIn
from time import sleep
import depthai as dai
import numpy as np
import cv2
from PIL import Image
import blobconverter
HTTP_SERVER_PORT = 8090
class TCPServerRequest(socketserver.BaseRequestHandler):
def handle(self):
# Handle is called each time a client is connected
# When OpenDataCam connects, do not return - instead keep the connection open and keep streaming data
# First send HTTP header
header = 'HTTP/1.0 200 OK\r\nServer: Mozarella/2.2\r\nAccept-Range: bytes\r\nConnection: close\r\nMax-Age: 0\r\nExpires: 0\r\nCache-Control: no-cache, private\r\nPragma: no-cache\r\nContent-Type: application/json\r\n\r\n'
self.request.send(header.encode())
while True:
sleep(0.1)
if hasattr(self.server, 'datatosend'):
self.request.send(self.server.datatosend.encode() + "\r\n".encode())
# HTTPServer MJPEG
class VideoStreamHandler(BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header('Content-type', 'multipart/x-mixed-replace; boundary=--jpgboundary')
self.end_headers()
while True:
sleep(0.1)
if hasattr(self.server, 'frametosend'):
image = Image.fromarray(cv2.cvtColor(self.server.frametosend, cv2.COLOR_BGR2RGB))
stream_file = BytesIO()
image.save(stream_file, 'JPEG')
self.wfile.write("--jpgboundary".encode())
self.send_header('Content-type', 'image/jpeg')
self.send_header('Content-length', str(stream_file.getbuffer().nbytes))
self.end_headers()
image.save(self.wfile, 'JPEG')
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
"""Handle requests in a separate thread."""
pass
# start TCP data server
server_TCP = socketserver.TCPServer(('localhost', 8070), TCPServerRequest)
th = threading.Thread(target=server_TCP.serve_forever)
th.daemon = True
th.start()
# start MJPEG HTTP Server
server_HTTP = ThreadedHTTPServer(('localhost', HTTP_SERVER_PORT), VideoStreamHandler)
th2 = threading.Thread(target=server_HTTP.serve_forever)
th2.daemon = True
th2.start()
# MobilenetSSD label texts
labelMap = ["background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow",
"diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]
syncNN = True
def create_pipeline(depth):
# Start defining a pipeline
pipeline = dai.Pipeline()
pipeline.setOpenVINOVersion(version=dai.OpenVINO.Version.VERSION_2021_2)
# Define a source - color camera
colorCam = pipeline.createColorCamera()
if depth:
mobilenet = pipeline.createMobileNetSpatialDetectionNetwork()
monoLeft = pipeline.createMonoCamera()
monoRight = pipeline.createMonoCamera()
stereo = pipeline.createStereoDepth()
else:
mobilenet = pipeline.createMobileNetDetectionNetwork()
colorCam.setPreviewSize(300, 300)
colorCam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
colorCam.setInterleaved(False)
colorCam.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)
mobilenet.setBlobPath(blobconverter.from_zoo("mobilenet-ssd", shaves=6, version="2021.2"))
mobilenet.setConfidenceThreshold(0.5)
mobilenet.input.setBlocking(False)
if depth:
monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT)
monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT)
# Setting node configs
stereo.initialConfig.setConfidenceThreshold(255)
stereo.depth.link(mobilenet.inputDepth)
mobilenet.setBoundingBoxScaleFactor(0.5)
mobilenet.setDepthLowerThreshold(100)
mobilenet.setDepthUpperThreshold(5000)
monoLeft.out.link(stereo.left)
monoRight.out.link(stereo.right)
xoutRgb = pipeline.createXLinkOut()
xoutRgb.setStreamName("rgb")
colorCam.preview.link(mobilenet.input)
if syncNN:
mobilenet.passthrough.link(xoutRgb.input)
else:
colorCam.preview.link(xoutRgb.input)
xoutNN = pipeline.createXLinkOut()
xoutNN.setStreamName("detections")
mobilenet.out.link(xoutNN.input)
if depth:
xoutBoundingBoxDepthMapping = pipeline.createXLinkOut()
xoutBoundingBoxDepthMapping.setStreamName("boundingBoxDepthMapping")
mobilenet.boundingBoxMapping.link(xoutBoundingBoxDepthMapping.input)
xoutDepth = pipeline.createXLinkOut()
xoutDepth.setStreamName("depth")
mobilenet.passthroughDepth.link(xoutDepth.input)
return pipeline
# Pipeline is defined, now we can connect to the device
with dai.Device(dai.OpenVINO.Version.VERSION_2021_2) as device:
cams = device.getConnectedCameras()
depth_enabled = dai.CameraBoardSocket.LEFT in cams and dai.CameraBoardSocket.RIGHT in cams
# Start pipeline
device.startPipeline(create_pipeline(depth_enabled))
print(f"DepthAI is up & running. Navigate to 'localhost:{str(HTTP_SERVER_PORT)}' with Chrome to see the mjpeg stream")
# Output queues will be used to get the rgb frames and nn data from the outputs defined above
previewQueue = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)
detectionNNQueue = device.getOutputQueue(name="detections", maxSize=4, blocking=False)
if depth_enabled:
xoutBoundingBoxDepthMapping = device.getOutputQueue(name="boundingBoxDepthMapping", maxSize=4, blocking=False)
depthQueue = device.getOutputQueue(name="depth", maxSize=4, blocking=False)
frame = None
detections = []
startTime = time.monotonic()
counter = 0
fps = 0
color = (255, 255, 255)
while True:
inPreview = previewQueue.get()
frame = inPreview.getCvFrame()
inNN = detectionNNQueue.get()
detections = inNN.detections
counter+=1
current_time = time.monotonic()
if (current_time - startTime) > 1 :
fps = counter / (current_time - startTime)
counter = 0
startTime = current_time
if depth_enabled:
depth = depthQueue.get()
depthFrame = depth.getFrame()
depthFrameColor = cv2.normalize(depthFrame, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1)
depthFrameColor = cv2.equalizeHist(depthFrameColor)
depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_HOT)
if len(detections) != 0:
boundingBoxMapping = xoutBoundingBoxDepthMapping.get()
roiDatas = boundingBoxMapping.getConfigData()
for roiData in roiDatas:
roi = roiData.roi
roi = roi.denormalize(depthFrameColor.shape[1], depthFrameColor.shape[0])
topLeft = roi.topLeft()
bottomRight = roi.bottomRight()
xmin = int(topLeft.x)
ymin = int(topLeft.y)
xmax = int(bottomRight.x)
ymax = int(bottomRight.y)
cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, cv2.FONT_HERSHEY_SCRIPT_SIMPLEX)
# If the frame is available, draw bounding boxes on it and show the frame
height = frame.shape[0]
width = frame.shape[1]
for detection in detections:
# Denormalize bounding box
x1 = int(detection.xmin * width)
x2 = int(detection.xmax * width)
y1 = int(detection.ymin * height)
y2 = int(detection.ymax * height)
try:
label = labelMap[detection.label]
except:
label = detection.label
cv2.putText(frame, str(label), (x1 + 10, y1 + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
cv2.putText(frame, "{:.2f}".format(detection.confidence*100), (x1 + 10, y1 + 35), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
if depth_enabled:
cv2.putText(frame, f"X: {int(detection.spatialCoordinates.x)} mm", (x1 + 10, y1 + 50), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
cv2.putText(frame, f"Y: {int(detection.spatialCoordinates.y)} mm", (x1 + 10, y1 + 65), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
cv2.putText(frame, f"Z: {int(detection.spatialCoordinates.z)} mm", (x1 + 10, y1 + 80), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
cv2.rectangle(frame, (x1, y1), (x2, y2), color, cv2.FONT_HERSHEY_SIMPLEX)
server_TCP.datatosend = str(label) + "," + f"{int(detection.confidence * 100)}%"
cv2.putText(frame, "NN fps: {:.2f}".format(fps), (2, frame.shape[0] - 4), cv2.FONT_HERSHEY_TRIPLEX, 0.4, color)
if depth_enabled:
new_width = int(depthFrameColor.shape[1] * (frame.shape[0] / depthFrameColor.shape[0]))
stacked = np.hstack([frame, cv2.resize(depthFrameColor, (new_width, frame.shape[0]))])
cv2.imshow("stacked", stacked)
server_HTTP.frametosend = stacked
else:
cv2.imshow("frame", frame)
server_HTTP.frametosend = frame
if cv2.waitKey(1) == ord('q'):
break
| 3,719
| 63
| 120
|
90dd2a31d9dd176635170ae37809f983dea5ff2a
| 5,241
|
py
|
Python
|
rbi2/inte5c.py
|
spottedzebra/interpreter
|
b5b2a735d771fbfe2842e4c36176f2bc8c1761c3
|
[
"MIT"
] | 2
|
2016-10-22T11:55:07.000Z
|
2020-07-23T20:56:15.000Z
|
rbi2/inte5c.py
|
mwhit74/interpreter
|
b5b2a735d771fbfe2842e4c36176f2bc8c1761c3
|
[
"MIT"
] | null | null | null |
rbi2/inte5c.py
|
mwhit74/interpreter
|
b5b2a735d771fbfe2842e4c36176f2bc8c1761c3
|
[
"MIT"
] | null | null | null |
import string
import pdb
from collections import namedtuple
import math
ADD, SUB, MUL, DIV, CHAR = 'ADD','SUB','MUL','DIV','CHAR'
NUM, EOF, OPAR, CPAR, POW = 'NUM','EOF','OPAR','CPAR','POW'
WHITESPACE = string.whitespace
Token = namedtuple('Token',['token_type', 'token_value'])
if __name__ == "__main__":
main()
| 27.296875
| 68
| 0.526045
|
import string
import pdb
from collections import namedtuple
import math
ADD, SUB, MUL, DIV, CHAR = 'ADD','SUB','MUL','DIV','CHAR'
NUM, EOF, OPAR, CPAR, POW = 'NUM','EOF','OPAR','CPAR','POW'
WHITESPACE = string.whitespace
Token = namedtuple('Token',['token_type', 'token_value'])
class Lexer(object):
def __init__(self, text):
self.text = text
self.pos = 0
self.cur_char = self.text[self.pos]
def error(self):
raise ValueError('Invalid character')
def get_next_char(self):
self.pos += 1
if self.pos <= len(self.text) - 1:
self.cur_char = self.text[self.pos]
else:
self.cur_char = None
def get_whitespace(self):
value = ''
while self.cur_char != None and self.cur_char in WHITESPACE:
value = value + self.cur_char
self.get_next_char()
def get_num(self):
value = ''
while self.cur_char != None and self.cur_char.isdigit():
value = value + self.cur_char
self.get_next_char()
return int(value)
def get_chars(self):
value = ''
while self.cur_char != None and self.cur_char.isalpha():
value = value + self.cur_char
self.get_next_char()
return value
def get_next_token(self):
while self.cur_char != None:
if self.cur_char in WHITESPACE:
value = self.get_whitespace()
if self.cur_char.isdigit():
value = self.get_num()
return Token(NUM, value)
if self.cur_char.isalpha():
value = self.get_chars()
return Token(CHAR, value)
if self.cur_char == '+':
token = Token(ADD, self.cur_char)
self.get_next_char()
return token
if self.cur_char == '-':
token = Token(SUB, self.cur_char)
self.get_next_char()
return token
if self.cur_char == '*':
token = Token(MUL, self.cur_char)
self.get_next_char()
return token
if self.cur_char == '/':
token = Token(DIV, self.cur_char)
self.get_next_char()
return token
if self.cur_char == '(':
token = Token(OPAR, self.cur_char)
self.get_next_char()
return token
if self.cur_char == ')':
token = Token(CPAR, self.cur_char)
self.get_next_char()
return token
if self.cur_char == '^':
token = Token(POW, self.cur_char)
self.get_next_char()
return token
self.error()
return Token(EOF, None)
class Interpreter(object):
def __init__(self, lexer):
self.lexer = lexer
self.cur_token = self.lexer.get_next_token()
def error(self):
raise SyntaxError('Invalid syntax')
def check_token_type(self, token_type):
print(self.cur_token)
#pdb.set_trace()
if self.cur_token.token_type == token_type:
self.cur_token = self.lexer.get_next_token()
else:
self.error()
def expr1(self):
result = self.expr2()
while (self.cur_token.token_type != EOF and
self.cur_token.token_type in (ADD, SUB)):
if self.cur_token.token_type == ADD:
self.check_token_type(ADD)
result = result + self.expr2()
if self.cur_token.token_type == SUB:
self.check_token_type(SUB)
result = result - self.expr2()
return result
def expr2(self):
result = self.expr3()
while (self.cur_token.token_type != EOF and
self.cur_token.token_type in (MUL, DIV)):
if self.cur_token.token_type == MUL:
self.check_token_type(MUL)
result = result * self.expr3()
if self.cur_token.token_type == DIV:
self.check_token_type(DIV)
result = result / self.expr3()
return result
def expr3(self):
if self.cur_token.token_type == OPAR:
self.check_token_type(OPAR)
result = self.expr1()
self.check_token_type(CPAR)
else:
result = self.expr4()
return result
def expr4(self):
result = self.factor()
while (self.cur_token.token_type != EOF and
self.cur_token.token_type is POW):
self.check_token_type(POW)
result = math.pow(result, self.expr1())
return result
def factor(self):
value = self.cur_token.token_value
self.check_token_type(NUM)
return value
def main():
while True:
try:
text = input('calc>')
except e:
continue
#pdb.set_trace()
lexer = Lexer(text)
interpreter = Interpreter(lexer)
result = interpreter.expr1()
print(result)
if __name__ == "__main__":
main()
| 4,433
| 4
| 482
|
a40403bf837c949fa8d93de759b6d0eca24abb41
| 2,932
|
py
|
Python
|
pwf/response.py
|
victorkohler/pwf
|
67f3c5990a12ce95377e953f3a96487ebd8bdce6
|
[
"MIT"
] | 1
|
2017-03-19T13:22:50.000Z
|
2017-03-19T13:22:50.000Z
|
pwf/response.py
|
victorkohler/pwf
|
67f3c5990a12ce95377e953f3a96487ebd8bdce6
|
[
"MIT"
] | null | null | null |
pwf/response.py
|
victorkohler/pwf
|
67f3c5990a12ce95377e953f3a96487ebd8bdce6
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
@author: Victor Kohler
@since: date 17/12/2016
@version: 0.1
"""
import httplib
import helpers
import cookies
class Response(object):
"""Used to set and return data back to the server. The Response object
can be instantiated in one of two ways:
1. It is created manually in the view function to add custom headers,
cookies or response codes.
2. It gets created automatically once the view function returns
some data.
Create the response object from the view using app.make_response(data)
"""
def __init__(self, make_response=None, code=200, data=''):
"""For the view data we're currently supporting either a tuple with
both the returned data and a dictionary of headers or just the
returned data.
"""
if isinstance(data, tuple):
self.data = data[0]
headers = data[1]
else:
self.data = data
headers = {}
self.headers = headers
self.code = code
self.make_response = make_response
def set_cookie(self, key, value='', path='/', expires=None, max_age=None,
domain=None, secure=False, httponly=False):
"""Creates a cookie dictionary and adds it to the headers.
This function is ment to be used in the view function:
resp = make_response(data)
resp.set_cookie('key', 'value')
"""
cookie = cookies.create_cookie(key, value, path, expires, max_age,
domain, secure, httponly)
#TODO: Handle multiple cookies
self.headers.update(cookie)
def render(self):
"""Renders the final response back to the server with status code,
headers and data. It aslo transform headers and codes into
a WSGI compatible format.
If status code is 5xx or 4xx no view data is returned.
"""
# If the content type is not specified, we set
# it to text/html as the default
if 'content-type' not in map(lambda x:x.lower(), self.headers):
self.headers['Content-Type'] = 'text/html'
# Set headers as list of tuples
self.headers = [(k, v) for k, v in self.headers.items()]
# httplib.responses maps the HTTP 1.1 status codes to W3C names.
# Output example: '200 OK' or '404 Not Found'
resp_code = '{} {}'.format(self.code, httplib.responses[self.code])
if str(self.code)[0] in ['4', '5'] and not self.data:
self.make_response(resp_code, self.headers)
return resp_code.encode('utf-8')
try:
data = bytes(self.data).encode('utf-8')
except UnicodeDecodeError:
data = bytes(self.data)
self.make_response(resp_code, self.headers)
return data
| 32.94382
| 77
| 0.608458
|
# -*- coding: utf-8 -*-
"""
@author: Victor Kohler
@since: date 17/12/2016
@version: 0.1
"""
import httplib
import helpers
import cookies
class Response(object):
"""Used to set and return data back to the server. The Response object
can be instantiated in one of two ways:
1. It is created manually in the view function to add custom headers,
cookies or response codes.
2. It gets created automatically once the view function returns
some data.
Create the response object from the view using app.make_response(data)
"""
def __init__(self, make_response=None, code=200, data=''):
"""For the view data we're currently supporting either a tuple with
both the returned data and a dictionary of headers or just the
returned data.
"""
if isinstance(data, tuple):
self.data = data[0]
headers = data[1]
else:
self.data = data
headers = {}
self.headers = headers
self.code = code
self.make_response = make_response
def set_cookie(self, key, value='', path='/', expires=None, max_age=None,
domain=None, secure=False, httponly=False):
"""Creates a cookie dictionary and adds it to the headers.
This function is ment to be used in the view function:
resp = make_response(data)
resp.set_cookie('key', 'value')
"""
cookie = cookies.create_cookie(key, value, path, expires, max_age,
domain, secure, httponly)
#TODO: Handle multiple cookies
self.headers.update(cookie)
def render(self):
"""Renders the final response back to the server with status code,
headers and data. It aslo transform headers and codes into
a WSGI compatible format.
If status code is 5xx or 4xx no view data is returned.
"""
# If the content type is not specified, we set
# it to text/html as the default
if 'content-type' not in map(lambda x:x.lower(), self.headers):
self.headers['Content-Type'] = 'text/html'
# Set headers as list of tuples
self.headers = [(k, v) for k, v in self.headers.items()]
# httplib.responses maps the HTTP 1.1 status codes to W3C names.
# Output example: '200 OK' or '404 Not Found'
resp_code = '{} {}'.format(self.code, httplib.responses[self.code])
if str(self.code)[0] in ['4', '5'] and not self.data:
self.make_response(resp_code, self.headers)
return resp_code.encode('utf-8')
try:
data = bytes(self.data).encode('utf-8')
except UnicodeDecodeError:
data = bytes(self.data)
self.make_response(resp_code, self.headers)
return data
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.__dict__)
| 65
| 0
| 27
|
5f695683bffcbb1e02c0ef543557ed7122feba6d
| 5,208
|
py
|
Python
|
tests/algorithms/test_munchausen_dqn.py
|
sony/nnabla-rl
|
6a9a91ac5363b8611e0c9f736590729952a8d460
|
[
"Apache-2.0"
] | 75
|
2021-06-14T02:35:19.000Z
|
2022-03-23T04:30:24.000Z
|
tests/algorithms/test_munchausen_dqn.py
|
sony/nnabla-rl
|
6a9a91ac5363b8611e0c9f736590729952a8d460
|
[
"Apache-2.0"
] | 2
|
2021-12-17T08:46:54.000Z
|
2022-03-15T02:04:53.000Z
|
tests/algorithms/test_munchausen_dqn.py
|
sony/nnabla-rl
|
6a9a91ac5363b8611e0c9f736590729952a8d460
|
[
"Apache-2.0"
] | 3
|
2021-06-15T13:32:57.000Z
|
2022-03-25T16:53:14.000Z
|
# Copyright 2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import nnabla as nn
import nnabla_rl.algorithms as A
import nnabla_rl.environments as E
from nnabla_rl.replay_buffer import ReplayBuffer
if __name__ == "__main__":
from testing_utils import generate_dummy_experiences
pytest.main()
else:
from ..testing_utils import generate_dummy_experiences
| 35.189189
| 96
| 0.680108
|
# Copyright 2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import nnabla as nn
import nnabla_rl.algorithms as A
import nnabla_rl.environments as E
from nnabla_rl.replay_buffer import ReplayBuffer
class TestMunchausenDQN(object):
def setup_method(self, method):
nn.clear_parameters()
def test_algorithm_name(self):
dummy_env = E.DummyDiscreteImg()
dqn = A.MunchausenDQN(dummy_env)
assert dqn.__name__ == 'MunchausenDQN'
def test_continuous_action_env_unsupported(self):
'''
Check that error occurs when training on continuous action env
'''
dummy_env = E.DummyContinuous()
config = A.MunchausenDQNConfig()
with pytest.raises(Exception):
A.MunchausenDQN(dummy_env, config=config)
def test_run_online_training(self):
'''
Check that no error occurs when calling online training
'''
dummy_env = E.DummyDiscreteImg()
config = A.MunchausenDQNConfig()
config.start_timesteps = 5
config.batch_size = 5
config.learner_update_frequency = 1
config.target_update_frequency = 1
dqn = A.MunchausenDQN(dummy_env, config=config)
dqn.train_online(dummy_env, total_iterations=10)
def test_run_online_training_multistep(self):
'''
Check that no error occurs when calling online training
'''
dummy_env = E.DummyDiscreteImg()
config = A.MunchausenDQNConfig()
config.num_steps = 2
config.start_timesteps = 5
config.batch_size = 5
config.learner_update_frequency = 1
config.target_update_frequency = 1
dqn = A.MunchausenDQN(dummy_env, config=config)
dqn.train_online(dummy_env, total_iterations=10)
def test_run_offline_training(self):
'''
Check that no error occurs when calling offline training
'''
dummy_env = E.DummyDiscreteImg()
batch_size = 5
config = A.MunchausenDQNConfig()
config.batch_size = batch_size
config.learner_update_frequency = 1
config.target_update_frequency = 1
dqn = A.MunchausenDQN(dummy_env, config=config)
experiences = generate_dummy_experiences(dummy_env, batch_size)
buffer = ReplayBuffer()
buffer.append_all(experiences)
dqn.train_offline(buffer, total_iterations=5)
def test_compute_eval_action(self):
dummy_env = E.DummyDiscreteImg()
dqn = A.MunchausenDQN(dummy_env)
state = dummy_env.reset()
state = np.float32(state)
action = dqn.compute_eval_action(state)
assert action.shape == (1,)
def test_parameter_range(self):
with pytest.raises(ValueError):
A.MunchausenDQNConfig(gamma=-0.1)
with pytest.raises(ValueError):
A.MunchausenDQNConfig(batch_size=-1)
with pytest.raises(ValueError):
A.MunchausenDQNConfig(learning_rate=-0.1)
with pytest.raises(ValueError):
A.MunchausenDQNConfig(learner_update_frequency=-1000)
with pytest.raises(ValueError):
A.MunchausenDQNConfig(target_update_frequency=-1000)
with pytest.raises(ValueError):
A.MunchausenDQNConfig(start_timesteps=-1000)
with pytest.raises(ValueError):
A.MunchausenDQNConfig(replay_buffer_size=-1000)
with pytest.raises(ValueError):
A.MunchausenDQNConfig(initial_epsilon=1.5)
with pytest.raises(ValueError):
A.MunchausenDQNConfig(final_epsilon=1.1)
with pytest.raises(ValueError):
A.MunchausenDQNConfig(test_epsilon=-1000)
with pytest.raises(ValueError):
A.MunchausenDQNConfig(max_explore_steps=-100)
def test_latest_iteration_state(self):
'''
Check that latest iteration state has the keys and values we expected
'''
dummy_env = E.DummyDiscreteImg()
m_dqn = A.MunchausenDQN(dummy_env)
m_dqn._q_function_trainer_state = {'q_loss': 0., 'td_errors': np.array([0., 1.])}
latest_iteration_state = m_dqn.latest_iteration_state
assert 'q_loss' in latest_iteration_state['scalar']
assert 'td_errors' in latest_iteration_state['histogram']
assert latest_iteration_state['scalar']['q_loss'] == 0.
assert np.allclose(latest_iteration_state['histogram']['td_errors'], np.array([0., 1.]))
if __name__ == "__main__":
from testing_utils import generate_dummy_experiences
pytest.main()
else:
from ..testing_utils import generate_dummy_experiences
| 1,496
| 2,725
| 23
|
1474b532b2f24c55f34fba1679b92a44ff8cc539
| 542
|
py
|
Python
|
jobs/migrations/0004_auto_20180425_1550.py
|
Manasranjanpati/Jobpost
|
e5654129538e70cedf8aafc65c1b0289a01535e5
|
[
"MIT"
] | 20
|
2018-05-04T18:42:35.000Z
|
2021-03-18T07:15:12.000Z
|
src/jobs/migrations/0004_auto_20180425_1550.py
|
fleepgeek/django-jobsite
|
d9547c4ee85751677ba6458380b609973c3b4a8d
|
[
"MIT"
] | 5
|
2020-02-11T22:22:33.000Z
|
2021-06-10T20:18:05.000Z
|
jobs/migrations/0004_auto_20180425_1550.py
|
Manasranjanpati/Jobpost
|
e5654129538e70cedf8aafc65c1b0289a01535e5
|
[
"MIT"
] | 8
|
2018-05-04T19:03:23.000Z
|
2020-09-23T00:24:46.000Z
|
# Generated by Django 2.0.4 on 2018-04-25 14:50
from django.db import migrations, models
import django.db.models.deletion
| 23.565217
| 105
| 0.614391
|
# Generated by Django 2.0.4 on 2018-04-25 14:50
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('jobs', '0003_auto_20180425_1406'),
]
operations = [
migrations.AlterField(
model_name='job',
name='industry',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.Industry'),
),
migrations.DeleteModel(
name='Industry',
),
]
| 0
| 395
| 23
|
d7bd61d9ea55136b2311139359d783a5db5ccfee
| 8,769
|
py
|
Python
|
src/Communication/Client/app.py
|
aquino35/elevator_system_prototype
|
f4b45df0852a3b6e97a32a3c312a7cc3aacb2d22
|
[
"MIT"
] | null | null | null |
src/Communication/Client/app.py
|
aquino35/elevator_system_prototype
|
f4b45df0852a3b6e97a32a3c312a7cc3aacb2d22
|
[
"MIT"
] | null | null | null |
src/Communication/Client/app.py
|
aquino35/elevator_system_prototype
|
f4b45df0852a3b6e97a32a3c312a7cc3aacb2d22
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# from .app_macros import * #relative import - did not work properly
from app_macros import *
| 40.041096
| 145
| 0.68115
|
#!/usr/bin/env python
# from .app_macros import * #relative import - did not work properly
from app_macros import *
class App():
def __init__(self,):
""" App's constructor."""
self.init_app()
def init_app(self):
""" Initializes the App. """
try:
self.setup_app()
self.main_label = self.create_label(CONST_WELCOME_MSG, 25, 5, 5)
self.elevator_request_button = self.create_button("elevator_request_button",
self.frame, CONST_ELEVATOR_REQUEST_LABEL, self.build_elevator_keypad, 5, 8)
self.run_app() # runs app on a loop
except:
print("Error initializing the app")
def setup_app(self):
""" Sets the App's frame and title. """
self.frame = Tk() # Creating main window application
self.frame.geometry(CONST_GEOMETRY) # Setting size of the application's frame.
self.frame.title(CONST_TITLE) # Assigning the application a title.
def create_button(self, button_name, frame, txt, cb, _col, _row):
""" Makes a button. """
button_name = Button(frame, text=txt, command=cb)
button_name.grid(column=_col, row=_row )
return button_name
def run_app(self):
""" Runs the App. """
self.frame.mainloop()
def destroy_button(self, button_name):
""" Destroy a button in the App. """
button_name.destroy()
def create_label(self, txt, size, _col, _row):
""" Set the App's main label. """
self.label = Label(self.frame, text=txt, font=(CONST_FONT, size))
self.label.grid(column= _col, row= _row)
return self.label
def change_label(self, label, txt, size):
""" Changes App's main label. Note: Does not change grid. """
self.label.configure(text=txt, font=(CONST_FONT, size))
def terminate_app(self):
""" Changes App's main label."""
return
def display_info(self, info):
""" Display pop-up information of the elevator system."""
messagebox.showinfo(CONST_TITLE, info)
def display_warning(self, info):
""" Display pop-up warnings of the elevator system."""
messagebox.showwarning(CONST_TITLE, info)
def display_error(self, info):
""" Display pop-up errors of the elevator system."""
messagebox.showerror(CONST_TITLE, info)
# Next up we can see our callbacks defined for the buttons
def build_elevator_keypad(self):
""" Command that builds the keypad of the elevator system."""
self.destroy_button(self.elevator_request_button) # destroy the request button
self.display_info(REQUEST_MSG) # alert user that an elevator is on its way.
# creating the elevator's keypad:
self.elev_manager = ElevatorManager()
self.display_elevator_attr()
self.build_keypad_layers()
def build_keypad_layers(self):
""" Builds all the layers of the keypad."""
self.build_first_keypad_layer()
self.build_second_keypad_layer()
self.build_third_keypad_layer()
self.build_fourth_keypad_layer()
self.build_fith_keypad_layer()
self.build_sixth_keypad_layer()
self.build_seventh_keypad_layer()
self.build_eight_keypad_layer()
def build_eight_keypad_layer(self):
""" Makes button for eight layer of the keypad."""
self.capacity_label = self.create_label(CAPACITY_LABEL, CONST_LABEL_SIZE, 1, 18)
self.max_weight_label = self.create_label(MAXIMUM_WEIGHT_LABEL, CONST_LABEL_SIZE, 4, 18)
def build_seventh_keypad_layer(self):
""" Makes buttons for seventh layer of the keypad."""
self.maintenance_button = self.create_button("maintenance_button", self.frame,
MAINTENANCE_LABEL, self.get_elevator_maintenance_state, 4, 15)
def build_sixth_keypad_layer(self):
""" Makes buttons for sixth layer of the keypad."""
self.open_door_button = self.create_button("open_door_button", self.frame,
OPEN_DOOR_LABEL, self.open_elevator_door, 3, 6)
self.close_door_button = self.create_button("close_door_button",self.frame,
CLOSE_DOOR_LABEL, self.close_elevator_door, 4, 6)
self.lobby_button = self.create_button("lobby_button", self.frame, LOBBY_LABEL, self.dummy, 5, 6)
def build_fith_keypad_layer(self):
""" Makes buttons for fith layer of the keypad."""
self.f1_button = self.create_button("f1_button", self.frame, FIRST_FLOOR_LABEL, self.set_elevator_floor(CONST_FIRST_FLOOR), 3, 4)
self.f2_button = self.create_button("f2_button", self.frame, SECOND_FLOOR_LABEL, self.set_elevator_floor(CONST_SECOND_FLOOR), 4, 4)
self.f3_button = self.create_button("f3_button", self.frame, THIRD_FLOOR_LABEL, self.set_elevator_floor(CONST_THIRD_FLOOR), 5, 4)
def build_fourth_keypad_layer(self):
""" Makes buttons for fourth layer of the keypad."""
self.f4_button = self.create_button("f4_button", self.frame, FOURTH_FLOOR_LABEL, self.set_elevator_floor(CONST_FOURTH_FLOOR), 3, 3)
self.f5_button = self.create_button("f5_button", self.frame, FITH_FLOOR_LABEL, self.set_elevator_floor(CONST_FITH_FLOOR), 4, 3)
self.f6_button = self.create_button("f6_button", self.frame, SIXTH_FLOOR_LABEL, self.set_elevator_floor(CONST_SIXTH_FLOOR), 5, 3)
def build_third_keypad_layer(self):
""" Makes buttons for third layer of the keypad"""
self.f7_button = self.create_button("f7_button", self.frame, SEVENTH_FLOOR_LABEL, self.set_elevator_floor(TENTH_FLOOR_LABEL), 3, 2)
self.f8_button = self.create_button("f8_button", self.frame, EIGHTH_FLOOR_LABEL, self.set_elevator_floor(ELEVENTH_FLOOR_LABEL), 4, 2)
self.f9_button = self.create_button("f9_button", self.frame, NEIGH_FLOOR_LABEL, self.set_elevator_floor(TWELFTH_FLOOR_LABEL), 5, 2)
def build_second_keypad_layer(self):
""" Makes buttons for second layer of the keypad."""
self.f10_button = self.create_button("f10_button", self.frame, TENTH_FLOOR_LABEL, self.set_elevator_floor(TENTH_FLOOR_LABEL), 3, 1)
self.f11_button = self.create_button("f11_button", self.frame, ELEVENTH_FLOOR_LABEL, self.set_elevator_floor(ELEVENTH_FLOOR_LABEL), 4, 1)
self.f12_button = self.create_button("f12_button", self.frame, TWELFTH_FLOOR_LABEL, self.set_elevator_floor(TWELFTH_FLOOR_LABEL), 5, 1)
def build_first_keypad_layer(self):
""" Makes label for first layer of the keypad"""
self.keypad_label = self.create_label(KEYPAD_LABEL, CONST_LABEL_SIZE, 3.5, 0) # title of keypad
def dummy(self):
return
def display_elevator_attr(self):
""" Display elevator attributes to the user."""
try:
self.elev_manager.display_elevator_attr()
self.change_label(self.main_label,CURRENT_FLOOR_LABEL, CONST_LABEL_SIZE) # current floor
self.label.grid(column=0, row=1) # changing the main label's grid location
self.current_temperature_label = self.create_label(CURRENT_TEMPERATURE_LABEL, CONST_LABEL_SIZE, 0, 3) # current temp
self.current_weight_label = self.create_label(CURRENT_WEIGHT_LABEL, CONST_LABEL_SIZE, 0, 5) # current weight
self.person_count_label = self.create_label(PERSON_COUNT_LABEL, CONST_LABEL_SIZE, 0, 7) # current person count
# tests
self.elev_manager.init_comm()
if not (self.elev_manager.arduino_message_queue.empty()):
self.arduino_test_msg = self.elev_manager.arduino_message_queue.get()
#print(self.arduino_test_msg)
self.arduino_test = self.create_label(self.arduino_test_msg, CONST_LABEL_SIZE, 0, 2)
self.arduino_test = self.create_label(self.arduino_test_msg, CONST_LABEL_SIZE, 0, 4)
self.arduino_test = self.create_label(self.arduino_test_msg, CONST_LABEL_SIZE, 0, 6)
self.arduino_test = self.create_label(self.arduino_test_msg, CONST_LABEL_SIZE, 0, 8)
except:
print("Elevator attributes could not be displayed")
def open_elevator_door(self):
""" Calls the manager to open the door."""
self.elev_manager.set_elevator_door_status()
def close_elevator_door(self):
self.elev_manager.set_elevator_door_status()
def set_elevator_floor(self, floor):
self.elev_manager.set_elevator_floor(floor)
def get_elevator_lobby(self):
self.elev_manager.get_elevator_lobby()
def get_elevator_maintenance_state(self):
self.elev_manager.get_elevator_maintenance_state()
| 276
| 8,348
| 23
|
715654525e3b92ba082de887c45890f1f82a1f3f
| 1,973
|
py
|
Python
|
book/src/ch03/src/exceptions_2.py
|
zangyuchen2008/Clean-Code-in-Python-Second-Edition
|
0be2e41f6cf7322e12ec55d76135ff398df61b4a
|
[
"MIT"
] | 133
|
2016-07-22T15:16:16.000Z
|
2022-03-29T22:39:40.000Z
|
book/src/ch03/src/exceptions_2.py
|
zangyuchen2008/Clean-Code-in-Python-Second-Edition
|
0be2e41f6cf7322e12ec55d76135ff398df61b4a
|
[
"MIT"
] | 137
|
2021-01-05T11:21:04.000Z
|
2022-03-31T11:10:11.000Z
|
book/src/ch03/src/exceptions_2.py
|
zangyuchen2008/Clean-Code-in-Python-Second-Edition
|
0be2e41f6cf7322e12ec55d76135ff398df61b4a
|
[
"MIT"
] | 41
|
2020-12-29T04:46:14.000Z
|
2022-03-20T22:36:17.000Z
|
"""Clean Code in Python - Chapter 3: General Traits of Good Code
> Error Handling - Exceptions
"""
import logging
import time
from base import Connector, Event
logger = logging.getLogger(__name__)
def connect_with_retry(
connector: Connector, retry_n_times: int, retry_backoff: int = 5
):
"""Tries to establish the connection of <connector> retrying
<retry_n_times>, and waiting <retry_backoff> seconds between attempts.
If it can connect, returns the connection object.
If it's not possible to connect after the retries have been exhausted, raises ``ConnectionError``.
:param connector: An object with a ``.connect()`` method.
:param retry_n_times int: The number of times to try to call
``connector.connect()``.
:param retry_backoff int: The time lapse between retry calls.
"""
for _ in range(retry_n_times):
try:
return connector.connect()
except ConnectionError as e:
logger.info(
"%s: attempting new connection in %is", e, retry_backoff
)
time.sleep(retry_backoff)
exc = ConnectionError(f"Couldn't connect after {retry_n_times} times")
logger.exception(exc)
raise exc
class DataTransport:
"""An example of an object that separates the exception handling by
abstraction levels.
"""
_RETRY_BACKOFF: int = 5
_RETRY_TIMES: int = 3
| 30.353846
| 102
| 0.647745
|
"""Clean Code in Python - Chapter 3: General Traits of Good Code
> Error Handling - Exceptions
"""
import logging
import time
from base import Connector, Event
logger = logging.getLogger(__name__)
def connect_with_retry(
connector: Connector, retry_n_times: int, retry_backoff: int = 5
):
"""Tries to establish the connection of <connector> retrying
<retry_n_times>, and waiting <retry_backoff> seconds between attempts.
If it can connect, returns the connection object.
If it's not possible to connect after the retries have been exhausted, raises ``ConnectionError``.
:param connector: An object with a ``.connect()`` method.
:param retry_n_times int: The number of times to try to call
``connector.connect()``.
:param retry_backoff int: The time lapse between retry calls.
"""
for _ in range(retry_n_times):
try:
return connector.connect()
except ConnectionError as e:
logger.info(
"%s: attempting new connection in %is", e, retry_backoff
)
time.sleep(retry_backoff)
exc = ConnectionError(f"Couldn't connect after {retry_n_times} times")
logger.exception(exc)
raise exc
class DataTransport:
"""An example of an object that separates the exception handling by
abstraction levels.
"""
_RETRY_BACKOFF: int = 5
_RETRY_TIMES: int = 3
def __init__(self, connector: Connector) -> None:
self._connector = connector
self.connection = None
def deliver_event(self, event: Event):
self.connection = connect_with_retry(
self._connector, self._RETRY_TIMES, self._RETRY_BACKOFF
)
self.send(event)
def send(self, event: Event):
try:
return self.connection.send(event.decode())
except ValueError as e:
logger.error("%r contains incorrect data: %s", event, e)
raise
| 457
| 0
| 81
|
8d79833ad772a85de78f2fa31627239a732baf79
| 373
|
py
|
Python
|
modules/tag/controllers.py
|
srcc-msu/job_statistics
|
74680a4e4c105ebcff94f089e07fcb44dbcc12d9
|
[
"MIT"
] | null | null | null |
modules/tag/controllers.py
|
srcc-msu/job_statistics
|
74680a4e4c105ebcff94f089e07fcb44dbcc12d9
|
[
"MIT"
] | null | null | null |
modules/tag/controllers.py
|
srcc-msu/job_statistics
|
74680a4e4c105ebcff94f089e07fcb44dbcc12d9
|
[
"MIT"
] | null | null | null |
from flask import Blueprint, render_template, Response
from application.helpers import requires_auth
from core.tag.models import Tag
tag_pages = Blueprint('tag', __name__
, template_folder='templates', static_folder='static')
@tag_pages.route("/list")
@requires_auth
| 23.3125
| 55
| 0.772118
|
from flask import Blueprint, render_template, Response
from application.helpers import requires_auth
from core.tag.models import Tag
tag_pages = Blueprint('tag', __name__
, template_folder='templates', static_folder='static')
@tag_pages.route("/list")
@requires_auth
def urls() -> Response:
tags = Tag.query.all()
return render_template("tag_list.html", tags=tags)
| 79
| 0
| 22
|
b57b9505c8bad95a1530bcc342608852f891d4ba
| 9,474
|
py
|
Python
|
retinaFace/widerface_evaluate.py
|
factzero/pytorch_jaguarface_examples
|
f248ff8899b8fe9d41a1e8ac095ed5b6688987ed
|
[
"MIT"
] | 2
|
2020-04-09T05:48:35.000Z
|
2020-05-05T03:22:20.000Z
|
retinaFace/widerface_evaluate.py
|
factzero/pytorch_jaguarface_examples
|
f248ff8899b8fe9d41a1e8ac095ed5b6688987ed
|
[
"MIT"
] | 1
|
2020-04-09T05:49:49.000Z
|
2020-04-09T05:49:49.000Z
|
retinaFace/widerface_evaluate.py
|
factzero/pytorch_jaguarface_examples
|
f248ff8899b8fe9d41a1e8ac095ed5b6688987ed
|
[
"MIT"
] | null | null | null |
# -*- coding: UTF-8 -*-
import os
import tqdm
import pickle
import argparse
import numpy as np
from scipy.io import loadmat
parser = argparse.ArgumentParser(description='widerface evaluate')
parser.add_argument('-p', '--pred', default="./widerface_evaluate/widerface_txt/")
parser.add_argument('-g', '--gt', default='./widerface_evaluate/ground_truth/')
args = parser.parse_args()
def bbox_overlaps(bboxes1, bboxes2, mode='iou'):
"""Calculate the ious between each bbox of bboxes1 and bboxes2.
Args:
bboxes1(ndarray): shape (n, 4)
bboxes2(ndarray): shape (k, 4)
mode(str): iou (intersection over union) or iof (intersection
over foreground)
Returns:
ious(ndarray): shape (n, k)
"""
assert mode in ['iou', 'iof']
bboxes1 = bboxes1.astype(np.float32)
bboxes2 = bboxes2.astype(np.float32)
rows = bboxes1.shape[0]
cols = bboxes2.shape[0]
ious = np.zeros((rows, cols), dtype=np.float32)
if rows * cols == 0:
return ious
exchange = False
if bboxes1.shape[0] > bboxes2.shape[0]:
bboxes1, bboxes2 = bboxes2, bboxes1
ious = np.zeros((cols, rows), dtype=np.float32)
exchange = True
area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * (
bboxes1[:, 3] - bboxes1[:, 1] + 1)
area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * (
bboxes2[:, 3] - bboxes2[:, 1] + 1)
for i in range(bboxes1.shape[0]):
x_start = np.maximum(bboxes1[i, 0], bboxes2[:, 0])
y_start = np.maximum(bboxes1[i, 1], bboxes2[:, 1])
x_end = np.minimum(bboxes1[i, 2], bboxes2[:, 2])
y_end = np.minimum(bboxes1[i, 3], bboxes2[:, 3])
overlap = np.maximum(x_end - x_start + 1, 0) * np.maximum(
y_end - y_start + 1, 0)
if mode == 'iou':
union = area1[i] + area2 - overlap
else:
union = area1[i] if not exchange else area2
ious[i, :] = overlap / union
if exchange:
ious = ious.T
return ious
def get_gt_boxes(gt_dir):
""" gt dir: (wider_face_val.mat, wider_easy_val.mat, wider_medium_val.mat, wider_hard_val.mat)"""
gt_mat = loadmat(os.path.join(gt_dir, 'wider_face_val.mat'))
hard_mat = loadmat(os.path.join(gt_dir, 'wider_hard_val.mat'))
medium_mat = loadmat(os.path.join(gt_dir, 'wider_medium_val.mat'))
easy_mat = loadmat(os.path.join(gt_dir, 'wider_easy_val.mat'))
facebox_list = gt_mat['face_bbx_list']
event_list = gt_mat['event_list']
file_list = gt_mat['file_list']
hard_gt_list = hard_mat['gt_list']
medium_gt_list = medium_mat['gt_list']
easy_gt_list = easy_mat['gt_list']
return facebox_list, event_list, file_list, hard_gt_list, medium_gt_list, easy_gt_list
def norm_score(pred):
""" norm score
pred {key: [[x1,y1,x2,y2,s]]}
"""
max_score = 0
min_score = 1
for _, k in pred.items():
for _, v in k.items():
if len(v) == 0:
continue
_min = np.min(v[:, -1])
_max = np.max(v[:, -1])
max_score = max(_max, max_score)
min_score = min(_min, min_score)
diff = max_score - min_score
for _, k in pred.items():
for _, v in k.items():
if len(v) == 0:
continue
v[:, -1] = (v[:, -1] - min_score)/diff
def image_eval(pred, gt, ignore, iou_thresh):
""" single image evaluation
pred: Nx5
gt: Nx4
ignore:
"""
_pred = pred.copy()
_gt = gt.copy()
pred_recall = np.zeros(_pred.shape[0])
recall_list = np.zeros(_gt.shape[0])
proposal_list = np.ones(_pred.shape[0])
_pred[:, 2] = _pred[:, 2] + _pred[:, 0]
_pred[:, 3] = _pred[:, 3] + _pred[:, 1]
_gt[:, 2] = _gt[:, 2] + _gt[:, 0]
_gt[:, 3] = _gt[:, 3] + _gt[:, 1]
overlaps = bbox_overlaps(_pred[:, :4], _gt)
for h in range(_pred.shape[0]):
gt_overlap = overlaps[h]
max_overlap, max_idx = gt_overlap.max(), gt_overlap.argmax()
if max_overlap >= iou_thresh:
if ignore[max_idx] == 0:
recall_list[max_idx] = -1
proposal_list[h] = -1
elif recall_list[max_idx] == 0:
recall_list[max_idx] = 1
r_keep_index = np.where(recall_list == 1)[0]
pred_recall[h] = len(r_keep_index)
return pred_recall, proposal_list
if __name__ == '__main__':
evaluation(args.pred, args.gt)
| 32.895833
| 136
| 0.569559
|
# -*- coding: UTF-8 -*-
import os
import tqdm
import pickle
import argparse
import numpy as np
from scipy.io import loadmat
parser = argparse.ArgumentParser(description='widerface evaluate')
parser.add_argument('-p', '--pred', default="./widerface_evaluate/widerface_txt/")
parser.add_argument('-g', '--gt', default='./widerface_evaluate/ground_truth/')
args = parser.parse_args()
def bbox_overlaps(bboxes1, bboxes2, mode='iou'):
"""Calculate the ious between each bbox of bboxes1 and bboxes2.
Args:
bboxes1(ndarray): shape (n, 4)
bboxes2(ndarray): shape (k, 4)
mode(str): iou (intersection over union) or iof (intersection
over foreground)
Returns:
ious(ndarray): shape (n, k)
"""
assert mode in ['iou', 'iof']
bboxes1 = bboxes1.astype(np.float32)
bboxes2 = bboxes2.astype(np.float32)
rows = bboxes1.shape[0]
cols = bboxes2.shape[0]
ious = np.zeros((rows, cols), dtype=np.float32)
if rows * cols == 0:
return ious
exchange = False
if bboxes1.shape[0] > bboxes2.shape[0]:
bboxes1, bboxes2 = bboxes2, bboxes1
ious = np.zeros((cols, rows), dtype=np.float32)
exchange = True
area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * (
bboxes1[:, 3] - bboxes1[:, 1] + 1)
area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * (
bboxes2[:, 3] - bboxes2[:, 1] + 1)
for i in range(bboxes1.shape[0]):
x_start = np.maximum(bboxes1[i, 0], bboxes2[:, 0])
y_start = np.maximum(bboxes1[i, 1], bboxes2[:, 1])
x_end = np.minimum(bboxes1[i, 2], bboxes2[:, 2])
y_end = np.minimum(bboxes1[i, 3], bboxes2[:, 3])
overlap = np.maximum(x_end - x_start + 1, 0) * np.maximum(
y_end - y_start + 1, 0)
if mode == 'iou':
union = area1[i] + area2 - overlap
else:
union = area1[i] if not exchange else area2
ious[i, :] = overlap / union
if exchange:
ious = ious.T
return ious
def get_gt_boxes(gt_dir):
""" gt dir: (wider_face_val.mat, wider_easy_val.mat, wider_medium_val.mat, wider_hard_val.mat)"""
gt_mat = loadmat(os.path.join(gt_dir, 'wider_face_val.mat'))
hard_mat = loadmat(os.path.join(gt_dir, 'wider_hard_val.mat'))
medium_mat = loadmat(os.path.join(gt_dir, 'wider_medium_val.mat'))
easy_mat = loadmat(os.path.join(gt_dir, 'wider_easy_val.mat'))
facebox_list = gt_mat['face_bbx_list']
event_list = gt_mat['event_list']
file_list = gt_mat['file_list']
hard_gt_list = hard_mat['gt_list']
medium_gt_list = medium_mat['gt_list']
easy_gt_list = easy_mat['gt_list']
return facebox_list, event_list, file_list, hard_gt_list, medium_gt_list, easy_gt_list
def read_pred_file(filepath):
with open(filepath, 'r') as f:
lines = f.readlines()
img_file = lines[0].rstrip('\n\r')
lines = lines[2:]
# b = lines[0].rstrip('\r\n').split(' ')[:-1]
# c = float(b)
# a = map(lambda x: [[float(a[0]), float(a[1]), float(a[2]), float(a[3]), float(a[4])] for a in x.rstrip('\r\n').split(' ')], lines)
boxes = []
for line in lines:
line = line.rstrip('\r\n').split(' ')
if line[0] is '':
continue
# a = float(line[4])
boxes.append([float(line[0]), float(line[1]), float(
line[2]), float(line[3]), float(line[4])])
boxes = np.array(boxes)
# boxes = np.array(list(map(lambda x: [float(a) for a in x.rstrip('\r\n').split(' ')], lines))).astype('float')
return img_file.split('/')[-1], boxes
def get_preds(pred_dir):
events = os.listdir(pred_dir)
boxes = dict()
pbar = tqdm.tqdm(events)
for event in pbar:
pbar.set_description('Reading Predictions ')
event_dir = os.path.join(pred_dir, event)
event_images = os.listdir(event_dir)
current_event = dict()
for imgtxt in event_images:
imgname, _boxes = read_pred_file(os.path.join(event_dir, imgtxt))
current_event[imgname.rstrip('.jpg')] = _boxes
boxes[event] = current_event
return boxes
def norm_score(pred):
""" norm score
pred {key: [[x1,y1,x2,y2,s]]}
"""
max_score = 0
min_score = 1
for _, k in pred.items():
for _, v in k.items():
if len(v) == 0:
continue
_min = np.min(v[:, -1])
_max = np.max(v[:, -1])
max_score = max(_max, max_score)
min_score = min(_min, min_score)
diff = max_score - min_score
for _, k in pred.items():
for _, v in k.items():
if len(v) == 0:
continue
v[:, -1] = (v[:, -1] - min_score)/diff
def image_eval(pred, gt, ignore, iou_thresh):
""" single image evaluation
pred: Nx5
gt: Nx4
ignore:
"""
_pred = pred.copy()
_gt = gt.copy()
pred_recall = np.zeros(_pred.shape[0])
recall_list = np.zeros(_gt.shape[0])
proposal_list = np.ones(_pred.shape[0])
_pred[:, 2] = _pred[:, 2] + _pred[:, 0]
_pred[:, 3] = _pred[:, 3] + _pred[:, 1]
_gt[:, 2] = _gt[:, 2] + _gt[:, 0]
_gt[:, 3] = _gt[:, 3] + _gt[:, 1]
overlaps = bbox_overlaps(_pred[:, :4], _gt)
for h in range(_pred.shape[0]):
gt_overlap = overlaps[h]
max_overlap, max_idx = gt_overlap.max(), gt_overlap.argmax()
if max_overlap >= iou_thresh:
if ignore[max_idx] == 0:
recall_list[max_idx] = -1
proposal_list[h] = -1
elif recall_list[max_idx] == 0:
recall_list[max_idx] = 1
r_keep_index = np.where(recall_list == 1)[0]
pred_recall[h] = len(r_keep_index)
return pred_recall, proposal_list
def img_pr_info(thresh_num, pred_info, proposal_list, pred_recall):
pr_info = np.zeros((thresh_num, 2)).astype('float')
for t in range(thresh_num):
thresh = 1 - (t+1)/thresh_num
r_index = np.where(pred_info[:, 4] >= thresh)[0]
if len(r_index) == 0:
pr_info[t, 0] = 0
pr_info[t, 1] = 0
else:
r_index = r_index[-1]
p_index = np.where(proposal_list[:r_index+1] == 1)[0]
pr_info[t, 0] = len(p_index)
pr_info[t, 1] = pred_recall[r_index]
return pr_info
def dataset_pr_info(thresh_num, pr_curve, count_face):
_pr_curve = np.zeros((thresh_num, 2))
for i in range(thresh_num):
_pr_curve[i, 0] = pr_curve[i, 1] / pr_curve[i, 0]
_pr_curve[i, 1] = pr_curve[i, 1] / count_face
return _pr_curve
def voc_ap(rec, prec):
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
def evaluation(pred, gt_path, iou_thresh=0.5):
pred = get_preds(pred)
norm_score(pred)
facebox_list, event_list, file_list, hard_gt_list, medium_gt_list, easy_gt_list = get_gt_boxes(
gt_path)
event_num = len(event_list)
thresh_num = 1000
settings = ['easy', 'medium', 'hard']
setting_gts = [easy_gt_list, medium_gt_list, hard_gt_list]
aps = []
for setting_id in range(3):
# different setting
gt_list = setting_gts[setting_id]
count_face = 0
pr_curve = np.zeros((thresh_num, 2)).astype('float')
# [hard, medium, easy]
pbar = tqdm.tqdm(range(event_num))
for i in pbar:
pbar.set_description('Processing {}'.format(settings[setting_id]))
event_name = str(event_list[i][0][0])
img_list = file_list[i][0]
pred_list = pred[event_name]
sub_gt_list = gt_list[i][0]
# img_pr_info_list = np.zeros((len(img_list), thresh_num, 2))
gt_bbx_list = facebox_list[i][0]
for j in range(len(img_list)):
pred_info = pred_list[str(img_list[j][0][0])]
gt_boxes = gt_bbx_list[j][0].astype('float')
keep_index = sub_gt_list[j][0]
count_face += len(keep_index)
if len(gt_boxes) == 0 or len(pred_info) == 0:
continue
ignore = np.zeros(gt_boxes.shape[0])
if len(keep_index) != 0:
ignore[keep_index-1] = 1
pred_recall, proposal_list = image_eval(
pred_info, gt_boxes, ignore, iou_thresh)
_img_pr_info = img_pr_info(
thresh_num, pred_info, proposal_list, pred_recall)
pr_curve += _img_pr_info
pr_curve = dataset_pr_info(thresh_num, pr_curve, count_face)
propose = pr_curve[:, 0]
recall = pr_curve[:, 1]
ap = voc_ap(recall, propose)
aps.append(ap)
print("==================== Results ====================")
print("Easy Val AP: {}".format(aps[0]))
print("Medium Val AP: {}".format(aps[1]))
print("Hard Val AP: {}".format(aps[2]))
print("=================================================")
if __name__ == '__main__':
evaluation(args.pred, args.gt)
| 4,904
| 0
| 138
|
b6ea7427c056f0c167209b3122696506b99b84b8
| 1,279
|
py
|
Python
|
scripts/appveyor/copydeps.py
|
seec-team/seec
|
4b92456011e86b70f9d88833a95c1f655a21cf1a
|
[
"MIT"
] | 7
|
2018-06-25T12:06:13.000Z
|
2022-01-18T09:20:13.000Z
|
scripts/appveyor/copydeps.py
|
seec-team/seec
|
4b92456011e86b70f9d88833a95c1f655a21cf1a
|
[
"MIT"
] | 20
|
2016-12-01T23:46:12.000Z
|
2019-08-11T02:41:04.000Z
|
scripts/appveyor/copydeps.py
|
seec-team/seec
|
4b92456011e86b70f9d88833a95c1f655a21cf1a
|
[
"MIT"
] | 1
|
2020-10-19T03:20:05.000Z
|
2020-10-19T03:20:05.000Z
|
import os
import subprocess
import shutil
OBJDUMP = "objdump"
DLL_NAME_PREFIX = 'DLL Name:'
paths = filter(lambda line: not "windows" in line.lower(),
os.environ['PATH'].split(os.pathsep))
searchfiles = [f for f in os.listdir('.') if is_interesting_local_file(f)]
while len(searchfiles):
localfile = searchfiles.pop(0)
print "checking " + localfile
for dep in get_dependencies(localfile):
if not in_cwd(dep):
fullpath = find_in_path(dep)
if fullpath:
print "copying from " + fullpath
shutil.copy(fullpath, os.getcwd())
searchfiles.append(dep)
| 27.804348
| 78
| 0.684128
|
import os
import subprocess
import shutil
OBJDUMP = "objdump"
DLL_NAME_PREFIX = 'DLL Name:'
paths = filter(lambda line: not "windows" in line.lower(),
os.environ['PATH'].split(os.pathsep))
def find_in_path(file):
for path in paths:
fullpath = os.path.join(path, file)
if os.path.isfile(fullpath):
return fullpath
return None
def in_cwd(file):
return os.path.isfile(os.path.join(os.getcwd(), file))
def get_dependencies(file):
try:
for line in subprocess.check_output([OBJDUMP, '-p', file]).split('\n'):
if DLL_NAME_PREFIX in line:
yield line[line.find(DLL_NAME_PREFIX) + len(DLL_NAME_PREFIX):].strip()
except CalledProcessError:
print "exception checking " + file
pass
def is_interesting_local_file(file):
lwr = file.lower()
return os.path.isfile(file) and (lwr.endswith("exe") or lwr.endswith("dll"))
searchfiles = [f for f in os.listdir('.') if is_interesting_local_file(f)]
while len(searchfiles):
localfile = searchfiles.pop(0)
print "checking " + localfile
for dep in get_dependencies(localfile):
if not in_cwd(dep):
fullpath = find_in_path(dep)
if fullpath:
print "copying from " + fullpath
shutil.copy(fullpath, os.getcwd())
searchfiles.append(dep)
| 579
| 0
| 92
|
d64d7f2f072d118598f9c54bda691695735052c8
| 2,320
|
py
|
Python
|
thirdparty/blender_autocomplete-master/2.82/bpy/ops/import_anim.py
|
Ray1184/HPMSBatch
|
3852710e7366361cb9e90f471ddccbbce5ffe8ee
|
[
"MIT"
] | null | null | null |
thirdparty/blender_autocomplete-master/2.82/bpy/ops/import_anim.py
|
Ray1184/HPMSBatch
|
3852710e7366361cb9e90f471ddccbbce5ffe8ee
|
[
"MIT"
] | null | null | null |
thirdparty/blender_autocomplete-master/2.82/bpy/ops/import_anim.py
|
Ray1184/HPMSBatch
|
3852710e7366361cb9e90f471ddccbbce5ffe8ee
|
[
"MIT"
] | null | null | null |
import sys
import typing
def bvh(filepath: str = "",
filter_glob: str = "*.bvh",
target: typing.Union[str, int] = 'ARMATURE',
global_scale: float = 1.0,
frame_start: int = 1,
use_fps_scale: bool = False,
update_scene_fps: bool = False,
update_scene_duration: bool = False,
use_cyclic: bool = False,
rotate_mode: typing.Union[str, int] = 'NATIVE',
axis_forward: typing.Union[str, int] = '-Z',
axis_up: typing.Union[str, int] = 'Y'):
'''Load a BVH motion capture file
:param filepath: File Path, Filepath used for importing the file
:type filepath: str
:param filter_glob: filter_glob
:type filter_glob: str
:param target: Target, Import target type
:type target: typing.Union[str, int]
:param global_scale: Scale, Scale the BVH by this value
:type global_scale: float
:param frame_start: Start Frame, Starting frame for the animation
:type frame_start: int
:param use_fps_scale: Scale FPS, Scale the framerate from the BVH to the current scenes, otherwise each BVH frame maps directly to a Blender frame
:type use_fps_scale: bool
:param update_scene_fps: Update Scene FPS, Set the scene framerate to that of the BVH file (note that this nullifies the ‘Scale FPS’ option, as the scale will be 1:1)
:type update_scene_fps: bool
:param update_scene_duration: Update Scene Duration, Extend the scene’s duration to the BVH duration (never shortens the scene)
:type update_scene_duration: bool
:param use_cyclic: Loop, Loop the animation playback
:type use_cyclic: bool
:param rotate_mode: Rotation, Rotation conversionQUATERNION Quaternion, Convert rotations to quaternions.NATIVE Euler (Native), Use the rotation order defined in the BVH file.XYZ Euler (XYZ), Convert rotations to euler XYZ.XZY Euler (XZY), Convert rotations to euler XZY.YXZ Euler (YXZ), Convert rotations to euler YXZ.YZX Euler (YZX), Convert rotations to euler YZX.ZXY Euler (ZXY), Convert rotations to euler ZXY.ZYX Euler (ZYX), Convert rotations to euler ZYX.
:type rotate_mode: typing.Union[str, int]
:param axis_forward: Forward
:type axis_forward: typing.Union[str, int]
:param axis_up: Up
:type axis_up: typing.Union[str, int]
'''
pass
| 50.434783
| 468
| 0.702586
|
import sys
import typing
def bvh(filepath: str = "",
filter_glob: str = "*.bvh",
target: typing.Union[str, int] = 'ARMATURE',
global_scale: float = 1.0,
frame_start: int = 1,
use_fps_scale: bool = False,
update_scene_fps: bool = False,
update_scene_duration: bool = False,
use_cyclic: bool = False,
rotate_mode: typing.Union[str, int] = 'NATIVE',
axis_forward: typing.Union[str, int] = '-Z',
axis_up: typing.Union[str, int] = 'Y'):
'''Load a BVH motion capture file
:param filepath: File Path, Filepath used for importing the file
:type filepath: str
:param filter_glob: filter_glob
:type filter_glob: str
:param target: Target, Import target type
:type target: typing.Union[str, int]
:param global_scale: Scale, Scale the BVH by this value
:type global_scale: float
:param frame_start: Start Frame, Starting frame for the animation
:type frame_start: int
:param use_fps_scale: Scale FPS, Scale the framerate from the BVH to the current scenes, otherwise each BVH frame maps directly to a Blender frame
:type use_fps_scale: bool
:param update_scene_fps: Update Scene FPS, Set the scene framerate to that of the BVH file (note that this nullifies the ‘Scale FPS’ option, as the scale will be 1:1)
:type update_scene_fps: bool
:param update_scene_duration: Update Scene Duration, Extend the scene’s duration to the BVH duration (never shortens the scene)
:type update_scene_duration: bool
:param use_cyclic: Loop, Loop the animation playback
:type use_cyclic: bool
:param rotate_mode: Rotation, Rotation conversionQUATERNION Quaternion, Convert rotations to quaternions.NATIVE Euler (Native), Use the rotation order defined in the BVH file.XYZ Euler (XYZ), Convert rotations to euler XYZ.XZY Euler (XZY), Convert rotations to euler XZY.YXZ Euler (YXZ), Convert rotations to euler YXZ.YZX Euler (YZX), Convert rotations to euler YZX.ZXY Euler (ZXY), Convert rotations to euler ZXY.ZYX Euler (ZYX), Convert rotations to euler ZYX.
:type rotate_mode: typing.Union[str, int]
:param axis_forward: Forward
:type axis_forward: typing.Union[str, int]
:param axis_up: Up
:type axis_up: typing.Union[str, int]
'''
pass
| 0
| 0
| 0
|
eb863368aec5ab18e9d5732dca071219bfaab1e7
| 329
|
py
|
Python
|
1stRound/Easy/728 Self Dividing Numbers/Filter.py
|
ericchen12377/Leetcode-Algorithm-Python
|
eb58cd4f01d9b8006b7d1a725fc48910aad7f192
|
[
"MIT"
] | 2
|
2020-04-24T18:36:52.000Z
|
2020-04-25T00:15:57.000Z
|
1stRound/Easy/728 Self Dividing Numbers/Filter.py
|
ericchen12377/Leetcode-Algorithm-Python
|
eb58cd4f01d9b8006b7d1a725fc48910aad7f192
|
[
"MIT"
] | null | null | null |
1stRound/Easy/728 Self Dividing Numbers/Filter.py
|
ericchen12377/Leetcode-Algorithm-Python
|
eb58cd4f01d9b8006b7d1a725fc48910aad7f192
|
[
"MIT"
] | null | null | null |
left = 1
right = 22
p = Solution()
print(p.selfDividingNumbers(left,right))
| 41.125
| 111
| 0.68997
|
class Solution(object):
def selfDividingNumbers(self, left, right):
is_self_dividing = lambda num: '0' not in str(num) and all(num % int(digit) == 0 for digit in str(num))
return list(filter(is_self_dividing, range(left, right + 1)))
left = 1
right = 22
p = Solution()
print(p.selfDividingNumbers(left,right))
| 204
| 2
| 48
|
e51cf604f2301e3c5c1ccbf90cc8310f8eb78db9
| 1,398
|
py
|
Python
|
patsong/users/views.py
|
didils/PATSONG
|
3747617e70ed2cf67114ace475d1f92bee4e1784
|
[
"MIT"
] | null | null | null |
patsong/users/views.py
|
didils/PATSONG
|
3747617e70ed2cf67114ace475d1f92bee4e1784
|
[
"MIT"
] | null | null | null |
patsong/users/views.py
|
didils/PATSONG
|
3747617e70ed2cf67114ace475d1f92bee4e1784
|
[
"MIT"
] | null | null | null |
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from . import models
from allauth.socialaccount.providers.facebook.views import FacebookOAuth2Adapter
from rest_auth.registration.views import SocialLoginView
| 26.884615
| 80
| 0.638054
|
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from . import models
from allauth.socialaccount.providers.facebook.views import FacebookOAuth2Adapter
from rest_auth.registration.views import SocialLoginView
class ChangePassword(APIView):
def put(self, request, username, format=None):
user = request.user
if user.username == username:
current_password = request.data.get('current_password', None)
if current_password is not None:
passwords_match = user.check_password(current_password)
if passwords_match:
new_password = request.data.get('new_password', None)
if new_password is not None:
user.set_password(new_password)
user.save()
return Response(status=status.HTTP_200_OK)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
class FacebookLogin(SocialLoginView):
adapter_class = FacebookOAuth2Adapter
| 979
| 67
| 73
|
563a0e2a62d5b3da6ba694bb732f0d298837ea5f
| 294
|
py
|
Python
|
Python/django/myFirstDjangoProject/rdmsproj/rdmsapp/tests.py
|
honchardev/Fun
|
ca7c0076e9bb3017c5d7e89aa7d5bd54a83c8ecc
|
[
"MIT"
] | null | null | null |
Python/django/myFirstDjangoProject/rdmsproj/rdmsapp/tests.py
|
honchardev/Fun
|
ca7c0076e9bb3017c5d7e89aa7d5bd54a83c8ecc
|
[
"MIT"
] | 3
|
2020-03-24T16:26:35.000Z
|
2020-04-15T19:40:41.000Z
|
Python/django/myFirstDjangoProject/rdmsproj/rdmsapp/tests.py
|
honchardev/Fun
|
ca7c0076e9bb3017c5d7e89aa7d5bd54a83c8ecc
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from rdmsapp.models import Contact
class ContactTests(TestCase):
"""Contact model tests."""
| 22.615385
| 46
| 0.605442
|
from django.test import TestCase
from rdmsapp.models import Contact
class ContactTests(TestCase):
"""Contact model tests."""
def test_str(self):
contact = Contact(
first_name='a',
last_name='b',
)
self.assertEquals(str(contact), 'a b')
| 137
| 0
| 26
|