query
stringlengths
9
9.05k
document
stringlengths
10
222k
metadata
dict
negatives
listlengths
30
30
negative_scores
listlengths
30
30
document_score
stringlengths
4
10
document_rank
stringclasses
2 values
Given a Voronoi Object, this method is in charge of obtaining the volume of each Voronoi Cell, and classifying it as a boundary cell if one of its vertex indices is 1 or if any of its vertices is outside the domain of interest
def voronoiVolumes(self, vor): volumes = np.array([]) data = vor.points limits = [[np.min(data[:, 0]), np.max(data[:, 0])], [np.min(data[:, 1]), np.max(data[:, 1])], [np.min(data[:, 2]), np.max(data[:, 2])]] nonB = [False for _ in data] for i, region in enumerate(vor.point_region): indices = vor.regions[region] if -1 not in indices: v = vor.vertices[indices] isWithin = self.checkVertices(v, limits) if isWithin: volumes = np.append(volumes, ConvexHull(v).volume) nonB[i] = True return volumes, nonB
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def voronoi(geometry,\n pore_volume='pore.volume',\n **kwargs):\n from scipy.special import cbrt\n pore_vols = geometry[pore_volume]\n value = cbrt(6*pore_vols/_sp.pi)\n return value", "def get_outer_boundary_of_voronoi(self):\n edge = [edge for edge in self.edges if not ...
[ "0.6815165", "0.6787288", "0.65719616", "0.6431204", "0.63884205", "0.6349469", "0.6345909", "0.62319404", "0.6147917", "0.6102491", "0.60988635", "0.6086924", "0.60158694", "0.5977933", "0.596923", "0.59498054", "0.5903642", "0.58810025", "0.5857406", "0.57596266", "0.573259...
0.6796481
1
Given a set of Voronoi Vertices, this simple methods checks if all of them are maintained within the range of the form [[xmin, xmax], [ymin, ymax], [zmin, zmax]] expressed in limits
def checkVertices(vertices, limits): isWithin = True for i,v in enumerate(vertices): x = v[0] y = v[1] z = v[2] if x < limits[0][0] or x > limits[0][1]: isWithin = False break if y < limits[1][0] or y > limits[1][1]: isWithin = False break if z < limits[2][0] or z > limits[2][1]: isWithin = False break return isWithin
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def in_box_bounds(self, test_vec):\n above_min = np.greater(test_vec, self.lower_vertex).all()\n below_max = np.greater(self.upper_vertex, test_vec).all()\n return above_min and below_max", "def add_boundaries(self):\n\n bound_conns=[]\n bound_coords=[]\n bound_vert_inde...
[ "0.6560554", "0.62124604", "0.61240464", "0.6044191", "0.60034245", "0.59941345", "0.5940613", "0.5885276", "0.5870029", "0.5853414", "0.58431876", "0.5830641", "0.58024144", "0.57611763", "0.5759779", "0.57531", "0.57513684", "0.5713757", "0.57067126", "0.5697985", "0.566842...
0.70169705
0
This method determines at which normalized Voronoi volumes do the Random PDF and the obtained PDF intersect
def intersectPDFs(self, threshold=1): diff = np.abs(self.PDF - self.RandomPDF) half = np.argmax(self.RandomPDF) start = np.nonzero(self.PDF > 0.5*np.max(self.PDF))[0][0] end = np.nonzero(self.RandomPDF[half:] < 0.5*np.max(self.RandomPDF))[0][0] + half if start == 0 and half == 0: self.cut1 = 0 else: self.cut1 = np.argmin(diff[start:half]) + start self.V1 = self.bins[self.cut1] * threshold self.cut2 = np.argmin(diff[half:end]) + half self.V2 = self.bins[self.cut2]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def volumePDF(self, maxVar=-1, bins=75, threshold=1):\n print('Cluster Identification Based on Voronoi Volumes')\n start = time.time()\n self.vor = Voronoi(self.data)\n self.volumes, self.nonB = self.voronoiVolumes(self.vor)\n self.nonBI = np.arange(0, len(self.vor.point_region))...
[ "0.6731937", "0.5723254", "0.56301624", "0.5573013", "0.55410963", "0.5535367", "0.55213", "0.5521294", "0.54995483", "0.54817283", "0.5443941", "0.53623676", "0.53475815", "0.53398156", "0.5310163", "0.5289071", "0.5283833", "0.5266256", "0.5254226", "0.5251348", "0.5249623"...
0.68404734
0
This method tracks the evolution of the first intersection between PDFs with the number of bins in the PDF
def optimumBins(self, b0=100, b1=10000, n=100): self.intersections = [] for i in np.linspace(b0, b1, n): self.volumePDF(bins=i) self.intersections.append(self.V1) plt.figure() plt.plot(np.linspace(b0, b1, n), self.intersections) plt.xlabel('Number of Bins [-]') plt.ylabel('Normed Voronoi Volume of Intersection [-]') plt.title('Evolution of Intersection Volume with Number of Bins')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def intersectPDFs(self, threshold=1):\n diff = np.abs(self.PDF - self.RandomPDF)\n half = np.argmax(self.RandomPDF)\n start = np.nonzero(self.PDF > 0.5*np.max(self.PDF))[0][0]\n end = np.nonzero(self.RandomPDF[half:] < 0.5*np.max(self.RandomPDF))[0][0] + half\n\n if start == 0 an...
[ "0.69076186", "0.5713038", "0.56754965", "0.5481251", "0.5395632", "0.5379876", "0.5379819", "0.52434504", "0.51830965", "0.51680446", "0.51668954", "0.51181227", "0.5096356", "0.5096356", "0.5096356", "0.5096356", "0.5096356", "0.5079256", "0.5078702", "0.50733835", "0.50440...
0.6194617
1
Plots all particles, sorting them into cluster or noncluster particles according to the Voronoi classification
def plotClusters(self): fig = plt.figure() ax = fig.add_subplot(111, projection='3d') fig.set_size_inches(18.5, 9.5) ax.set_title('Identification of Cluster Particles with Voronoi Volumes', fontsize=22) ax.set_xlabel('x [m]', fontsize=18) ax.set_ylabel('y [m]', fontsize=18) ax.set_zlabel('z [m]', fontsize=18) strength = np.linspace(0, 0.8, len(self.unique_labels)) np.random.shuffle(strength) colors = [plt.cm.nipy_spectral(each) for each in strength] np.random.shuffle(strength) colorsB = [plt.cm.nipy_spectral(each) for each in strength] for k, col, colB in zip(self.unique_labels, colors, colorsB): a = 1 s = 3 if k == -1: # Black used for noise. col = [1, 0, 0] a = 0.3 s = 1 class_member_mask = (self.labels == k) xy = self.data[class_member_mask] if len(xy) > 0: ax.scatter(xy[:, 0], xy[:, 1], xy[:, 2], c=np.reshape(np.array(col), (1, -1)), edgecolors=np.reshape(np.array(colB), (1, -1)), alpha=a, s=s, label='Cluster ' + str(k))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plotResults(self):\n\n clusters = self.data[[i for i in range(len(self.data)) if self.vorLabels[i] != 0], :]\n vorLabels = [self.vorLabels[i] for i in range(len(self.data)) if self.vorLabels[i] != 0]\n\n self.plot = voronoiPlot(clusters, self.skel, self.skelLabels, self.isCorrect, vorLabel...
[ "0.67489004", "0.6729127", "0.67152536", "0.6654016", "0.66303104", "0.6488753", "0.6219483", "0.6171645", "0.61682975", "0.6068598", "0.60134363", "0.59585845", "0.5934958", "0.59331185", "0.5924034", "0.58946514", "0.5883884", "0.5861032", "0.5849204", "0.58180285", "0.5810...
0.7175168
0
Plots a single Voronoi cell, with its Voronoi vertices as well. To gain perspective wrt to the rest of the points, the limits of the plots are set according to the limits of all the point positions.
def plotVoronoiCell(self, cells): for i in cells: #i indexes volumes i = self.nonBI[i] #now i indexes vor.point_region vI = self.vor.regions[self.vor.point_region[i]] v = self.vor.vertices[vI, :] r = v fig = plt.figure() ax = fig.add_subplot(111, projection='3d') fig.set_size_inches(18.5, 9.5) ax.set_title('Voronoi Cell of Particle ' + str(i)) ax.set_xlabel('x [m]') ax.set_ylabel('y [m]') ax.set_zlabel('z [m]') ax.scatter(r[:, 0], r[:, 1], r[:, 2], s=5, alpha=0.5, label='Cell Boundaries') ax.scatter(self.data[i, 0], self.data[i, 1], self.data[i, 2], s=25, label='Cell Center') ax.set_xlim3d(np.min(self.data[:, 0]), np.max(self.data[:, 0])) ax.set_ylim3d(np.min(self.data[:, 1]), np.max(self.data[:, 1])) ax.set_zlim3d(np.min(self.data[:, 2]), np.max(self.data[:, 2])) # limits = np.vstack((np.array([np.max(self.data[:, 0]), np.max(self.data[:, 1]), np.max(self.data[:, 2])]), np.array([np.min(self.data[:, 0]), np.min(self.data[:, 1]), np.min(self.data[:, 2])]))) # ax.scatter(limits[:, 0], limits[:, 1], limits[:, 2], s=1) ax.legend()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_vor(self,x,ax,tri=False):\n\n L = self.L\n grid_x, grid_y = np.mgrid[-1:2, -1:2]\n grid_x[0, 0], grid_x[1, 1] = grid_x[1, 1], grid_x[0, 0]\n grid_y[0, 0], grid_y[1, 1] = grid_y[1, 1], grid_y[0, 0]\n y = np.vstack([x + np.array([i * L, j * L]) for i, j...
[ "0.672641", "0.65702593", "0.65630525", "0.65132475", "0.64372486", "0.6365562", "0.6342777", "0.6291183", "0.62710994", "0.59524184", "0.58798623", "0.5819559", "0.5795768", "0.57930243", "0.57612365", "0.56800836", "0.5656755", "0.5648358", "0.5621595", "0.5620752", "0.5595...
0.78701174
0
This method is simply in charge of plotting a bar plot comparing cluster volumes
def volumePlot(self, top=10): fig = plt.figure() fig.set_size_inches(18.5, 9.5) ax = fig.add_subplot(111) label = ['Cluster ' + str(i) for i in range(1, len(self.volumesC) + 1)] volumesC = np.sort(self.volumesC)[::-1][:top] sortI = np.argsort(self.volumesC)[::-1][:top] label = [label[i] for i in sortI] cmap = plt.get_cmap('plasma') c = cmap(volumesC) ax.bar(range(top), volumesC, tick_label=label, width=0.5, color=c) ax.tick_params(labelsize=18) plt.ylabel('Volume [m^3]', fontsize=18) plt.title('Volume per Cluster', fontsize=22) plt.savefig('Voronoi Volumes per Cluster')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bar_charts(cluster, genelist, groups=[\"SP\", \"SL06\", \"SL12\", \"SL24\",\"SL48\", \"SL96\", \"FL\", \"FP06\", \"FP12\", \"FP24\",\"FP48\", \"FP96\" ], postfix=''):\n\n limits = cluster.reorder_matrix(groups)\n pp = PdfPages(cluster.exportPath[0:-4] + postfix + '.bar_plots.pdf')\n\n # get kegg pathw...
[ "0.6847775", "0.65582955", "0.6480496", "0.64798915", "0.6471344", "0.6433542", "0.6422154", "0.63602215", "0.6318244", "0.62704057", "0.6259151", "0.6216085", "0.6214352", "0.6189896", "0.61803657", "0.61696666", "0.61290234", "0.6117881", "0.6092647", "0.6063931", "0.602908...
0.7068967
0
For a Voronoi ridge specified by i, this method processes the adjacent Voronoi cell centers, assigning the corresponding cluster label to each of them.
def forPointPair(self, i): areCluster = [self.isCluster[j] for j in self.pairs[i]] if sum(areCluster) > 1: #If at least two neighboring cells are cluster cells, four possible cases exist: 1. none of them have been previously #labeled and thus a new cluster label has to be defined, 2. all have been labeled with the same cluster label #and as a result nothing is to be done, 3. only few of them has been labeled with a cluster label which is #then propagated to the other cells, 4. or several have been assigned different cluster labels, and thus the older #cluster label has to be propagated. labels = [self.labels[j] for j in self.pairs[i]] already = [j != -1 for j in labels] if sum(already) == 0: #None of the cell centers have been assigned a cluster label for j,p in enumerate(self.pairs[i]): if areCluster[j]: self.labels[p] = self.maxLabel self.maxLabel += 1 else: #At least one of the cell centers has been assigned a cluster label contesting = [j for j in labels if j != -1] toAssign = min(contesting) for j,p in enumerate(self.pairs[i]): if areCluster[j]: if labels[j] == -1: self.labels[p] = toAssign elif labels[j] != toAssign: self.propagateLabel(toAssign, labels[j]) self.maxLabel = np.max(self.labels) + 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def assign_labels_to_centroids(self):\n labelled_centroids = []\n for i in range(len(self.final_clusters)):\n labels = map(lambda x: x[0], self.final_clusters[i])\n # pick the most common label\n most_common = Counter(labels).most_common(1)[0][0]\n c = np.r...
[ "0.6171579", "0.6096346", "0.6085736", "0.60224193", "0.60157436", "0.5906784", "0.59040046", "0.5876902", "0.5802641", "0.5690463", "0.5554491", "0.5509175", "0.543474", "0.5428747", "0.5418654", "0.5400812", "0.53619266", "0.5357413", "0.5356873", "0.5355492", "0.5326791", ...
0.6685486
0
This method solves a conflict of labels by propagating the older (lower) label to the Voronoi cells labeled with the newer label
def propagateLabel(self, l1, l2): if l1 != l2: winner = min(l1, l2) loser = max(l1, l2) loserN = 0 superiorN = 0 for i,l in enumerate(self.labels): if l == loser: loserN += 1 self.labels[i] = winner if l > loser: superiorN += 1 self.labels[i] = l - 1 # print('Loser Label is ' + str(loser) + ' . With ' + str(loserN) + ' associated cells. Winner label is ' + str(winner))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def propagate_labels(image,labels,conflict=0):\n rlabels,_ = label(image)\n cors = correspondences(rlabels,labels,False)\n outputs = zeros(amax(rlabels)+1,'i')\n oops = -(1<<30)\n for o,i in cors.T:\n if outputs[o]!=0: outputs[o] = oops\n else: outputs[o] = i\n outputs[outputs==oops...
[ "0.62960845", "0.5878045", "0.5808923", "0.57926816", "0.5774841", "0.56972456", "0.5681934", "0.56174916", "0.5592107", "0.5577403", "0.5546397", "0.5535639", "0.5447317", "0.5445591", "0.5424166", "0.5400711", "0.5394687", "0.53925383", "0.53905594", "0.53837645", "0.535590...
0.65136176
0
Main method of the class, in charge of examining skeleton cluster label and presenting results
def run(self): for l in self.uniqueSkel: mask = np.arange(len(self.skel))[self.skelLabels == l] counts = self.findNearest(mask) self.memberships[l] = counts #self.memberships is an array of as many rows as skeleton labels and as many columns as Voronoi cluster labels, #where the i-th row shows for all skeleton points of cluster label i, how many belong to each of the Voronoi #cluster labels. More precisely, the j-th column of the i-th row of this array shows how many skeleton points #of cluster label i have a closest Voronoi cell center of label j. print('Out of ' + str(len(self.skel)) + ' skeleton points, ' + str(sum(self.memberships[:, 0])) + ' (' + str(round(sum(self.memberships[:, 0]) * 100/len(self.skel), 3)) + ' %) appear in areas classified as void areas by Voronoi') for l in self.uniqueSkel: members = sum(self.skelLabels == l) topVor = np.argsort(self.memberships[l])[::-1][:5] - 1 counts = np.sort(self.memberships[l])[::-1][:5] print('For the ' + str(members) + ' skeleton points with label ' + str(l) + ': ') for i in range(5): if counts[i] > 0: if topVor[i] == -1: add = ' ' + str(counts[i]) + ' ( ' + str(round(counts[i] * 100 / members, 3)) + ' %) are not associated with a Voronoi cluster cell' else: add = ' ' + str(counts[i]) + ' ( ' + str(round(counts[i] * 100/ members, 3)) + ' %) belong to the Voronoi Cluster with label ' + str(topVor[i]) print(add) self.plotResults()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n\n dist = \"Euclidean\"\n path = \"\"\n k_v = 2\n error = []\n k_vals = []\n\n for i in range(len(sys.argv)):\n if sys.argv[i] == \"--path\":\n path = sys.argv[i+1]\n if sys.argv[i] == \"--k\":\n k_v = int(sys.argv[i+1])\n if sys.argv[i] == ...
[ "0.68848646", "0.6841816", "0.65161455", "0.6477813", "0.6471169", "0.6439347", "0.6436151", "0.64239186", "0.63982856", "0.63937217", "0.6327966", "0.632562", "0.6311031", "0.6308998", "0.62794244", "0.6248479", "0.62176025", "0.62134105", "0.6181992", "0.61715055", "0.61619...
0.68589205
1
For a list i of indexes of skeleton point positions, this method examines the closest Voronoi cel center to each skeleton point, and based on this counts how many of the skeleton points belong to each Voronoi label. Note that memberships is a vector where its ith element shows how many of the skeleton positions have a closest Voronoi cell of label i.
def findNearest(self, i): skel = self.skel[i, :] closest = self.nbrs.kneighbors(skel, return_distance=False) memberships = np.zeros(len(self.uniqueVor)) for j, c in enumerate(closest): c = c[0] nearLabel = self.vorLabels[c] memberships[nearLabel] += 1 if nearLabel == 0: self.isCorrect[i[j]] = 0 return memberships
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self):\n for l in self.uniqueSkel:\n mask = np.arange(len(self.skel))[self.skelLabels == l]\n counts = self.findNearest(mask)\n self.memberships[l] = counts\n\n #self.memberships is an array of as many rows as skeleton labels and as many columns as Voronoi clu...
[ "0.70501083", "0.573867", "0.5303514", "0.5237649", "0.5178863", "0.5108157", "0.5104332", "0.50592184", "0.50462115", "0.5019821", "0.5018306", "0.5014337", "0.5011559", "0.49469694", "0.49384215", "0.49318594", "0.49196318", "0.49063683", "0.4904543", "0.4899196", "0.489541...
0.63530916
1
Perform n Bernoulli trials with success probability p and return number of successes.
def perform_bernoulli_trials(n, p): # Initialize number of successes: n_success n_success = 0 # Perform trials for i in range(n): # Choose random number between zero and one: random_number random_number = np.random.random() # If less than p, it's a success so add one to n_success if random_number < p: n_success += 1 return n_success
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def perform_bernoulli_trials(n, p):\n # Initialize number of successes: n_success\n n_success = 0\n\n # Perform trials\n for i in range(n):\n # Choose random number between zero and one: random_number\n random_number = np.random.random()\n\n # If less than p, it's a success so add ...
[ "0.8913371", "0.89072156", "0.8900664", "0.7544249", "0.75013113", "0.7363845", "0.7102742", "0.6818586", "0.6704421", "0.6702102", "0.6523535", "0.6489954", "0.6467495", "0.6451158", "0.6448554", "0.63664526", "0.6310604", "0.6278471", "0.6261832", "0.62180156", "0.615847", ...
0.8910949
1
Test get_posts without ids
def test_get_posts_missing_ids(client): response = client.simulate_get('/page/get_records') assert response.status_code == 400
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_list_posts_fail(self):\n response = self.client.get(reverse('posts:post-list'))\n self.assertEqual(response.status_code, 200)", "def testGetNonExistantPost(self):\n response = self.client.get(\"/api/posts/1\", headers=[(\"Accept\", \"application/json\")])\n\n self.assertEqual...
[ "0.676717", "0.65408266", "0.6508336", "0.64182526", "0.63817656", "0.6342567", "0.617842", "0.61667824", "0.6114686", "0.6045117", "0.5996226", "0.5942765", "0.59425503", "0.5932324", "0.5903278", "0.58965063", "0.5880125", "0.5872758", "0.5825675", "0.577318", "0.576698", ...
0.65633947
1
Test create_record with empty post body
def test_create_record_empty(client): response = client.simulate_post('/page/create_record') assert response.status_code == 400
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_record(self):\n body = [RecordModel()]\n response = self.client.open(\n '//records/create',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body...
[ "0.7912478", "0.77811354", "0.7402375", "0.7200082", "0.7187702", "0.7172909", "0.6987341", "0.6975383", "0.68595964", "0.67589927", "0.67496216", "0.67295873", "0.6715952", "0.67064273", "0.6694563", "0.66786766", "0.66364396", "0.6629828", "0.6628264", "0.662336", "0.660980...
0.83715445
0
line original line of setuRace( racename.... pos index of the open parhenthesis isRuntimeRace True if the race is been created from interface (not loaded from file) return True if succefully created, False otherwise or exception launched
def newRace(self, line, pos, isRuntimeRace=False): c = line.find(")") if c == -1: params = [] else: params = line[pos:c].split(",") # --SetupRace has several parameters: # -- racename to show on screen (no spaces nor underscore) # -- array of checkpoints positions # -- is loop ? true or false (in lowercase, please) # -- Print Altitude of the next checpoint on screen ? true or false (in lowercase, please) r = raceClass(self) try: r.name = params[0][params[0].find("'") + 1 :params[0].rfind("'")] r.pointsVar[0] = params[1].replace(" ", "") r.isLoop = params[2] == "true" r.showAltitude = params[3] == "true" if not isRuntimeRace: self._getRange(r.pointsVar) self._parseArray(r.pointsVar, r, r.points) except Exception: del r return False else: self.races.append(r) self.modified = isRuntimeRace return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mutatraj(self,line):\n line=line.strip().split()\n no=line[1];mutares=line[2]\n typelines=open(self.mainpath+'/A-R.dat','r').readlines() \n mutapath=os.path.join(self.mainpath,no+mutares)\n if not os.path.exists(mutapath):os.makedirs(mutapath)\n rosettapdb=os.path.joi...
[ "0.5446975", "0.5230643", "0.5115488", "0.50672674", "0.50217557", "0.48601362", "0.4847687", "0.4847687", "0.48320773", "0.48207554", "0.48207554", "0.48164436", "0.4721421", "0.46992648", "0.4686111", "0.46842042", "0.46652928", "0.4639667", "0.4631652", "0.45743117", "0.45...
0.7775612
0
return a list with race names
def getRaceList(self): l = [] for r in self.races: l.append(r.name) return l
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def race(self, instance):\r\n return '/'.join([raza.name for raza in instance.user.profile.race.all()])", "def names(self) -> list[str]:", "def getNames(self) -> List[unicode]:\n ...", "def get_seq_names(self) -> List[str]:\n return [seq.Name.lower() for seq in self.Sequencers]", "def ...
[ "0.72669965", "0.65117115", "0.63962364", "0.6379379", "0.6349073", "0.6317211", "0.61734116", "0.615667", "0.6151759", "0.6090637", "0.6047119", "0.6046609", "0.60214084", "0.5969723", "0.59672964", "0.5934459", "0.5914037", "0.58994067", "0.587215", "0.587215", "0.58706605"...
0.7996814
0
get a list to put in the interface irace the integer index of races list
def getCheckpointList(self, irace): r = [] if irace >= len(self.races): return r # new race # msg = "Toolkit Error: trying to get a checkpoint from race that doesn't exists (bad index %d, actual length %d " % (irace, len(self.races)) # raise showedError(msg) therace = self.races[irace] if len(therace.points) > 0: for i in range(len(therace.points)): line = " %.3d - %s " % (i, therace.points[i]['gate']) r.append(line) self.raceIndex = irace therace.showCheckpoints(True) return r
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getRaceList(self):\n\t\tl = []\n\t\tfor r in self.races:\n\t\t\tl.append(r.name)\n\t\treturn l", "def enrollment_list(loc:List[CrimeStatistics])->List[int]:\n # return [] #stub\n # template from List[CrimeStatistics]\n # enrollments is all enrollment values seen so far\n enrollments = [] # type: ...
[ "0.6523289", "0.54145765", "0.5309539", "0.5160551", "0.51471996", "0.51424783", "0.512308", "0.5070747", "0.50332165", "0.5015577", "0.4969441", "0.49606994", "0.49606994", "0.4927924", "0.4920787", "0.49111506", "0.49075723", "0.49041528", "0.48502666", "0.4835571", "0.4825...
0.5926581
1
show or hide checkpoints of this race
def showCheckpoints(self, value=True): try: self.owner.showingcheckpoints = True for i in range(len(self.points)): if self.points[i]['entry'] is None and value: self.points[i]['entry'] = self.owner._ogreWin.addGeneralObject(self.points[i]['gate'] + '.odef', self.points[i]['pos'].asTuple, self.points[i]['rot'].asTuple) #set up callback, just a pointer to the method elif not self.points[i]['entry'] is None: self.points[i]['entry'].visible = value finally: self.owner.showingcheckpoints = False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def view():\n\n checkpoint_ini = parse_checkpoint_ini()\n run_start = checkpoint_ini[\"refget_ena_checkpoint\"][\"run_start\"]\n msg = \"the ena-refget-scheduler is currently configured to run, starting \" \\\n + \"from \" + run_start\n print(msg)", "def show(self) -> None:\n thr_is_a...
[ "0.5596374", "0.55433595", "0.5335721", "0.53062624", "0.52888876", "0.52786565", "0.5234352", "0.5203127", "0.5177826", "0.51289123", "0.51103914", "0.5096311", "0.5087622", "0.5080283", "0.5055638", "0.50398296", "0.5034587", "0.5034587", "0.5012699", "0.5010719", "0.499653...
0.61171097
0
Subclass this method in a platform module to configure the DMD. This method should return a reference to the DMD's platform interface method will will receive the frame data.
def configure_dmd(self): raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def configure_dmd(self) -> \"DmdPlatformInterface\":\n raise NotImplementedError", "def configure_rgb_dmd(self, name: str) -> \"DmdPlatformInterface\":\n raise NotImplementedError", "def __init__(self, dataFrame):\n self.dataFrame = dataFrame", "def __init__(self):\n super().__init__(...
[ "0.7257079", "0.5769627", "0.5761178", "0.57511663", "0.5472211", "0.5416792", "0.5408845", "0.5397852", "0.53871006", "0.5366009", "0.53453034", "0.5293592", "0.52530783", "0.5228261", "0.5221702", "0.5218166", "0.52106", "0.5198825", "0.51540816", "0.5151581", "0.5141395", ...
0.6840002
1
Initialise I2C platform and set feature.
def __init__(self, machine): super().__init__(machine) self.features['has_i2c'] = True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, machine):\n super().__init__(machine)\n\n # Set default platform features. Each platform interface can change\n # these to notify the framework of the specific features it supports.\n self.features['has_drivers'] = True\n self.features['max_pulse'] = 255", "d...
[ "0.69223726", "0.69223726", "0.6696322", "0.6669789", "0.661578", "0.6472706", "0.6393477", "0.6341301", "0.6272525", "0.62672776", "0.61610824", "0.61264825", "0.6056944", "0.604401", "0.6024838", "0.59611595", "0.591767", "0.59086883", "0.5897186", "0.5890242", "0.5880437",...
0.7734488
0
Write an 8bit value to a specific address and register via I2C.
def i2c_write8(self, address, register, value): raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write(self, register, value): #good\r\n\t\tself.i2c.write8(register, value)", "def write8(self, register, value):\n raise NotImplementedError", "def _i2c_write(self, register, value, bank=None):\n if bank is not None:\n self.set_bank(bank)\n self.i2c.write_byte_data(self.ad...
[ "0.81820816", "0.7564525", "0.7462808", "0.711898", "0.71006817", "0.6852014", "0.6789489", "0.6774337", "0.6672894", "0.6656542", "0.6608093", "0.6608093", "0.6605974", "0.6600265", "0.6553627", "0.6430836", "0.63670194", "0.6350984", "0.6313063", "0.6307843", "0.6297266", ...
0.872866
0
Read an 8bit value from an address and register via I2C.
def i2c_read8(self, address, register): raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read(self, register): #good\r\n\t\tcurrentVal = self.i2c.readU8(register)\r\n\t\treturn currentVal", "def read_byte(fd, reg):\n b, = write_read_i2c(fd, bytes([reg]), 1)\n return b", "def i2c_write8(self, address, register, value):\n raise NotImplementedError", "def _i2c_read(self, register, ...
[ "0.7447897", "0.7203795", "0.71414053", "0.7111455", "0.67797315", "0.66714233", "0.66070837", "0.6474858", "0.64660287", "0.64660287", "0.6404863", "0.6402492", "0.6393998", "0.6368525", "0.6337723", "0.6311798", "0.6286579", "0.628637", "0.6215728", "0.6214058", "0.6214058"...
0.8244201
0
Read an 16bit value from an address and register via I2C.
def i2c_read16(self, address, register): raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read16bit(self, register):\n valuearray = bytearray(self.device.readregistermulti(register, 2))\n return struct.unpack('!H', valuearray)[0]", "def _i2c_read(self, register, bank=None):\n if bank is not None:\n self.set_bank(bank)\n return self.i2c.read_byte_data(self.ad...
[ "0.69229364", "0.6757217", "0.66875", "0.66865295", "0.6567214", "0.6475658", "0.64697033", "0.6447846", "0.639497", "0.6385862", "0.6369454", "0.6283643", "0.6235697", "0.6234341", "0.62243325", "0.619493", "0.61311156", "0.6094481", "0.60801494", "0.6063816", "0.6032868", ...
0.8427159
0
Configure a servo device in paltform.
def configure_servo(self, config): raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def configure_servo(self, board):\n self.servo = board.get_pin(f\"d:{self.pin}:p\")\n board.servo_config(\n pin = self.pin,\n min_pulse = 544,\n max_pulse = 2400,\n angle = 93\n )", "async def configure_servo(self, number: str) -> \"Servo...
[ "0.77946854", "0.7138827", "0.6989722", "0.6734098", "0.6667007", "0.6605694", "0.65124875", "0.63172555", "0.631123", "0.6249312", "0.62050116", "0.61567163", "0.61037195", "0.6098429", "0.60313237", "0.6020388", "0.6019371", "0.599168", "0.5991535", "0.5964606", "0.5962235"...
0.72971183
1
Subclass this method in a platform module to configure a matrix light. This method should return a reference to the matrix lights's platform interface object which will be called to access the hardware.
def configure_matrixlight(self, config): raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def configure_light(self, number: str, subtype: str, config: LightConfig,\n platform_settings: dict) -> \"LightPlatformInterface\":\n raise NotImplementedError", "def port_maker(self, platform):\n raise NotImplementedError()", "def _init_hardware(self):\n return", ...
[ "0.6384125", "0.60540134", "0.5839954", "0.5778105", "0.5726734", "0.5701559", "0.5663591", "0.56370527", "0.5621239", "0.56157774", "0.56157774", "0.55837554", "0.5552895", "0.55357146", "0.55314416", "0.55262935", "0.5517123", "0.5507401", "0.5503909", "0.5472796", "0.54349...
0.67870253
0
Subclass this method in a platform module to configure a switch. This method should return a reference to the switch's platform interface object which will be called to access the hardware.
def configure_switch(self, config): raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def configure_switch(self, number: str, config: SwitchConfig, platform_config: dict) -> \"SwitchPlatformInterface\":\n raise NotImplementedError", "def _init_hardware(self):\n return", "def setup_platform(hass, config, add_devices_callback, discovery_info=None):\n host = config.get(CONF_HOST)\...
[ "0.75633675", "0.6523127", "0.65048945", "0.6400696", "0.6324981", "0.628187", "0.625404", "0.62352747", "0.62352747", "0.6210974", "0.62051237", "0.6186631", "0.61536473", "0.61463755", "0.60833514", "0.60233676", "0.6022216", "0.60210854", "0.5984712", "0.59599894", "0.5923...
0.65371144
1
Return config section for additional switch config items.
def get_switch_config_section(cls): return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_switch_config_section(cls) -> Optional[str]:\n return None", "def get_section(self,name):\n if self.__config.has_section(name):\n data={}\n for opt,val in self.__config.items(name):\n data[opt]=val\n return data\n else:\n rai...
[ "0.70081085", "0.6003366", "0.5735365", "0.5721832", "0.5691188", "0.569049", "0.56896424", "0.5651286", "0.5600963", "0.5576065", "0.5570528", "0.55503464", "0.5539065", "0.5482666", "0.5422852", "0.5421628", "0.54125065", "0.53546286", "0.53522193", "0.53439945", "0.5322708...
0.7165999
0
Return config section for additional switch config overwrite items.
def get_switch_overwrite_section(cls): return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_switch_config_section(cls):\n return None", "def get_switch_config_section(cls) -> Optional[str]:\n return None", "def validate_switch_overwrite_section(self, switch: Switch, config_overwrite: dict) -> dict:\n switch.machine.config_validator.validate_config(\n \"switch_o...
[ "0.68945116", "0.6773478", "0.6018817", "0.58323014", "0.5795032", "0.5730897", "0.57164955", "0.5677969", "0.56104964", "0.55940247", "0.5481566", "0.54292023", "0.54252213", "0.5420217", "0.54023397", "0.53998023", "0.5392274", "0.5382826", "0.5382826", "0.53691185", "0.536...
0.69502443
0
Validate switch overwrite section for platform.
def validate_switch_overwrite_section(self, switch: Switch, config_overwrite: dict) -> dict: switch.machine.config_validator.validate_config( "switch_overwrites", config_overwrite, switch.name, base_spec=self.__class__.get_switch_overwrite_section()) return config_overwrite
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_switch_section(self, switch: \"Switch\", config: dict) -> dict:\n if self.get_switch_config_section():\n spec = self.get_switch_config_section() # pylint: disable-msg=assignment-from-none\n config = switch.machine.config_validator.validate_config(spec, config, switch.n...
[ "0.6460815", "0.6240742", "0.58130705", "0.576157", "0.55020815", "0.54661757", "0.5350918", "0.53023165", "0.527657", "0.52585673", "0.52316093", "0.5221872", "0.5212852", "0.521263", "0.521263", "0.5193446", "0.51146615", "0.5110619", "0.5091372", "0.50717586", "0.507172", ...
0.7329361
0
Validate a switch config for platform.
def validate_switch_section(self, switch: Switch, config: dict) -> dict: base_spec = ["device"] if self.__class__.get_switch_config_section(): base_spec.append(self.__class__.get_switch_config_section()) switch.machine.config_validator.validate_config( "switches", config, switch.name, base_spec=base_spec) return config
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_switch_section(self, switch: \"Switch\", config: dict) -> dict:\n if self.get_switch_config_section():\n spec = self.get_switch_config_section() # pylint: disable-msg=assignment-from-none\n config = switch.machine.config_validator.validate_config(spec, config, switch.n...
[ "0.77033937", "0.62883526", "0.6265816", "0.6265816", "0.619783", "0.6017843", "0.5960255", "0.57412815", "0.56868637", "0.5671507", "0.5662892", "0.56561095", "0.5582826", "0.5568699", "0.55670327", "0.55480725", "0.5541124", "0.55361867", "0.5534851", "0.55181766", "0.55080...
0.73547333
1
Add driver feature and default max_pulse length.
def __init__(self, machine): super().__init__(machine) # Set default platform features. Each platform interface can change # these to notify the framework of the specific features it supports. self.features['has_drivers'] = True self.features['max_pulse'] = 255
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setInternalPulser(self,pulserEnable,pulseHeight):\n pass", "def set_gesture_pulse_count_and_length(self, pulse_count, pulse_length):\n if not (1 <= pulse_count <= 64):\n raise ValueError(\"pulse_count must be in range [1-64].\")\n if not (APDS_9960.PULSE_LEN_4_MICROS <= pulse_...
[ "0.54564935", "0.52239263", "0.5213172", "0.52111316", "0.5123015", "0.5072573", "0.5040025", "0.49868053", "0.4933255", "0.49097246", "0.49087787", "0.4903712", "0.49020264", "0.48712954", "0.48569748", "0.48522627", "0.48354438", "0.48199806", "0.4814763", "0.48026678", "0....
0.52949923
1
Subclass this method in a platform module to clear a hardware switch rule for this switch. Clearing a hardware rule means actions on this switch will no longer affect coils. Another way to think of this is that it 'disables' a hardware rule. This is what you'd use to disable flippers and autofire_coils during tilt, game over, etc.
def clear_hw_rule(self, switch, coil): raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clear_hw_rule(self, switch: SwitchSettings, coil: DriverSettings):\n raise NotImplementedError", "def clear_hw_rule(self, switch, coil):\n self.log.info(\"clear_hw_rule(coil=%s sw=%s)\" %\n (coil.hw_driver.number, switch.hw_switch.number))\n self.communicator.rule_c...
[ "0.8259759", "0.797209", "0.6230499", "0.6211321", "0.6141262", "0.5926914", "0.58824986", "0.5876358", "0.58560795", "0.58331096", "0.57995737", "0.57605374", "0.57516795", "0.5739651", "0.5729749", "0.5706329", "0.5672119", "0.5603738", "0.56002516", "0.5589667", "0.5538710...
0.8435235
0
Return addition config section for coils.
def get_coil_config_section(cls): return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_coil_config_section(cls) -> Optional[str]:\n return None", "def get_config(self):\n return self.cat_feats_cfg", "def _getConfigName(self):\n return \"%s_processCoadd_config\" % (self.config.coaddName,)", "def get_rec_config(self):\n conf_map = {}\n if len(self.recon...
[ "0.6987265", "0.61188745", "0.58987737", "0.5838142", "0.5801516", "0.57374656", "0.57162094", "0.5709847", "0.5694724", "0.56832767", "0.5636348", "0.56250817", "0.55275506", "0.5512176", "0.5507755", "0.5460587", "0.5455781", "0.5453585", "0.5423884", "0.5390443", "0.538228...
0.6867581
1
Return addition config section for coils overwrites.
def get_coil_overwrite_section(cls): return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_coil_config_section(cls) -> Optional[str]:\n return None", "def get_coil_config_section(cls):\n return None", "def get_config(self):\n config = {\n }\n base_config = super(MatrixConcat, self).get_config()\n return dict(list(base_config.items()) + list...
[ "0.61900616", "0.61157715", "0.6036548", "0.58794725", "0.5742443", "0.57416075", "0.5736262", "0.5693316", "0.5629141", "0.5574493", "0.55668503", "0.5548216", "0.5522465", "0.55043876", "0.5483229", "0.5475912", "0.5463066", "0.54567635", "0.5451854", "0.54403436", "0.54403...
0.6522079
0
Validate coil overwrite config for platform.
def validate_coil_overwrite_section(self, driver, config_overwrite): driver.machine.config_validator.validate_config( "coil_overwrites", config_overwrite, driver.name, base_spec=self.get_coil_overwrite_section()) return config_overwrite
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _validate_config(self):\n pass", "def validate_config(self):\n pass", "def validate_config(self):\n pass", "def _check_config(self):\n self._config[\"dataset_name\"] = MetaDataset(self._config[\"dataset_name\"])\n self._config[\"embedding_crop\"] = EmbeddingCrop(\n self....
[ "0.6632182", "0.6598692", "0.6598692", "0.638589", "0.6335491", "0.6256042", "0.60498774", "0.60335827", "0.5984886", "0.59721404", "0.5929158", "0.5917141", "0.58872664", "0.5825252", "0.578265", "0.57337016", "0.57169205", "0.57041675", "0.57027197", "0.57003504", "0.567466...
0.7273753
0
Validate coil config for platform.
def validate_coil_section(self, driver, config): base_spec = ["device"] if self.__class__.get_coil_config_section(): base_spec.append(self.__class__.get_coil_config_section()) driver.machine.config_validator.validate_config( "coils", config, driver.name, base_spec=base_spec) return config
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_coil_section(self, driver, config) -> dict:\n if self.get_coil_config_section():\n spec = self.get_coil_config_section() # pylint: disable-msg=assignment-from-none\n config = driver.machine.config_validator.validate_config(spec, config, driver.name)\n elif config:...
[ "0.72570366", "0.6911317", "0.6911317", "0.6909976", "0.65492195", "0.6416078", "0.62831616", "0.6280884", "0.62649447", "0.62235534", "0.6199735", "0.618072", "0.6152399", "0.61097735", "0.60900956", "0.60865796", "0.6076924", "0.5985597", "0.59361964", "0.58753914", "0.5853...
0.70865804
1
Set pulse on hit and release rule to driver. Pulses a driver when a switch is hit. When the switch is released the pulse is canceled. Typically used on the main coil for dual coil flippers without eos switch.
def set_pulse_on_hit_and_release_rule(self, enable_switch, coil): raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_pulse_on_hit_and_release_rule(self, enable_switch: SwitchSettings, coil: DriverSettings):\n raise NotImplementedError", "def set_pulse_on_hit_and_release_rule(self, enable_switch, coil):\n self.log.info(\"set_pulse_on_hit_and_release_rule(coil=%s sw=%s)\" %\n (coil.hw_...
[ "0.7318226", "0.7116496", "0.71158063", "0.69559383", "0.6919302", "0.6803722", "0.6787461", "0.6785574", "0.67510176", "0.6638195", "0.66331255", "0.64948416", "0.59656495", "0.59290755", "0.5917162", "0.5901438", "0.5824688", "0.5653308", "0.5549026", "0.54918414", "0.53997...
0.74059016
0
Set pulse on hit and enable and release and disable rule on driver. Pulses a driver when a switch is hit. Then enables the driver (may be with pwm). When the switch is released the pulse is canceled and the driver gets disabled. When the second disable_switch is hit the pulse is canceled and the driver gets disabled. Typically used on the main coil for dual coil flippers with eos switch.
def set_pulse_on_hit_and_enable_and_release_and_disable_rule(self, enable_switch, disable_switch, coil): raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_pulse_on_hit_and_release_and_disable_rule(self, enable_switch: SwitchSettings,\n eos_switch: SwitchSettings, coil: DriverSettings,\n repulse_settings: Optional[RepulseSettings]):\n raise NotImpl...
[ "0.7557274", "0.7468426", "0.7387155", "0.72845364", "0.7275343", "0.71724355", "0.7050846", "0.6838765", "0.67255855", "0.6656644", "0.6406962", "0.6232095", "0.6179704", "0.60970235", "0.5676361", "0.5635614", "0.56271976", "0.5584219", "0.55626994", "0.555252", "0.55448014...
0.7729859
0
Set pulse on hit rule on driver. Pulses a driver when a switch is hit. When the switch is released the pulse continues. Typically used for autofire coils such as pop bumpers.
def set_pulse_on_hit_rule(self, enable_switch, coil): raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_pulse_on_hit_rule(self, enable_switch: SwitchSettings, coil: DriverSettings):\n raise NotImplementedError", "def set_pulse_on_hit_rule(self, enable_switch, coil):\n self.log.info(\"set_pulse_on_hit_rule(coil=%s sw=%s)\" %\n (coil.hw_driver.number, enable_switch.hw_swit...
[ "0.7381068", "0.72259647", "0.7224337", "0.7212447", "0.71060675", "0.6980037", "0.6900245", "0.6871108", "0.64783704", "0.6437388", "0.6357339", "0.6284529", "0.62668526", "0.6243657", "0.60278153", "0.58707553", "0.5841685", "0.57336885", "0.57220095", "0.5721565", "0.56812...
0.735149
1
Computes the kronecker product of a sequence of matrices.
def kron(*matrices: np.ndarray) -> np.ndarray: product = np.eye(1) for m in matrices: product = np.kron(product, m) return np.array(product)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def kronecker_product(mat1, mat2):\n m1, n1 = mat1.get_shape().as_list()\n mat1_rsh = tf.reshape(mat1, [m1, 1, n1, 1])\n m2, n2 = mat2.get_shape().as_list()\n mat2_rsh = tf.reshape(mat2, [1, m2, 1, n2])\n return tf.reshape(mat1_rsh * mat2_rsh, [m1 * m2, n1 * n2])", "def _kronecker_product(mat1: tf.Tensor, m...
[ "0.74108386", "0.7209864", "0.7069195", "0.69637865", "0.6961648", "0.68957955", "0.6489303", "0.61083525", "0.60522455", "0.5999372", "0.5961622", "0.59576", "0.5849226", "0.581817", "0.57675886", "0.57403356", "0.5626816", "0.5555709", "0.5546035", "0.55225235", "0.55220044...
0.7603846
0
r""" Matches any differential equation that nth_algebraic can solve. Uses `sympy.solve` but teaches it how to integrate derivatives. This involves calling `sympy.solve` and does most of the work of finding a solution (apart from evaluating the integrals).
def _matches(self): eq = self.ode_problem.eq func = self.ode_problem.func var = self.ode_problem.sym # Derivative that solve can handle: diffx = self._get_diffx(var) # Replace derivatives wrt the independent variable with diffx def replace(eq, var): def expand_diffx(*args): differand, diffs = args[0], args[1:] toreplace = differand for v, n in diffs: for _ in range(n): if v == var: toreplace = diffx(toreplace) else: toreplace = Derivative(toreplace, v) return toreplace return eq.replace(Derivative, expand_diffx) # Restore derivatives in solution afterwards def unreplace(eq, var): return eq.replace(diffx, lambda e: Derivative(e, var)) subs_eqn = replace(eq, var) try: # turn off simplification to protect Integrals that have # _t instead of fx in them and would otherwise factor # as t_*Integral(1, x) solns = solve(subs_eqn, func, simplify=False) except NotImplementedError: solns = [] solns = [simplify(unreplace(soln, var)) for soln in solns] solns = [Equality(func, soln) for soln in solns] self.solutions = solns return len(solns) != 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solve_differential_equation(f_derivatives, initial, oldest=120):\n bunch = solve_ivp(f_derivatives, t_span=(0, oldest), y0=initial, vectorized=True, dense_output=True)\n return bunch.sol", "def get_equations(self):\n dyn = self.dynamics\n tds = dyn.time_derivatives\n eqs = []\n ...
[ "0.5892715", "0.58814234", "0.5792888", "0.5772103", "0.57659477", "0.5739992", "0.5614944", "0.56034535", "0.5599466", "0.5571074", "0.5567494", "0.5567494", "0.5477489", "0.54729176", "0.545909", "0.5450426", "0.5439577", "0.5438245", "0.54256886", "0.5413706", "0.54007506"...
0.6641433
0
r""" Helper function to match hint ``linear_coefficients``. Matches the expression to the form `(a_1 x + b_1 f(x) + c_1)/(a_2 x + b_2
def _linear_coeff_match(self, expr, func): f = func.func x = func.args[0] def abc(eq): r''' Internal function of _linear_coeff_match that returns Rationals a, b, c if eq is a*x + b*f(x) + c, else None. ''' eq = _mexpand(eq) c = eq.as_independent(x, f(x), as_Add=True)[0] if not c.is_Rational: return a = eq.coeff(x) if not a.is_Rational: return b = eq.coeff(f(x)) if not b.is_Rational: return if eq == a*x + b*f(x) + c: return a, b, c def match(arg): r''' Internal function of _linear_coeff_match that returns Rationals a1, b1, c1, a2, b2, c2 and a2*b1 - a1*b2 of the expression (a1*x + b1*f(x) + c1)/(a2*x + b2*f(x) + c2) if one of c1 or c2 and a2*b1 - a1*b2 is non-zero, else None. ''' n, d = arg.together().as_numer_denom() m = abc(n) if m is not None: a1, b1, c1 = m m = abc(d) if m is not None: a2, b2, c2 = m d = a2*b1 - a1*b2 if (c1 or c2) and d: return a1, b1, c1, a2, b2, c2, d m = [fi.args[0] for fi in expr.atoms(Function) if fi.func != f and len(fi.args) == 1 and not fi.args[0].is_Function] or {expr} m1 = match(m.pop()) if m1 and all(match(mi) == m1 for mi in m): a1, b1, c1, a2, b2, c2, denom = m1 return (b2*c1 - b1*c2)/denom, (a1*c2 - a2*c1)/denom
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_linear_coef(p1, p2):\n slope = (p1[1] - p2[1]) / (p1[0] - p2[0])\n intercept = p1[1] - slope * p1[0]\n return slope, intercept", "def _parse_linear(expr: Expr, symb_only=False):\n\n coeff, factors = expr.as_coeff_mul()\n\n if len(factors) == 0:\n return coeff, None\n ...
[ "0.6206099", "0.6202251", "0.6075639", "0.59964895", "0.5825655", "0.5821187", "0.58134663", "0.5804364", "0.5800974", "0.58002186", "0.57740015", "0.57720554", "0.5732264", "0.570375", "0.5698317", "0.56869346", "0.5648885", "0.5631185", "0.56305605", "0.56204075", "0.561472...
0.72550875
0
Initializes a new input item behaviour. model requires a input item model
def __init__(self, model): super(InputItemBehaviour, self).__init__(model) logging.log(1, "Trace: InputItemBehaviour(%s)" % model) # self._register_events() eventsManager.registerEvent( 'onInputClick-' + self._model.itemName, (pygame.MOUSEBUTTONDOWN), self.onInputClick) self.write( self._model.text if not self._model.empty else self._model.placeHolder, self._model.color if not self._model.empty else (self._model.color[0] / 2, self._model.color[1] / 2, self._model.color[2] / 2))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self,name,value,*args,**kargs):\n \n kargs['text'] = '' # Force no label\n self.input = value\n InputItem.__init__(self,name,*args,**kargs)\n self.layout().insertWidget(1,self.input)", "def set_inputs(self, item_data):\n self.item_type = item_data[0]\n ...
[ "0.64909756", "0.6275057", "0.6250221", "0.6131106", "0.6091335", "0.60677344", "0.6066739", "0.6032842", "0.6030022", "0.59932554", "0.5961387", "0.5961387", "0.5958153", "0.58987296", "0.58704364", "0.58674127", "0.5828547", "0.5828547", "0.5828547", "0.5828547", "0.5817417...
0.78237134
0
Returns the requested custom interest in full detail.
def GetCustomInterest(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def interests(self):\n if \"interests\" in self._prop_dict:\n return self._prop_dict[\"interests\"]\n else:\n return None", "def get_loan_info():\n\n try:\n principal = int(request.get_json()[\"amount\"])\n tenure = int(request.get_json()[\"tenure\"])\n exc...
[ "0.63655275", "0.58175296", "0.5700428", "0.5637553", "0.5635685", "0.5514969", "0.5504437", "0.5473622", "0.54700804", "0.54615504", "0.5434457", "0.5407035", "0.53967106", "0.53456295", "0.53062636", "0.5304055", "0.53011596", "0.5287489", "0.527064", "0.51641273", "0.51545...
0.680991
0
Creates or updates custom interests. Operation statuses are returned.
def MutateCustomInterests(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def interests(self, create, extracted, **kwargs):\n if not create:\n return\n if extracted:\n for interest in extracted:\n self.interests.add(interest)", "def post(self):\n try:\n data = request.get_json()\n user_interests = Interest...
[ "0.64842814", "0.63414025", "0.58531773", "0.5730995", "0.5729617", "0.51918006", "0.518725", "0.5175335", "0.50396997", "0.49845538", "0.49697483", "0.49449798", "0.4870831", "0.48692632", "0.48253375", "0.48198012", "0.4756726", "0.46992794", "0.4693637", "0.46757862", "0.4...
0.6584547
0
create_unexisted_dir(directory, element)> create unexisted directory. This function create directory if there are unexisted directory in the path.
def create_unexisted_dir(directory, element): directory += "/" + element if get_file_type(directory) == 0: mkdir(directory) return directory
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_not_existing_directory(\n directory: str\n):\n p = pathlib.Path(directory)\n if not p.is_dir():\n print(f'Creating directory: {directory} as it does not exist')\n p.mkdir(parents=True, exist_ok=True)", "def ifnotexistmkdir(directory):\n if not os.path.exists(directory):\n...
[ "0.76600796", "0.7608169", "0.7521542", "0.73711544", "0.72699314", "0.72553796", "0.7234705", "0.71181333", "0.70318043", "0.7030892", "0.7011069", "0.70085305", "0.70078796", "0.70078796", "0.70018446", "0.6988016", "0.6964505", "0.69333845", "0.69333845", "0.692276", "0.69...
0.90142894
0
Checks if the parameter contains only numbers and atleast of length 3.
def validate_phone_number(val): if not val.isdigit() or len(val) < 3: raise argparse.ArgumentTypeError("Invalid phone number") return val
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate(number):\n number = compact(number)\n if len(number) != 10:\n raise InvalidLength()\n if not _nipt_re.match(number):\n raise InvalidFormat()\n return number", "def is_int3(items):\n return len(items) == 3 and all(isinstance(item, int) for item in items)", "def validate...
[ "0.6559074", "0.63855785", "0.63790244", "0.6328673", "0.6307101", "0.6269353", "0.6255046", "0.6255046", "0.62493193", "0.6218361", "0.620253", "0.6170183", "0.61353946", "0.6122875", "0.6059919", "0.6043937", "0.6032656", "0.6031185", "0.60122234", "0.60106295", "0.6006608"...
0.6658412
0
Either direct url or file required.
def clean(self): if not self.direct_url and not self.file: raise ValidationError('File or direct url required.')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_geturl_purpose(self):\n self.fs.create('foo')\n with self.assertRaises(errors.NoURL):\n self.fs.geturl('foo', '__nosuchpurpose__')", "def is_url_requirement(ireq):\n return bool(ireq.original_link)", "def local(self):\r\n return self._url.scheme in ('', 'file')", "def ...
[ "0.60759676", "0.5991341", "0.5933003", "0.58513236", "0.58213466", "0.58213407", "0.5801431", "0.5788176", "0.5755739", "0.57088137", "0.5697783", "0.5672512", "0.5672512", "0.56354654", "0.5618201", "0.5613041", "0.5595372", "0.55869484", "0.5542433", "0.5506076", "0.549803...
0.61261535
0
Basically Object.assign(GAN_PARAMS, params) See stackoverflow 38987
def step6_set_gan_params(params): global GAN_PARAMS GAN_PARAMS = {**GAN_PARAMS, **params}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gather_params(self):\n for layer in self.layers:\n for name, value in layer.params.iteritems():\n self.params[name] = value", "def assign_params(sess, params, network):\n for idx, param in enumerate(params):\n assign_op = network.all_params[idx].assign(param)\n ...
[ "0.6051539", "0.60158926", "0.58936363", "0.5835516", "0.582533", "0.57827276", "0.5763045", "0.57321995", "0.5730178", "0.5729452", "0.56903327", "0.5668459", "0.56559336", "0.56548595", "0.56519294", "0.55942523", "0.5588376", "0.55822974", "0.55491924", "0.55464953", "0.55...
0.74238956
0
Cut a map into many chunks based on the chunk_size variable (note_group_size).
def cut_map_chunks(c): r = [] for i in range(0, (c.shape[0] - chunk_size) // step_size): chunk = c[i * step_size:i * step_size + chunk_size] r.append(chunk) return tf.stack(r)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def chunks(self, list_to_chunk, size):\n for i in range(0, len(list_to_chunk), size):\n yield list_to_chunk[i:i + size]", "def split_chunk(list, chunk_size):\n for i in range(0, len(list), chunk_size):\n yield list[i:i + chunk_size]", "def _split_on_chunks(self, iterable, si...
[ "0.6285758", "0.6224028", "0.61347896", "0.6081203", "0.60160613", "0.6003113", "0.5989309", "0.597895", "0.596816", "0.59547454", "0.5915846", "0.5897652", "0.58658063", "0.5865107", "0.58630854", "0.5845289", "0.584348", "0.5822176", "0.58202684", "0.58082134", "0.5794542",...
0.71322733
0
The biggest function here. It takes a tensor with random number as input, with extra variables in extvar (for extvar see the KerasCustomMappingLayer class) var_tensor shape is (batch_size(None), 4 note_group_size) the first dimension is "None", or "?" if you print the shape. It is filled with batch_size in training time. output shape is (batch_size(None), note_group_size, 6) where each last dimension is (x_start, y_start, x_vector, y_vector, x_end, y_end), all mapped to [1,1] range the vector in the middle is supposed to be the direction of cursor after hitting the note The reason this function is this big is that TensorFlow rewrites functions used in the training loop, which includes this one as a "mapping layer". It was amazingly done, but still I have run into troubles with the rewriting many times. That was the reason I didn't dare to reduce it into smaller functions. You might notice I didn't use np calls in this function at all. Yes, it will cause problems. Everything needs to be converted to tf calls instead. Take it in mind if you're editing it.
def construct_map_with_sliders(var_tensor, extvar=[]): var_tensor = tf.cast(var_tensor, tf.float32) var_shape = var_tensor.shape wall_l = 0.15 wall_r = 0.85 x_max = 512 y_max = 384 out = [] cp = tf.constant([256, 192, 0, 0]) phase = 0 # Should be equal to note_group_size half_tensor = var_shape[1]//4 # length multiplier if "length_multiplier" in extvar: length_multiplier = extvar["length_multiplier"] else: length_multiplier = 1 # notedists if "begin" in extvar: begin_offset = extvar["begin"] else: begin_offset = 0 # note_distances_now = length_multiplier * np.expand_dims(note_distances[begin_offset:begin_offset+half_tensor], axis=0); # note_angles_now = np.expand_dims(note_angles[begin_offset:begin_offset+half_tensor], axis=0); # Load external arrays as tensors relevant_tensors = extvar["relevant_tensors"] relevant_is_slider = relevant_tensors["is_slider"] relevant_slider_lengths = relevant_tensors["slider_lengths"] relevant_slider_types = relevant_tensors["slider_types"] relevant_slider_cos = relevant_tensors["slider_cos_each"] relevant_slider_sin = relevant_tensors["slider_sin_each"] relevant_note_distances = relevant_tensors["note_distances"] note_distances_now = length_multiplier * \ tf.expand_dims(relevant_note_distances, axis=0) # init l = tf.convert_to_tensor(note_distances_now, dtype="float32") sl = l * 0.7 cos_list = var_tensor[:, 0:half_tensor * 2] sin_list = var_tensor[:, half_tensor * 2:] len_list = tf.sqrt(tf.square(cos_list) + tf.square(sin_list)) cos_list = cos_list / len_list sin_list = sin_list / len_list wall_l = 0.05 * x_max + l * 0.5 wall_r = 0.95 * x_max - l * 0.5 wall_t = 0.05 * y_max + l * 0.5 wall_b = 0.95 * y_max - l * 0.5 # rerand = tf.cast(tf.greater(l, y_max / 2), tf.float32); # not_rerand = tf.cast(tf.less_equal(l, y_max / 2), tf.float32); tick_diff = extvar["tick_diff"] # max_ticks_for_ds is an int variable, converted to float to avoid potential type error use_ds = tf.expand_dims(tf.cast(tf.less_equal( tick_diff, 
extvar["max_ticks_for_ds"]), tf.float32), axis=0) # rerand = not use distance snap rerand = 1 - use_ds not_rerand = use_ds next_from_slider_end = extvar["next_from_slider_end"] # Starting position if "start_pos" in extvar: _pre_px = extvar["start_pos"][0] _pre_py = extvar["start_pos"][1] _px = tf.cast(_pre_px, tf.float32) _py = tf.cast(_pre_py, tf.float32) else: _px = tf.cast(256, tf.float32) _py = tf.cast(192, tf.float32) # this is not important since the first position starts at _ppos + Δpos _x = tf.cast(256, tf.float32) _y = tf.cast(192, tf.float32) # Use a buffer to save output outputs = tf.TensorArray(tf.float32, half_tensor) for k in range(half_tensor): # r_max = 192, r = 192 * k, theta = k * 10 rerand_x = 256 + 256 * var_tensor[:, k] rerand_y = 192 + 192 * var_tensor[:, k + half_tensor*2] # Distance snap start # If the starting point is close to the wall, use abs() to make sure it doesn't go outside the boundaries delta_value_x = l[:, k] * cos_list[:, k] delta_value_y = l[:, k] * sin_list[:, k] # It is tensor calculation batched 8~32 each call, so if/else do not work here. 
wall_value_l = tf.cast(tf.less(_px, wall_l[:, k]), tf.float32) wall_value_r = tf.cast(tf.greater(_px, wall_r[:, k]), tf.float32) wall_value_xmid = tf.cast(tf.greater( _px, wall_l[:, k]), tf.float32) * tf.cast(tf.less(_px, wall_r[:, k]), tf.float32) wall_value_t = tf.cast(tf.less(_py, wall_t[:, k]), tf.float32) wall_value_b = tf.cast(tf.greater(_py, wall_b[:, k]), tf.float32) wall_value_ymid = tf.cast(tf.greater( _py, wall_t[:, k]), tf.float32) * tf.cast(tf.less(_py, wall_b[:, k]), tf.float32) x_delta = tf.abs(delta_value_x) * wall_value_l - tf.abs(delta_value_x) * \ wall_value_r + delta_value_x * wall_value_xmid y_delta = tf.abs(delta_value_y) * wall_value_t - tf.abs(delta_value_y) * \ wall_value_b + delta_value_y * wall_value_ymid # rerand_* if not using distance snap, (_p* + *_delta) if using distance snap _x = rerand[:, k] * rerand_x + not_rerand[:, k] * (_px + x_delta) _y = rerand[:, k] * rerand_y + not_rerand[:, k] * (_py + y_delta) # _x = rerand_x; # _y = rerand_y; # _x = _px + x_delta; # _y = _py + y_delta; # Distance snap end # calculate output vector # slider part sln = relevant_slider_lengths[k] slider_type = relevant_slider_types[k] scos = relevant_slider_cos[k] ssin = relevant_slider_sin[k] _a = cos_list[:, k + half_tensor] _b = sin_list[:, k + half_tensor] # cos(a+θ) = cosa cosθ - sina sinθ # sin(a+θ) = cosa sinθ + sina cosθ _oa = _a * scos - _b * ssin _ob = _a * ssin + _b * scos cp_slider = tf.transpose(tf.stack( [_x / x_max, _y / y_max, _oa, _ob, (_x + _a * sln) / x_max, (_y + _b * sln) / y_max])) _px_slider = tf.cond(next_from_slider_end, lambda: _x + _a * sln, lambda: _x) _py_slider = tf.cond(next_from_slider_end, lambda: _y + _b * sln, lambda: _y) # circle part _a = rerand[:, k] * cos_list[:, k + half_tensor] + \ not_rerand[:, k] * cos_list[:, k] _b = rerand[:, k] * sin_list[:, k + half_tensor] + \ not_rerand[:, k] * sin_list[:, k] # _a = cos_list[:, k + half_tensor]; # _b = sin_list[:, k + half_tensor]; cp_circle = tf.transpose( tf.stack([_x / 
x_max, _y / y_max, _a, _b, _x / x_max, _y / y_max])) _px_circle = _x _py_circle = _y # Outputs are scaled to [0,1] region outputs = outputs.write(k, tf.where( relevant_is_slider[k], cp_slider, cp_circle)) # Set starting point for the next circle/slider _px = tf.where( tf.cast(relevant_is_slider[k], tf.bool), _px_slider, _px_circle) _py = tf.where( tf.cast(relevant_is_slider[k], tf.bool), _py_slider, _py_circle) return tf.transpose(outputs.stack(), [1, 0, 2])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generator(real_init): #-1, 5, 3\n with tf.variable_scope('rnn_gen', reuse = tf.AUTO_REUSE, initializer=tf.contrib.layers.xavier_initializer()) as scope:\n x_init = real_init[:,:,0] #-1, 8\n y_init = real_init[:,:,1] #-1, 8\n z_init = real_init[:,:,2] #-1, 8\n \n with tf.variable_scope('x_rw', ...
[ "0.6142572", "0.5995336", "0.5860496", "0.5843825", "0.5820258", "0.5785165", "0.5778968", "0.57665837", "0.57546574", "0.5752519", "0.5733955", "0.5669118", "0.56649256", "0.5586168", "0.55840546", "0.5577646", "0.5569322", "0.5565465", "0.5564543", "0.5561244", "0.55558175"...
0.65570605
0
Generate one set (note_group_size) of notes. Trains at least (good_epoch = 6) epochs for each model, then continue training until all the notes satisfy exit conditions (within boundaries). If the training goes on until (max_epoch = 25), it exits anyways. Inside the training loop, each big epoch it trains generator for (g_epochs = 7) epochs, and classifier for (c_epochs = 3). The numbers are set up to balance the powers of those two models. plot_map flag is only used for debugging.
def generate_set(models, begin=0, start_pos=None, group_id=-1, length_multiplier=1, plot_map=True):
    """Generate one group (note_group_size notes) of map data with the GAN.

    Each big epoch fits the generator stack (mmodel) for GAN_PARAMS["g_epochs"]
    epochs and the classifier for GAN_PARAMS["c_epochs"] epochs, then samples a
    candidate map.  From epoch GAN_PARAMS["good_epoch"] onward it returns early
    as soon as the sampled map's start and end points both lie inside the play
    area (inblock_trueness(...) == 0); otherwise it stops after
    GAN_PARAMS["max_epoch"] epochs.

    Parameters
    ----------
    models : tuple
        (gmodel, mapping_layer, classifier_model, mmodel, default_weights).
    begin : int
        Index of the first note of this group; stored into extvar.
    start_pos : list | None
        Starting [x, y] position; defaults to [256, 192].
    group_id : int
        Used only in the progress log line.
    length_multiplier : number
        Distance multiplier stored into extvar.
    plot_map : bool
        Debugging flag; plots intermediate and final maps when True.

    Returns
    -------
    numpy.ndarray
        The generated map for this group, squeezed to 2-D.
    """
    # Fix: the original default start_pos=[256, 192] was a mutable default
    # argument, shared across every call (and reachable for mutation via
    # extvar["start_pos"]).  Use the None-sentinel idiom instead.
    if start_pos is None:
        start_pos = [256, 192]

    extvar["begin"] = begin
    extvar["start_pos"] = start_pos
    extvar["length_multiplier"] = length_multiplier
    extvar["next_from_slider_end"] = GAN_PARAMS["next_from_slider_end"]

    note_group_size = GAN_PARAMS["note_group_size"]
    max_epoch = GAN_PARAMS["max_epoch"]
    good_epoch = GAN_PARAMS["good_epoch"] - 1
    g_multiplier = GAN_PARAMS["g_epochs"]
    c_multiplier = GAN_PARAMS["c_epochs"]
    g_batch = GAN_PARAMS["g_batch"]
    g_input_size = GAN_PARAMS["g_input_size"]
    c_true_batch = GAN_PARAMS["c_true_batch"]
    c_false_batch = GAN_PARAMS["c_false_batch"]
    c_randfalse_batch = GAN_PARAMS["c_randfalse_batch"]

    reset_model_weights(models)
    set_extvar(models, extvar)
    gmodel, mapping_layer, classifier_model, mmodel, default_weights = models

    # NOTE: assumes max_epoch >= 1; res_map is first assigned inside the loop.
    for i in range(max_epoch):
        gnoise = np.random.random((g_batch, g_input_size))
        glabel = [np.zeros((g_batch, note_group_size * 4)),
                  np.ones((g_batch,)), np.ones((g_batch,))]
        ginput = conv_input(gnoise, extvar)

        # fit mmodel instead of gmodel
        history = mmodel.fit(ginput, glabel, epochs=g_multiplier,
                             validation_split=0.2, verbose=0, callbacks=[])

        # Sample generator output to use as negative examples for the classifier.
        pred_noise = np.random.random((c_false_batch, g_input_size))
        pred_input = conv_input(pred_noise, extvar)
        predicted_maps_data, predicted_maps_mapped, _predclass = mmodel.predict(
            pred_input)
        new_false_maps = predicted_maps_mapped
        new_false_labels = np.zeros(c_false_batch)

        # random numbers as negative samples
        # special_train_data.shape[2] == 6
        randfalse_maps = np.random.rand(
            c_randfalse_batch, note_group_size, special_train_data.shape[2])
        randfalse_labels = np.zeros(c_randfalse_batch)

        rn = np.random.randint(0, special_train_data.shape[0], (c_true_batch,))
        actual_train_data = np.concatenate(
            (new_false_maps, randfalse_maps, special_train_data[rn]), axis=0)
        actual_train_labels = np.concatenate(
            (new_false_labels, randfalse_labels, special_train_labels[rn]), axis=0)

        history2 = classifier_model.fit(actual_train_data, actual_train_labels,
                                        epochs=c_multiplier,
                                        validation_split=0.2, verbose=0,
                                        callbacks=[])

        # calculate the losses
        g_loss = np.mean(history.history['loss'])
        c_loss = np.mean(history2.history['loss'])
        print("Group {}, Epoch {}: G loss: {} vs. C loss: {}".format(
            group_id, 1+i, g_loss, c_loss))

        # delete the history to free memory
        del history, history2

        # make a new set of notes
        res_noise = np.random.random((1, g_input_size))
        res_input = conv_input(res_noise, extvar)
        _resgenerated, res_map, _resclass = mmodel.predict(res_input)
        if plot_map:
            plot_current_map(tf.convert_to_tensor(res_map, dtype=tf.float32))

        # early return if found a good solution
        # good is (inside the map boundary)
        if i >= good_epoch:
            current_map = res_map
            if inblock_trueness(current_map[:, :, 0:2]).numpy()[0] == 0 and inblock_trueness(current_map[:, :, 4:6]).numpy()[0] == 0:
                break

    if plot_map:
        for i in range(3):
            # from our testing, any random input generates nearly the same map
            plot_noise = np.random.random((1, g_input_size))
            plot_input = conv_input(plot_noise, extvar)
            _plotgenerated, plot_mapped, _plotclass = mmodel.predict(
                plot_input)
            plot_current_map(tf.convert_to_tensor(
                plot_mapped, dtype=tf.float32))

    return res_map.squeeze()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_test():\n o = []\n pos = [384, 288]\n note_group_size = GAN_PARAMS[\"note_group_size\"]\n generate_set(begin=3 * note_group_size, start_pos=pos,\n length_multiplier=dist_multiplier, group_id=3, plot_map=True)", "def generate(train_data_path, trained_model_path, num_output...
[ "0.6539602", "0.59907496", "0.5987289", "0.5776078", "0.575335", "0.56405765", "0.5622853", "0.5605876", "0.5576589", "0.5575136", "0.5563018", "0.5555339", "0.55250406", "0.5518238", "0.54963267", "0.54839075", "0.54748034", "0.54684985", "0.54609823", "0.5452404", "0.544303...
0.7040524
0
This is only used in debugging. Prints .osu text directly.
def print_osu_text(a):
    """Debug helper: dump the note array directly as .osu hit-object lines."""
    for idx, note in enumerate(a):
        x, y = int(note[0]), int(note[1])
        t = int(timestamps[idx])
        if is_slider[idx]:
            end_x = int(round(note[0] + note[2] * slider_lengths[idx]))
            end_y = int(round(note[1] + note[3] * slider_lengths[idx]))
            pixel_len = int(slider_length_base[idx] * slider_ticks[idx])
            print("{},{},{},2,0,L|{}:{},1,{},0:0:0".format(
                x, y, t, end_x, end_y, pixel_len))
        else:
            print("{},{},{},1,0,0:0:0".format(x, y, t))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def emu_print(text):\n print \"%s %s\" % (EMU_PRINT_PREFIX, text)", "def print_text(TINY_FONT, x, y, text, color = white):\n text_image = TINY_FONT.render(text, True, color)\n gameDisplay.blit(text_image, (x,y))", "def _print(self, text):\n\t\tif self.verbose:\n\t\t\tprint text", "def Print(self, te...
[ "0.65063953", "0.636514", "0.62646013", "0.6242274", "0.61999524", "0.6140702", "0.6124424", "0.60740983", "0.6073409", "0.6061641", "0.6054812", "0.60428387", "0.60134625", "0.60110116", "0.5988794", "0.5965525", "0.5959333", "0.59516466", "0.5946331", "0.59317094", "0.59148...
0.6594397
0
get_containment_slots(game_object) Retrieve the containment slots of an object.
def get_containment_slots(cls, game_object: GameObject) -> Tuple[CommonObjectContainmentSlot]:
    """Retrieve the containment slots of an object.

    The lookup is done on the object's root parent, since containment slots
    live on the root of the parenting chain.  Returns an empty tuple when the
    object has no SlotComponent.
    """
    # noinspection PyTypeChecker
    game_object: GameObject = CommonObjectUtils.get_root_parent(game_object)
    slot_component = cls.get_slot_component(game_object)
    if slot_component is None:
        return tuple()
    # Build the tuple in a single pass (the old code materialized the infos
    # into a throwaway tuple, appended into a list, then copied to a tuple).
    return tuple(
        CommonObjectContainmentSlot(slot_hash, slot_types)
        for (slot_hash, slot_types) in slot_component.get_containment_slot_infos()
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def slots(self):\n return self.__slots.values()", "def GetAllSlots(cls):\n slots = []\n for parent in cls.__mro__:\n slots.extend(getattr(parent, \"__slots__\", []))\n return slots", "def get_slot_component(cls, game_object: GameObject) -> Union[SlotComponent, None]:\n if not Common...
[ "0.526133", "0.5145089", "0.5117318", "0.5091099", "0.50412333", "0.5012706", "0.48904616", "0.4870754", "0.48465842", "0.47900295", "0.47505447", "0.47355798", "0.47103837", "0.4635481", "0.462364", "0.46191993", "0.4604094", "0.45993218", "0.4587846", "0.4573867", "0.454375...
0.8407297
0
get_slot_component(game_object) Retrieve the SlotComponent of an Object.
def get_slot_component(cls, game_object: GameObject) -> Union[SlotComponent, None]:
    """Retrieve the SlotComponent of an Object, or None when it has no SLOT component."""
    has_slot = CommonComponentUtils.has_component(game_object, CommonComponentType.SLOT)
    if not has_slot:
        return None
    # noinspection PyTypeChecker
    return CommonComponentUtils.get_component(game_object, CommonComponentType.SLOT)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getSlotforObject(cls, obj):\n if obj.__class__ in restslotattributedict.keys():\n attr = cls.getSlotAttrib(obj)\n if attr is not None:\n peek = getattr(obj, \"peek_\" + attr)\n slot = str(peek()).split('/')[0]\n else:\n return False, ...
[ "0.5974101", "0.5687909", "0.5576533", "0.5557353", "0.5550324", "0.55383635", "0.54384905", "0.5350814", "0.5348756", "0.53248906", "0.53145486", "0.52669525", "0.52061915", "0.5178124", "0.51176554", "0.51156604", "0.5053627", "0.5039317", "0.50180686", "0.5006887", "0.4996...
0.8331069
0
get_slot_name(slot) Retrieve the name of a slot.
def get_slot_name(cls, slot: SlotType) -> str:
    """Return the name of a slot type, or 'No Slot Name' when slot is None."""
    return 'No Slot Name' if slot is None else slot.__name__
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_slot_variable(self, layer_name, slot_name):\n return self._tls._slot_variables.get(layer_name, {}).get(\n slot_name, None\n )", "def slot(self):\n if self.__slot in ApexAP1000.SLOTS:\n return self.__slot\n else:\n raise ValueError('Bad slot nu...
[ "0.6404714", "0.60975593", "0.5857362", "0.5857362", "0.5825722", "0.5819096", "0.5776919", "0.57526577", "0.5740833", "0.5738997", "0.56404126", "0.5614616", "0.5590616", "0.5581622", "0.5572394", "0.5567416", "0.5482684", "0.54648507", "0.54605675", "0.54585487", "0.5458404...
0.89791465
0
get_first_connected_object_by_slot_name(script_object, slot_name, include_object_callback=None) Get the first connected object by slot.
def get_first_connected_object_by_slot_name(
    cls,
    script_object: ScriptObject,
    slot_name: CommonSlotType,
    include_object_callback: Callable[[ScriptObject], bool] = None
) -> Union[ScriptObject, None]:
    """Return the first object connected via the named slot, or None when there is none."""
    connected = cls.get_connected_objects_by_slot_name_gen(
        script_object,
        slot_name,
        include_object_callback=include_object_callback
    )
    return next(iter(connected), None)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_connected_objects_by_slot_name_gen(\n cls,\n script_object: ScriptObject,\n slot_name: CommonSlotType,\n include_object_callback: Callable[[ScriptObject], bool] = None\n ) -> Iterator[ScriptObject]:\n if script_object is None:\n return tuple()\n\n slo...
[ "0.71495444", "0.5211954", "0.5190289", "0.51773876", "0.50943196", "0.5074009", "0.50448954", "0.4951919", "0.49485144", "0.49116898", "0.4874453", "0.48243442", "0.4816911", "0.4816911", "0.4781857", "0.47295526", "0.47264364", "0.47204056", "0.47114155", "0.46943238", "0.4...
0.88282555
0
get_connected_objects_by_slot_generator(script_object, slot_name, include_object_callback=None) Get all connected objects by slot.
def get_connected_objects_by_slot_name_gen(
    cls,
    script_object: ScriptObject,
    slot_name: CommonSlotType,
    include_object_callback: Callable[[ScriptObject], bool] = None
) -> Iterator[ScriptObject]:
    """Yield all objects connected to ``script_object`` via the named slot.

    Matches either the plain slot name or the same name with a ``slot_`` prefix.
    An optional ``include_object_callback`` is AND-ed with the slot-name check.
    Yields nothing when ``script_object`` is None.
    """
    if script_object is None:
        # Fix: the original used `return tuple()` here.  Inside a generator a
        # return value only becomes StopIteration.value, which callers never
        # see, so a bare return says what actually happens.
        return

    slot_name_str = str(slot_name)
    with_slot_in_front_of_name = f'slot_{slot_name}'

    def _has_slot_name(_connected_object: ScriptObject) -> bool:
        # True when any of the parent slot's types matches the requested name.
        if not _connected_object.parent_slot:
            return False
        return any(
            cls.get_slot_name(_slot_type) in (slot_name_str, with_slot_in_front_of_name)
            for _slot_type in _connected_object.parent_slot.slot_types
        )

    if include_object_callback is not None:
        # Combine the caller's predicate with the slot-name predicate.
        include_object_callback = CommonFunctionUtils.run_predicates_as_one((_has_slot_name, include_object_callback))
    else:
        include_object_callback = _has_slot_name

    yield from CommonObjectSlotUtils.get_all_connected_objects_gen(
        script_object,
        include_object_callback=include_object_callback
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_first_connected_object_by_slot_name(\n cls,\n script_object: ScriptObject,\n slot_name: CommonSlotType,\n include_object_callback: Callable[[ScriptObject], bool] = None\n ) -> Union[ScriptObject, None]:\n for child in cls.get_connected_objects_by_slot_name_gen(\n ...
[ "0.6946404", "0.6847893", "0.5160706", "0.4920565", "0.47073737", "0.47068468", "0.46840045", "0.46185264", "0.4608268", "0.46013737", "0.4584478", "0.45532605", "0.4523386", "0.4488073", "0.44853002", "0.44077507", "0.44007653", "0.43828243", "0.43709967", "0.43648946", "0.4...
0.8711022
0
get_all_connected_objects_generator(\ script_object,\ include_self=False,\ direct_connections_only=False,\ include_object_callback=None\ ) Retrieve all objects connected to the specified Object.
def get_all_connected_objects_gen(
    cls,
    script_object: ScriptObject,
    include_self: bool = False,
    direct_connections_only: bool = False,
    include_object_callback: Callable[[ScriptObject], bool] = None
) -> Iterator[BaseObject]:
    """Yield objects connected to ``script_object``.

    With ``direct_connections_only`` only immediate children are considered
    (and ``script_object`` itself is yielded unfiltered when ``include_self``);
    otherwise the full recursive child tree is walked.  None entries are
    skipped, and ``include_object_callback`` filters the candidates when given.
    """
    if direct_connections_only:
        if include_self:
            # In direct mode, self bypasses the callback filter (as before).
            yield script_object
        candidates = script_object.children
    else:
        candidates = script_object.children_recursive_gen(include_self=include_self)

    for candidate in candidates:
        if candidate is None:
            continue
        if include_object_callback is not None and not include_object_callback(candidate):
            continue
        yield candidate
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_connected_objects_by_slot_name_gen(\n cls,\n script_object: ScriptObject,\n slot_name: CommonSlotType,\n include_object_callback: Callable[[ScriptObject], bool] = None\n ) -> Iterator[ScriptObject]:\n if script_object is None:\n return tuple()\n\n slo...
[ "0.62425333", "0.5680843", "0.54635173", "0.5462352", "0.54386246", "0.5204806", "0.515782", "0.51439136", "0.51033455", "0.5007588", "0.49970877", "0.49409634", "0.49109083", "0.49077046", "0.48719302", "0.48705676", "0.48520735", "0.48517132", "0.4849996", "0.47837555", "0....
0.8612306
0
Return wx.Icon object based on file basename and bitmap size.
def Icon(self, size, name):
    # ------------------------------------------------------------------------
    """Return a wx.Icon built from the named bitmap, or None when no bitmap exists."""
    bmp = self.Bitmap(size, name)
    if not bmp:
        return None
    result = wx.EmptyIcon()
    result.CopyFromBitmap(bmp)
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def LoadIcon(filename):\n # wx.Image.AddHandler(wx.PNGHandler) # This should work but it doesn't so...\n wx.InitAllImageHandlers() # ...falling back to this instead\n\n filename = \"icons/\" + filename + \".png\"\n image = wx.Image()\n\n with open(filename, mode='r...
[ "0.68177384", "0.6459053", "0.636313", "0.63032913", "0.61679065", "0.6149648", "0.61276317", "0.6058071", "0.6027682", "0.6016447", "0.5983509", "0.5916355", "0.5914208", "0.5907837", "0.5867787", "0.5860089", "0.58295524", "0.5822217", "0.582023", "0.57835793", "0.5761614",...
0.7422429
0
Search anime in anime database matching filters and return given fields
def search_anime(user_id, filters, fields, sort_col, desc):
    """Search the anime database (excluding titles already on the user's MAL)
    matching the given filters, returning the requested fields (max 30 rows)."""
    query_fields = [getattr(Anime, f) for f in fields if hasattr(Anime, f)]

    # Exclude anything already on this user's list.
    owned = db.session.query(UserToAnime.malId).filter(UserToAnime.userId == user_id).subquery()
    query_filters = [~Anime.malId.in_(owned)]
    query_filters.extend(AA_FILTERS[f](filters[f]) for f in AA_FILTERS if filters.get(f))

    # Fall back to sorting by title for unknown columns.
    sort_attr = getattr(Anime, sort_col if hasattr(Anime, sort_col) else 'title')
    if desc:
        sort_attr = sort_attr.desc()

    results = db.session.query(*query_fields).filter(*query_filters).order_by(sort_attr).limit(30)
    return parse_search_results(fields, results)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_anime_info(anime_id, fields):\n my_fields = []\n for f in fields:\n try:\n my_fields.append(getattr(Anime, f))\n except AttributeError:\n pass\n\n my_filters = [Anime.malId == anime_id]\n\n results = db.session.query(*my_fields).filter(*my_filters).limit(1)\n...
[ "0.74254316", "0.5717658", "0.5675069", "0.5618613", "0.55780196", "0.55210227", "0.5499253", "0.5397141", "0.5395199", "0.53436446", "0.53295803", "0.53275436", "0.531523", "0.52899784", "0.52556527", "0.52407557", "0.5222486", "0.5196042", "0.5195845", "0.51951796", "0.5175...
0.66820604
1
Search anime in User's MAL matching filters and return given fields
def search_mal(user_id, filters, fields, sort_col, desc):
    """Search the user's MAL entries matching the given filters, returning the
    requested fields (looked up on Anime first, then UserToAnime; max 30 rows)."""
    query_fields = []
    for name in fields:
        if hasattr(Anime, name):
            query_fields.append(getattr(Anime, name))
        elif hasattr(UserToAnime, name):
            query_fields.append(getattr(UserToAnime, name))

    # Restrict to this user's list and join the two tables.
    owned = db.session.query(UserToAnime.malId).filter(UserToAnime.userId == user_id).subquery()
    query_filters = [Anime.malId.in_(owned), MAL_FILTERS["join"]("dummy")]
    query_filters.extend(MAL_FILTERS[name](filters[name]) for name in MAL_FILTERS if filters.get(name))

    # Resolve the sort column: Anime first, then UserToAnime, else title.
    if hasattr(Anime, sort_col):
        sort_attr = getattr(Anime, sort_col)
    elif hasattr(UserToAnime, sort_col):
        sort_attr = getattr(UserToAnime, sort_col)
    else:
        sort_attr = getattr(Anime, 'title')
    if desc:
        sort_attr = sort_attr.desc()

    results = db.session.query(*query_fields).filter(*query_filters).order_by(sort_attr).limit(30)
    return parse_search_results(fields, results)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def search_anime(user_id, filters, fields, sort_col, desc):\n my_fields = []\n for f in fields:\n if hasattr(Anime, f):\n my_fields.append(getattr(Anime, f))\n\n my_filters = [\n ~Anime.malId.in_(db.session.query(UserToAnime.malId)\n .filter(UserToAnime.use...
[ "0.6912799", "0.6804895", "0.6398511", "0.5926814", "0.591433", "0.5748039", "0.5706041", "0.5679347", "0.5626423", "0.5546155", "0.5520321", "0.5474376", "0.5450997", "0.5450491", "0.54390633", "0.5424477", "0.540696", "0.538905", "0.5386829", "0.5367615", "0.5357427", "0....
0.7085026
0
Get field info for the anime with the given ID
def get_anime_info(anime_id, fields):
    """Get the requested field values for the anime with the given MAL id.

    Unknown field names are silently skipped, matching the behavior of
    search_anime/search_mal.
    """
    # Consistency fix: use the same hasattr-filter idiom as search_anime and
    # search_mal instead of the try/except AttributeError loop.
    my_fields = [getattr(Anime, f) for f in fields if hasattr(Anime, f)]

    my_filters = [Anime.malId == anime_id]

    results = db.session.query(*my_fields).filter(*my_filters).limit(1)
    return parse_search_results(fields, results)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_information(id,field=None):\n data = read_csv()\n if field:\n return data[field][id]\n else:\n return data.iloc[id]", "def info(self, id):", "def getField(self, fieldID, time):\n\n key = (fieldID, time)\n field = 0\n if fieldID not in self.fields.index:\n ...
[ "0.6446642", "0.63725543", "0.61957574", "0.59160596", "0.5703118", "0.5693682", "0.56645566", "0.55645645", "0.5498024", "0.5481042", "0.54654", "0.54090756", "0.54001194", "0.5395187", "0.5359088", "0.5338945", "0.53293055", "0.53100646", "0.529101", "0.52805066", "0.527375...
0.76015735
0
Bulk add anime to database
def add_anime(utoa_list):
    """Bulk insert UserToAnime rows into the database, committing once."""
    for entry in utoa_list:
        db.session.add(entry)
    db.session.commit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def import_aa_data(anime_list):\n for anime, atog in anime_list:\n db.session.add(anime)\n for genre in atog:\n db.session.add(genre)\n\n db.session.commit()", "def update_anime(utoa_list):\n for utoa in utoa_list:\n db.session.merge(utoa)\n\n db.session.commit()", "...
[ "0.7134782", "0.6691967", "0.6538583", "0.608107", "0.5851746", "0.5843146", "0.5800973", "0.57812434", "0.56920874", "0.56710035", "0.5620775", "0.56091475", "0.55755377", "0.5571656", "0.55624515", "0.55595756", "0.55502504", "0.55269414", "0.5497408", "0.5468426", "0.54623...
0.76151854
0
Bulk update anime to database
def update_anime(utoa_list):
    """Bulk merge (upsert) UserToAnime rows into the database, committing once."""
    for entry in utoa_list:
        db.session.merge(entry)
    db.session.commit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update_bulk(self, iterable):\n self.cursor.executemany(self.UPDATE, iterable)", "def add_anime(utoa_list):\n for utoa in utoa_list:\n db.session.add(utoa)\n\n db.session.commit()", "def update_afferents_ap(self,time):\n\t\t# Iterate over all dictionaries\n\t\tfor muscle in self.cells:\...
[ "0.61579776", "0.6129972", "0.6076216", "0.5882213", "0.57964015", "0.576862", "0.5720905", "0.57084924", "0.57017916", "0.5699537", "0.56977254", "0.56977254", "0.56723005", "0.56692874", "0.5665055", "0.5665055", "0.564072", "0.56222653", "0.5608532", "0.5565741", "0.551082...
0.7491884
0
Deletes MALB for corresponding user
def delete_malb(user_id):
    """Delete every MAL entry belonging to user_id; returns the deleted row count."""
    user_rows = db.session.query(UserToAnime).filter(UserToAnime.userId == user_id)
    return user_rows.delete()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_user():", "def delete_user(self, user):\n try:\n with dbm.open(self.dbm_path, 'c', 0o600) as db:\n del db[user.name]\n except KeyError as k:\n pass", "def delete_user():\n #TODO user delete\n pass", "def delete_account(request):\n ubanks ...
[ "0.68551534", "0.6640083", "0.65942574", "0.6466849", "0.6441461", "0.6334826", "0.6277041", "0.6270421", "0.6219305", "0.6171667", "0.6159259", "0.61316097", "0.61063683", "0.60959184", "0.6094192", "0.6052034", "0.60442144", "0.604069", "0.603064", "0.6013814", "0.6002803",...
0.7381267
0
Imports list of (Anime, AtoG) tuples into database
def import_aa_data(anime_list):
    """Import (Anime, AtoG) tuples: add each anime and its genre links, then commit."""
    for anime, genre_links in anime_list:
        db.session.add(anime)
        for link in genre_links:
            db.session.add(link)
    db.session.commit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_anime(utoa_list):\n for utoa in utoa_list:\n db.session.add(utoa)\n\n db.session.commit()", "def create_ta_list(ta_list):\n with open(ta_list, \"r\") as ta_file:\n user_list = ta_file.readlines()\n add_to_db(\"ta_list\", user_list[1:])\n add_to_online_db(\"online_ta\"...
[ "0.6072674", "0.5872311", "0.57183385", "0.5652524", "0.5601286", "0.55983347", "0.5571785", "0.55452985", "0.5471581", "0.5460966", "0.5454386", "0.5442215", "0.5435304", "0.54290146", "0.54154193", "0.5401241", "0.538957", "0.5381085", "0.53738105", "0.5352589", "0.5347758"...
0.81226104
0
Parse DB search results into Search Anime format
def parse_search_results(fields, results):
    """Wrap raw DB result rows into SearchAnimeResult objects for the given fields."""
    # List comprehension instead of the manual append loop (PERF401).
    return [SearchAnimeResult(fields, row) for row in results]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_results(self, result):\n\n interesting = []\n for item in result[\"hits\"][\"hits\"]:\n source = item[\"_source\"]\n meta = source.get(\"meta\")\n\n title = \"No title found\"\n descr = None\n os_path = None\n highl...
[ "0.6209895", "0.5871288", "0.58117527", "0.57239765", "0.57141274", "0.56517756", "0.56350136", "0.56172013", "0.55664617", "0.5535393", "0.5498918", "0.5498918", "0.54667425", "0.54594475", "0.5428907", "0.5407299", "0.53945076", "0.5391623", "0.5381492", "0.537054", "0.5364...
0.6991769
0
Open a host program output file and parse out the runtime of the program.
def get_host_runtime(path: pathlib.Path):
    """Parse a host program output file and return the reported runtime.

    Scans for the first line containing "Host time" and returns its last
    whitespace-separated token as a float.  Returns 0 when no such line exists.
    """
    runtime = 0
    with path.open() as f:
        # Stream line by line instead of materializing the whole file
        # with readlines().
        for line in f:
            if "Host time" in line:
                # split() tolerates tabs and runs of spaces; the old
                # split(' ') returned '' tokens on double spaces.
                runtime = float(line.split()[-1])
                break
    return runtime
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _load_program():\n filepath = os.path.join(os.getcwd(), os.path.dirname(__file__), PROGRAM_TXT)\n f = open(filepath, 'r')\n program = f.read()\n f.close()\n return program.strip().split('\\n')", "def load(self, program_file):\n\n if self.debug:\n print()\n print_he...
[ "0.58105475", "0.57347614", "0.5549151", "0.5497355", "0.5476351", "0.5475047", "0.54340535", "0.54338425", "0.5428344", "0.53656286", "0.5295708", "0.5295352", "0.52802324", "0.52734137", "0.52634996", "0.5248898", "0.5242457", "0.52363616", "0.5235858", "0.5185998", "0.5167...
0.5749508
1
Calculate the average runtime reported in all host output files in a given folder for a particular test case.
def get_avg_host_runtime(path: pathlib.Path, testfile):
    """Average runtime (host time plus total overhead) across all 'host' output
    files in ``path`` matching ``testfile``; returns -1 when none match."""
    accumulated = 0.0
    matched = 0
    for entry in path.iterdir():
        name = str(entry)
        if testfile not in name or 'host' not in name:
            continue
        accumulated += get_host_runtime(entry) + get_overhead_time(entry)[0]
        matched += 1
    return accumulated / matched if matched else -1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _avg_performance(bd_dims, BD_directory, run,archive_file_path,max_performance,conversion_func=None,from_fitfile=False):\n path=get_archive_filepath(BD_directory,run, archive_file_path)\n all_performances=get_all_performances(bd_dims, path, conversion_func,from_fitfile)\n return np.mean(all_performanc...
[ "0.65712726", "0.6475712", "0.64513355", "0.6136365", "0.6105839", "0.60927486", "0.5996279", "0.59590924", "0.5829954", "0.57890874", "0.5748628", "0.5721866", "0.5715467", "0.5701655", "0.56953984", "0.56830746", "0.5660013", "0.5658348", "0.5656913", "0.5650345", "0.562757...
0.7699728
0
Calculate the average broken down overhead time reported by output files in a given folder for a particular test case.
def get_avg_overhead_time(path: pathlib.Path, testfile, num_dpus, num_tasks):
    """Average the 6-part overhead breakdown across matching output files.

    Only files whose name contains ``testfile`` and matches
    ``dpus=<num_dpus>`` and ``tasklets=<num_tasks>`` (each followed by a
    non-digit) are considered.  Returns -1 when no file matches.
    """
    # Renamed the accumulator (was `time`, which shadowed the stdlib module name).
    totals = [0.0] * 6
    matched = 0
    for entry in path.iterdir():
        name = str(entry)
        has_dpus = re.search(rf"dpus={num_dpus}[^0-9]", name) is not None
        has_tasklets = re.search(rf"tasklets={num_tasks}[^0-9]", name) is not None
        if testfile in name and has_dpus and has_tasklets:
            totals = [t + o for t, o in zip(totals, get_overhead_time(entry))]
            matched += 1
    return [t / matched for t in totals] if matched else -1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_avg_host_runtime(path: pathlib.Path, testfile):\n total_time = 0.0\n num_files = 0\n for filename in path.iterdir():\n if (testfile in str(filename)) and ('host' in str(filename)):\n total_time += get_host_runtime(filename)\n ...
[ "0.6887931", "0.635288", "0.6345429", "0.6250087", "0.5861928", "0.58488417", "0.57700205", "0.5639624", "0.55859274", "0.5584842", "0.55632645", "0.5554245", "0.55452335", "0.5545061", "0.55297774", "0.55194134", "0.5515678", "0.55038553", "0.55022204", "0.54996353", "0.5495...
0.71506226
0