Revision | 1d212f2664a62d7de0b1737c0a60848e4fd778bd (tree) |
---|---|
Time | 2008-11-13 04:28:13 |
Author | iselllo |
Committer | iselllo |
I added a script which properly calculates the mean coordination number in the system (the previous calculation in
plot_statistics_single.py was wrong!).
@@ -0,0 +1,1107 @@ | ||
1 | +#! /usr/bin/env python | |
2 | + | |
3 | +import scipy as s | |
4 | +import numpy as n | |
5 | +import pylab as p | |
6 | +#from rpy import r | |
7 | +import distance_calc as d_calc | |
8 | +import igraph as ig | |
9 | +import calc_rad as c_rad | |
10 | +#import g2_calc as g2 | |
11 | +from scipy import stats #I need this module for the linear fit | |
12 | + | |
13 | + | |
14 | + | |
15 | + | |
16 | +n_part=5000 | |
17 | +density=0.01 | |
18 | + | |
19 | +box_vol=n_part/density | |
20 | + | |
21 | + | |
22 | +part_diam=1. # particle diameter | |
23 | + | |
24 | +part_vol=s.pi/6.*part_diam**3. | |
25 | + | |
26 | +tot_vol=n_part*part_vol | |
27 | + | |
28 | +cluster_look = 50 # specific particle (and corresponding cluster) you want to track | |
29 | + | |
30 | +#Again, I prefer to give the number of particles and the density as input parameters | |
31 | +# and work out the box size accordingly | |
32 | + | |
33 | +collisions =1 #whether I should take statistics about the collisions or not. | |
34 | + | |
35 | +ini_config=0 | |
36 | +fin_config=3000 #for large data post-processing I need to declare an initial and final | |
37 | + #configuration I want to read and post-process | |
38 | + | |
39 | + | |
40 | + | |
41 | +by=1000 #stride between configuration indices: I post-process one configuration every "by" saved ones | |
42 | + | |
43 | +figure=0 #whether I should print many figures or not | |
44 | + | |
45 | + | |
46 | +n_config=(fin_config-ini_config)/by # total number of configurations, which are numbered from 0 to n_config-1 | |
47 | + | |
48 | +my_selection=s.arange(ini_config,fin_config,by) | |
49 | + | |
50 | +box_size=(n_part/density)**(1./3.) | |
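| +#A quick sanity check of the numbers above: box_vol=5000/0.01=5.0e5, so | |
| +#box_size=5.0e5**(1./3.), roughly 79.37 particle diameters per side. | |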
51 | +print "the box size is, ", box_size | |
52 | + | |
53 | +threshold=1.04 #threshold below which two particles are considered directly connected | |
54 | + | |
55 | + | |
56 | +save_size_save =1 #tells whether I want to save the coordinates of a cluster of a specific size | |
57 | + | |
58 | +size_save=98 | |
59 | + | |
60 | + | |
61 | + | |
62 | +t_step=1. #here meant as the time-separation between adjacent configurations | |
63 | + | |
64 | + | |
65 | + | |
66 | + | |
67 | + | |
68 | +fold=0 | |
69 | + | |
70 | +gyration = 1 #parameter controlling whether I want to calculate the radius of gyration | |
71 | + | |
72 | +#time=s.linspace(0.,((n_config-1)*t_step),n_config) #I define the time along the simulation | |
73 | +time=p.load("../1/time.dat") | |
74 | + | |
75 | + | |
76 | +delta_t=(time[2]-time[1])*by | |
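| +#delta_t is then the physical time between two post-processed configurations | |
| +#(assuming the entries of time.dat are uniformly spaced). | |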
77 | + | |
78 | + | |
79 | +time=time[my_selection] #I adapt the time to the initial and final configuration | |
80 | + | |
81 | +p.save("time_red.dat",time) | |
82 | + | |
83 | + | |
84 | +#some physical parameters of the system | |
85 | +T=0.1 #temperature | |
86 | +beta=0.1 #damping coefficient | |
87 | +v_0=0. #initial particle mean velocity | |
88 | + | |
89 | + | |
90 | +n_s_ini=2 #element in the time-sequence corresponding | |
91 | + # to the chosen reference time for calculating the velocity autocorrelation | |
92 | + #function | |
93 | + | |
94 | +s_ini=(n_s_ini)*t_step #I define a specific time I will need for the calculation | |
95 | +# of the autocorrelation function | |
96 | + | |
97 | +#print 'the time chosen for the velocity correlation function is, ', s_ini | |
98 | +#print 'and the corresponding time on the array is, ', time[n_s_ini] | |
99 | + | |
100 | + | |
101 | + | |
102 | +Len=box_size | |
103 | +print 'Len is, ', Len | |
104 | + | |
105 | + | |
106 | + | |
107 | +calculate=1 | |
108 | + | |
109 | +if (calculate == 1): | |
110 | + | |
111 | + | |
112 | +# energy_1000=p.load("tot_energy.dat") | |
113 | + | |
114 | +# p.plot(energy_1000[:,0], energy_1000[:,1] ,linewidth=2.) | |
115 | +# p.xlabel('time') | |
116 | +# p.ylabel('energy') | |
117 | +# #p.legend(('beta=1e-2,100 part','beta=1e-1, 100 part', 'beta=1e-1, 200 part')) | |
118 | +# p.title('energy evolution') | |
119 | +# p.grid(True) | |
120 | +# p.savefig('energy_2000_particles.pdf') | |
121 | +# p.hold(False) | |
122 | + | |
123 | +# print "the std of the energy for a system with 2000 particles is, ", s.std(energy_1000[config_ini:n_config,1]) | |
124 | +# print "the mean of the energy for a system with 2000 particles is, ", s.mean(energy_1000[config_ini:n_config,1]) | |
125 | + | |
126 | +# print "the ratio of the two is,", s.std(energy_1000[config_ini:n_config,1])\ | |
127 | +# /s.mean(energy_1000[config_ini:n_config,1]) | |
128 | +# print "and the theoretical value is, ", s.sqrt(2./3.)*(1./s.sqrt(n_part)) | |
129 | + | |
130 | +# tot_config=p.load("total_configuration.dat") | |
131 | +# tot_config=tot_config[(ini_config*n_part*3):(fin_config*n_part*3)] | |
132 | + #I cut the array with the total number | |
133 | + #of configurations for large arrays | |
134 | + | |
135 | + | |
136 | + | |
137 | + | |
138 | +# print "the length of tot_config is, ", len(tot_config) | |
139 | +# tot_config=s.reshape(tot_config,(n_config,3*n_part)) | |
140 | + | |
141 | +# print "tot_config at line 10 is, ", tot_config[10,:] | |
142 | + | |
143 | + | |
144 | +# #Now I save some "raw" data for detailed analysis | |
145 | +# i=71 | |
146 | +# test_arr=tot_config[i,:] | |
147 | +# test_arr=s.reshape(test_arr,(n_part,3)) | |
148 | +# p.save("test_71_raw.dat",test_arr) | |
149 | + | |
150 | + | |
151 | + | |
152 | + #Now I want to "fold" particle positions in order to be sure that they are inside | |
153 | + # my box. | |
154 | + | |
155 | + #f[:,:,k]=where((Vsum<=V[k+1]) & (Vsum>=V[k]), (V[k+1]-Vsum)/(V[k+1]-V[k]),\ | |
156 | + #f[:,:,k] ) | |
157 | + #f[:,:,k]=where((Vsum<=V[k]) & (Vsum>=V[k-1]),(Vsum-V[k-1])/(V[k]-V[k-1]),\ | |
158 | + #f[:,:,k]) | |
159 | + | |
160 | + | |
161 | + | |
162 | +# if (fold ==1): | |
163 | +# #In the case commented below, the box goes from [-L/2,L/2] but that is proba | |
164 | +# #bly not the case in Espresso | |
165 | + | |
166 | +# #Len=box_size/2. | |
167 | + | |
168 | + | |
169 | +# # and then I do the following | |
170 | + | |
171 | +# #tot_config=s.where((tot_config>Len) & (s.remainder(tot_config,(2.*Len))<Len),\ | |
172 | +# #s.remainder(tot_config,Len),tot_config) | |
173 | + | |
174 | + | |
175 | +# #tot_config=s.where((tot_config>Len) & (s.remainder(tot_config,(2.*Len))>=Len),\ | |
176 | +# #(s.remainder(tot_config,Len)-Len),tot_config) | |
177 | + | |
178 | +# #tot_config=s.where((tot_config< -Len) &(s.remainder(tot_config,(-2.*Len))< -Len)\ | |
179 | +# #(s.remainder(tot_config,-Len)+Len),tot_config) | |
180 | + | |
181 | +# #tot_config=s.where((tot_config< -Len) &(s.remainder(tot_config,(-2.*Len))>=-Len)\ | |
182 | +# #s.remainder(tot_config,-Len),tot_config) | |
183 | + | |
184 | + | |
185 | +# #Now the case when the box is [0,L], so Len is actually the box size | |
186 | + | |
187 | +# p.save("tot_config_unfolded.dat",tot_config) | |
188 | + | |
189 | +# Len=box_size | |
190 | + | |
191 | +# tot_config=s.where(tot_config>Len,s.remainder(tot_config,Len),tot_config) | |
192 | +# tot_config=s.where(tot_config<0.,(s.remainder(tot_config,-Len)+Len),tot_config) | |
193 | + | |
194 | +# print "the max of tot_config is", tot_config.max() | |
195 | +# print "the min of tot_config is", tot_config.min() | |
196 | + | |
197 | +# p.save("tot_config_my_folding.dat",tot_config) | |
198 | + | |
199 | +# x_0=tot_config[0,0] #initial x position of all my particles | |
200 | + | |
201 | + | |
202 | + | |
203 | +# x_col=s.arange(n_part)*3 | |
204 | + | |
205 | +# print "x_col is, ", x_col | |
206 | + | |
207 | + | |
208 | +# #Now I plot the motion of particle number 1; I follow the three components | |
209 | + | |
210 | +# p.plot(time,tot_config[:,0] ,linewidth=2.) | |
211 | +# p.xlabel('time') | |
212 | +# p.ylabel('x position particle 1 ') | |
213 | +# #p.legend(('beta=1e-2,100 part','beta=1e-1, 100 part', 'beta=1e-1, 200 part')) | |
214 | +# p.title('x_1 vs time') | |
215 | +# p.grid(True) | |
216 | +# p.savefig('x_position_particle_1.pdf') | |
217 | +# p.hold(False) | |
218 | + | |
219 | +# # p.save("mean_square_disp.dat", var_x_arr) | |
220 | + | |
221 | + | |
222 | +# p.plot(time,tot_config[:,1] ,linewidth=2.) | |
223 | +# p.xlabel('time') | |
224 | +# p.ylabel('y position particle 1 ') | |
225 | +# #p.legend(('beta=1e-2,100 part','beta=1e-1, 100 part', 'beta=1e-1, 200 part')) | |
226 | +# p.title('y_1 vs time') | |
227 | +# p.grid(True) | |
228 | +# p.savefig('y_position_particle_1.pdf') | |
229 | +# p.hold(False) | |
230 | + | |
231 | +# # p.save("mean_square_disp.dat", var_x_arr) | |
232 | + | |
233 | + | |
234 | +# p.plot(time,tot_config[:,2] ,linewidth=2.) | |
235 | +# p.xlabel('time') | |
236 | +# p.ylabel('z position particle 1 ') | |
237 | +# #p.legend(('beta=1e-2,100 part','beta=1e-1, 100 part', 'beta=1e-1, 200 part')) | |
238 | +# p.title('z_1 vs time') | |
239 | +# p.grid(True) | |
240 | +# p.savefig('z_position_particle_1.pdf') | |
241 | +# p.hold(False) | |
242 | + | |
243 | +# # p.save("mean_square_disp.dat", var_x_arr) | |
244 | + | |
245 | + | |
246 | + | |
247 | + | |
248 | + #x_arr=s.zeros((n_part)) | |
249 | + #y_arr=s.zeros((n_part)) | |
250 | + #z_arr=s.zeros((n_part)) | |
251 | + | |
252 | + | |
253 | + | |
254 | + # for i in xrange(0,n_part): | |
255 | +# x_arr[:,i]=tot_config[:,3*i] | |
256 | +# y_arr[:,i]=tot_config[:,(3*i+1)] | |
257 | +# z_arr[:,i]=tot_config[:,(3*i+2)] | |
258 | + | |
259 | + | |
260 | +# print "the x coordinates are, ", x_arr | |
261 | + | |
262 | +# #Now a test: I try to calculate the radius of gyration | |
263 | +# #with a particular distance (suggestion by Yannis) | |
264 | + | |
265 | +# mean_x_arr=s.zeros(n_config) | |
266 | + | |
267 | +# mean_x_arr=x_arr.mean(axis=1) | |
268 | + | |
269 | + | |
270 | +# mean_y_arr=s.zeros(n_config) | |
271 | + | |
272 | +# mean_y_arr=y_arr.mean(axis=1) | |
273 | + | |
274 | + | |
275 | +# mean_z_arr=s.zeros(n_config) | |
276 | + | |
277 | +# mean_z_arr=z_arr.mean(axis=1) | |
278 | + | |
279 | +# var_x_arr2=s.zeros(n_config) | |
280 | +# var_y_arr2=s.zeros(n_config) | |
281 | +# var_z_arr2=s.zeros(n_config) | |
282 | + | |
283 | + | |
284 | + | |
285 | +# for i in xrange(0,n_config): | |
286 | +# for j in xrange(0,n_part): | |
287 | +# var_x_arr2[i]=var_x_arr2[i]+((x_arr[i,j]-mean_x_arr[i])- \ | |
288 | +# Len*n.round((x_arr[i,j]-mean_x_arr[i])/Len))**2. | |
289 | + | |
290 | + | |
291 | +# var_x_arr2=var_x_arr2/n_part | |
292 | +# print 'var_x_arr2 is, ', var_x_arr2 | |
293 | +# p.save( "test_config_before_manipulation.dat",x_arr[600,:]) | |
294 | + | |
295 | + | |
296 | + | |
297 | +# max_x_dist=s.zeros(n_config) | |
298 | + | |
299 | + | |
300 | +# #for i in xrange(0, n_config): | |
301 | +# #mean_x_arr[i]=s.mean(x_arr[i,:]) | |
302 | +# #var_x_arr[i]=s.var(x_arr[i,:]) | |
303 | + | |
304 | +# mean_x_arr[:]=s.mean(x_arr,axis=1) | |
305 | +# var_x_arr[:]=s.var(x_arr,axis=1) | |
306 | +# max_x_dist[:]=x_arr.max(axis=1)-x_arr.min(axis=1) | |
307 | + | |
308 | +# print "mean_x_arr is, ", mean_x_arr | |
309 | +# print "var_x_arr is, ", var_x_arr | |
310 | + | |
311 | + | |
312 | +# if (fold==1): | |
313 | + | |
314 | +# p.plot(time, max_x_dist ,linewidth=2.) | |
315 | +# p.xlabel('time') | |
316 | +# p.ylabel('x-dimension of aggregate ') | |
317 | +# p.title('x-maximum stretch vs time [folded]') | |
318 | +# p.grid(True) | |
319 | +# p.savefig('max_x_distance_folded.pdf') | |
320 | +# p.hold(False) | |
321 | + | |
322 | +# p.save("max_x_distance_folded.dat", max_x_dist) | |
323 | + | |
324 | + | |
325 | +# elif (fold==0): | |
326 | +# p.plot(time, max_x_dist ,linewidth=2.) | |
327 | +# p.xlabel('time') | |
328 | +# p.ylabel('x-dimension of aggregate ') | |
329 | +# p.title('x-maximum stretch vs time[unfolded]') | |
330 | +# p.grid(True) | |
331 | +# p.savefig('max_x_distance_unfolded.pdf') | |
332 | +# p.hold(False) | |
333 | + | |
334 | +# p.save("max_x_distance_unfolded.dat", max_x_dist) | |
335 | + | |
336 | + | |
337 | + | |
338 | + | |
339 | + | |
340 | +# p.plot(time, var_x_arr ,linewidth=2.) | |
341 | +# p.xlabel('time') | |
342 | +# p.ylabel('mean square displacement ') | |
343 | +# #p.legend(('beta=1e-2,100 part','beta=1e-1, 100 part', 'beta=1e-1, 200 part')) | |
344 | +# p.title('VAR(x) vs time') | |
345 | +# p.grid(True) | |
346 | +# p.savefig('mean_square_disp.pdf') | |
347 | +# p.hold(False) | |
348 | + | |
349 | +# p.save("mean_square_disp.dat", var_x_arr) | |
350 | + | |
351 | + | |
352 | + | |
353 | +# p.plot(time, s.sqrt(var_x_arr) ,linewidth=2.) | |
354 | +# p.xlabel('time') | |
355 | +# p.ylabel('mean square displacement ') | |
356 | +# #p.legend(('beta=1e-2,100 part','beta=1e-1, 100 part', 'beta=1e-1, 200 part')) | |
357 | +# p.title('std(x) vs time') | |
358 | +# p.grid(True) | |
359 | +# p.savefig('std_of_x_position.pdf') | |
360 | +# p.hold(False) | |
361 | + | |
362 | +# p.save("std_of_x_position.dat", s.sqrt(var_x_arr)) | |
363 | + | |
364 | + | |
365 | + | |
366 | + | |
367 | +# p.plot(time, mean_x_arr ,linewidth=2.) | |
368 | +# p.xlabel('time') | |
369 | +# p.ylabel('mean x position ') | |
370 | +# #p.legend(('beta=1e-2,100 part','beta=1e-1, 100 part', 'beta=1e-1, 200 part')) | |
371 | +# p.title('mean(x) vs time') | |
372 | +# p.grid(True) | |
373 | +# p.savefig('mean_x_position.pdf') | |
374 | +# p.hold(False) | |
375 | + | |
376 | +# p.save("mean_x_position.dat", mean_x_arr) | |
377 | + | |
378 | + | |
379 | +# #I now calculate the radius of gyration | |
380 | + | |
381 | +# if (gyration ==1): | |
382 | + | |
383 | +# mean_y_arr=s.zeros(n_config) | |
384 | +# mean_z_arr=s.zeros(n_config) | |
385 | + | |
386 | + | |
387 | + | |
388 | + | |
389 | +# var_y_arr[:]=s.var(y_arr,axis=1) | |
390 | +# var_z_arr[:]=s.var(z_arr,axis=1) | |
391 | + | |
392 | +# mean_y_arr[:]=s.mean(y_arr,axis=1) | |
393 | +# mean_z_arr[:]=s.mean(z_arr,axis=1) | |
394 | + | |
395 | + | |
396 | +# r_gyr=s.sqrt(var_x_arr+var_y_arr+var_z_arr) | |
397 | + | |
398 | +# p.save("r_gyr_my_post_processing.dat",r_gyr) | |
399 | + | |
400 | +# p.plot(time, r_gyr ,linewidth=2.) | |
401 | +# p.xlabel('time') | |
402 | +# p.ylabel('Radius of gyration') | |
403 | +# p.title('Radius of gyration vs time') | |
404 | +# p.grid(True) | |
405 | +# p.savefig('radius of gyration.pdf') | |
406 | +# p.hold(False) | |
407 | + | |
408 | +# #Now I want to calculate the distance of the centre of | |
409 | +# #mass of my aggregate from the origin | |
410 | +# dist_origin=s.sqrt(mean_x_arr**2.+mean_y_arr**2.+mean_z_arr**2.) | |
411 | + | |
412 | +# p.plot(time, dist_origin ,linewidth=2.) | |
413 | +# p.xlabel('time') | |
414 | +# p.ylabel('Mean distance from origin') | |
415 | +# p.title('Mean distance vs time') | |
416 | +# p.grid(True) | |
417 | +# p.savefig('mean_distance_from_origin.pdf') | |
418 | +# p.hold(False) | |
419 | + | |
420 | + | |
421 | + | |
422 | +# #Now some plots of the std's along the other 2 directions | |
423 | +# p.plot(time, s.sqrt(var_y_arr) ,linewidth=2.) | |
424 | +# p.xlabel('time') | |
425 | +# p.ylabel('mean square displacement ') | |
426 | +# p.title('std(y) vs time') | |
427 | +# p.grid(True) | |
428 | +# p.savefig('std_of_y_position.pdf') | |
429 | +# p.hold(False) | |
430 | + | |
431 | +# p.save("std_of_y_position.dat", s.sqrt(var_y_arr)) | |
432 | + | |
433 | + | |
434 | +# p.plot(time, s.sqrt(var_z_arr) ,linewidth=2.) | |
435 | +# p.xlabel('time') | |
436 | +# p.ylabel('mean square displacement ') | |
437 | +# p.title('std(z) vs time') | |
438 | +# p.grid(True) | |
439 | +# p.savefig('std_of_z_position.pdf') | |
440 | +# p.hold(False) | |
441 | + | |
442 | +# p.save("std_of_z_position.dat", s.sqrt(var_z_arr)) | |
443 | + | |
444 | + | |
445 | + | |
446 | +# p.plot(time, mean_y_arr ,linewidth=2.) | |
447 | +# p.xlabel('time') | |
448 | +# p.ylabel('mean y position ') | |
449 | +# p.title('mean(y) vs time') | |
450 | +# p.grid(True) | |
451 | +# p.savefig('mean_y_position.pdf') | |
452 | +# p.hold(False) | |
453 | + | |
454 | +# p.save("mean_y_position.dat", mean_y_arr) | |
455 | + | |
456 | + | |
457 | + | |
458 | + | |
459 | +# p.plot(time, mean_z_arr ,linewidth=2.) | |
460 | +# p.xlabel('time') | |
461 | +# p.ylabel('mean z position ') | |
462 | +# p.title('mean(z) vs time') | |
463 | +# p.grid(True) | |
464 | +# p.savefig('mean_z_position.pdf') | |
465 | +# p.hold(False) | |
466 | + | |
467 | +# p.save("mean_z_position.dat", mean_z_arr) | |
468 | + | |
469 | + | |
470 | +# #Now some comparative plots: | |
471 | + | |
472 | + | |
473 | +# p.plot(time, mean_x_arr,time, mean_y_arr,time, mean_z_arr,linewidth=2.) | |
474 | +# p.xlabel('time') | |
475 | +# p.ylabel('mean position') | |
476 | +# p.legend(('mean x','mean y','mean z',)) | |
477 | +# p.title('Evolution mean position') | |
478 | +# p.grid(True) | |
479 | +# p.savefig('mean_position_comparison.pdf') | |
480 | +# p.hold(False) | |
481 | + | |
482 | + | |
483 | +# p.plot(time, var_x_arr,time, var_y_arr,time, var_z_arr,linewidth=2.) | |
484 | +# p.xlabel('time') | |
485 | +# p.ylabel('mean square displacement') | |
486 | +# p.legend(('var x','var y','var z',)) | |
487 | +# p.title('Evolution mean square displacement') | |
488 | +# p.grid(True) | |
489 | +# p.savefig('mean_square_displacement_comparison.pdf') | |
490 | +# p.hold(False) | |
491 | + | |
492 | + | |
493 | + | |
494 | +# p.plot(time, s.sqrt(var_x_arr),time, s.sqrt(var_y_arr),time,\ | |
495 | +# s.sqrt(var_z_arr),linewidth=2.) | |
496 | +# p.xlabel('time') | |
497 | +# p.ylabel('std of displacement') | |
498 | +# p.legend(('std x','std y','std z',)) | |
499 | +# p.title('Evolution std of displacement') | |
500 | +# p.grid(True) | |
501 | +# p.savefig('std_displacement_comparison.pdf') | |
502 | +# p.hold(False) | |
503 | + | |
504 | + | |
505 | + | |
506 | + | |
507 | + | |
508 | +# # Now I compare my calculations with the one made | |
509 | +# # by espresso: | |
510 | + | |
511 | +# r_gyr_esp=p.load("rgyr.dat") | |
512 | + | |
513 | +# p.plot(time, r_gyr, r_gyr_esp[:,0], r_gyr_esp[:,1],linewidth=2.) | |
514 | +# p.xlabel('time') | |
515 | +# p.ylabel('Radius of gyration') | |
516 | +# p.legend(('my_postprocessing','espresso')) | |
517 | +# p.title('Radius of gyration vs time') | |
518 | +# p.grid(True) | |
519 | +# p.savefig('radius_of_gyration_comparison.pdf') | |
520 | +# p.hold(False) | |
521 | + | |
522 | + | |
523 | + | |
524 | + | |
525 | + | |
526 | +# # Now I do pretty much the same thing with the velocity | |
527 | + | |
528 | +# tot_config_vel=p.load("total_configuration_vel.dat") | |
529 | + | |
530 | + | |
531 | +# if (gyration ==1): | |
532 | +# r_gyr=s.zeros(n_config) | |
533 | +# y_arr=s.zeros((n_config,n_part)) | |
534 | +# z_arr=s.zeros((n_config,n_part)) | |
535 | + | |
536 | +# var_y_arr=s.zeros(n_config) | |
537 | +# var_z_arr=s.zeros(n_config) | |
538 | + | |
539 | +# for i in xrange(0,n_part): | |
540 | +# y_arr[:,i]=tot_config[:,(3*i+1)] | |
541 | +# z_arr[:,i]=tot_config[:,(3*i+2)] | |
542 | + | |
543 | +# var_y_arr[:]=s.var(y_arr,axis=1) | |
544 | +# var_z_arr[:]=s.var(z_arr,axis=1) | |
545 | + | |
546 | + | |
547 | + | |
548 | +# print "the length of tot_config is, ", len(tot_config) | |
549 | +# tot_config_vel=s.reshape(tot_config_vel,(n_config,3*n_part)) | |
550 | + | |
551 | +# #print "tot_config at line 10 is, ", tot_config[10,:] | |
552 | + | |
553 | + | |
554 | +# v_0=tot_config_vel[0,0] #initial x velocity of all my particles | |
555 | +# v_arr=s.zeros((n_config,n_part)) | |
556 | +# print 'OK creating v_arr' | |
557 | +# v_col=s.arange(n_part)*3 | |
558 | + | |
559 | +# #print "x_col is, ", x_col | |
560 | + | |
561 | +# for i in xrange(0,n_part): | |
562 | +# v_arr[:,i]=tot_config_vel[:,3*i] | |
563 | + | |
564 | +# mean_v_arr=s.zeros(n_config) | |
565 | +# var_v_arr=s.zeros(n_config) | |
566 | + | |
567 | +# # for i in xrange(0, n_config): | |
568 | +# # mean_v_arr[i]=s.mean(v_arr[i,:]) | |
569 | +# #var_v_arr[i]=s.var(v_arr[i,:]) | |
570 | + | |
571 | +# mean_v_arr[:]=s.mean(v_arr,axis=1) | |
572 | +# var_v_arr[:]=s.var(v_arr,axis=1) | |
573 | + | |
574 | + | |
575 | + | |
576 | + | |
577 | +# #print "mean_v_arr is, ", mean_v_arr | |
578 | +# #print "var_v_arr is, ", var_v_arr | |
579 | + | |
580 | +# #time=s.linspace(0.,(n_config*t_step),n_config) | |
581 | + | |
582 | +# p.plot(time, var_v_arr ,linewidth=2.) | |
583 | +# p.xlabel('time') | |
584 | +# p.ylabel('velocity variance') | |
585 | +# #p.legend(('beta=1e-2,100 part','beta=1e-1, 100 part', 'beta=1e-1, 200 part')) | |
586 | +# p.title('VAR(v) vs time') | |
587 | +# p.grid(True) | |
588 | +# p.savefig('velocity_variance.pdf') | |
589 | +# p.hold(False) | |
590 | + | |
591 | +# p.save("velocity_variance.dat", var_v_arr) | |
592 | + | |
593 | +# #Now I want to calculate the autocorrelation function. | |
594 | +# #First, I need to fix an initial time. I choose something which is well past the | |
595 | +# #ballistic regime | |
596 | + | |
597 | + | |
598 | + | |
599 | +# #Now I start calculating the velocity autocorrelation function | |
600 | + | |
601 | +# vel_autocor=s.zeros(n_config) | |
602 | + | |
603 | + | |
604 | + | |
605 | +# for i in xrange(0, n_config): | |
606 | +# vel_autocor[i]=s.mean(v_arr[i]*v_arr[n_s_ini]) | |
607 | + | |
608 | + | |
609 | +# p.plot(time, vel_autocor ,linewidth=2.) | |
610 | +# p.xlabel('time') | |
611 | +# p.ylabel('<v(s)v(t)> ') | |
612 | +# #p.legend(('beta=1e-2,100 part','beta=1e-1, 100 part', 'beta=1e-1, 200 part')) | |
613 | +# p.title('Velocity correlation') | |
614 | +# p.grid(True) | |
615 | +# p.savefig('velocity_correlation_numerics.pdf') | |
616 | +# p.hold(False) | |
617 | + | |
618 | + | |
619 | +# p.save("velocity_autocorrelation.dat", vel_autocor) | |
620 | + | |
621 | + | |
622 | + | |
623 | + | |
624 | + | |
625 | + | |
626 | +# #Now I try reconstructing the structure of the aggregate. | |
627 | + | |
628 | +# #first a test case | |
629 | + | |
630 | +# my_conf=tot_config[919,:] | |
631 | + | |
632 | +# print 'my_conf is, ', my_conf | |
633 | + | |
634 | + | |
635 | +# #Now I want to reconstruct the structure of the aggregate in 3D | |
636 | +# # I choose particle zero as a reference | |
637 | + | |
638 | +# x_test=s.zeros(3) | |
639 | +# y_test=s.zeros(3) | |
640 | +# z_test=s.zeros(3) | |
641 | + | |
642 | +# for i in xrange(0,3): | |
643 | +# x_test[i]=my_conf[3*i] | |
644 | +# y_test[i]=my_conf[3*i+1] | |
645 | +# z_test[i]=my_conf[3*i+2] | |
646 | + | |
647 | + | |
648 | +# print "x_test is, ", x_test | |
649 | +# print "y_test is, ", y_test | |
650 | +# print "z_test is, ", z_test | |
651 | + | |
652 | +# #OK reading the files | |
653 | + | |
654 | + | |
655 | + | |
656 | +# pos_0=s.zeros(3) #I initialized the positions of the three particles | |
657 | +# pos_1=s.zeros(3) | |
658 | +# pos_2=s.zeros(3) | |
659 | + | |
660 | +# pos_1[0]=r_ij[0] #Now I have given the x-position of particles 1 and 2 wrt particle 0 | |
661 | +# pos_2[0]=r_ij[1] | |
662 | + | |
663 | + | |
664 | +# #Now I do the same for the y coord! | |
665 | + | |
666 | +# count_int=0 | |
667 | + | |
668 | +# for i in xrange(0,(n_part-1)): | |
669 | +# for j in xrange((i+1),n_part): | |
670 | +# r_ij[count_int]=y_test[i]-y_test[j] | |
671 | +# r_ij[count_int]=r_ij[count_int]-Len*n.round(r_ij[count_int]/Len) | |
672 | +# r_ij[count_int]=-r_ij[count_int] #I'd better reverse the signs now. | |
673 | +# print 'i and j are, ', (i+1), (j+1) | |
674 | + | |
675 | +# count_int=count_int+1 | |
676 | + | |
677 | +# print 'r_ij is, ', r_ij | |
678 | + | |
679 | + | |
680 | +# pos_1[1]=r_ij[0] #Now I have given the y-position of particles 1 and 2 wrt particle 0 | |
681 | +# pos_2[1]=r_ij[1] | |
682 | + | |
683 | + | |
684 | + | |
685 | +# #Now I do the same for the z coord! | |
686 | + | |
687 | +# count_int=0 | |
688 | + | |
689 | +# for i in xrange(0,(n_part-1)): | |
690 | +# for j in xrange((i+1),n_part): | |
691 | +# r_ij[count_int]=z_test[i]-z_test[j] | |
692 | +# r_ij[count_int]=r_ij[count_int]-Len*n.round(r_ij[count_int]/Len) | |
693 | +# r_ij[count_int]=-r_ij[count_int] #I'd better reverse the signs now. | |
694 | +# print 'i and j are, ', (i+1), (j+1) | |
695 | + | |
696 | +# count_int=count_int+1 | |
697 | + | |
698 | +# print 'r_ij is, ', r_ij | |
699 | + | |
700 | + | |
701 | +# pos_1[2]=r_ij[0] #Now I have given the z-position of particles 1 and 2 wrt particle 0 | |
702 | +# pos_2[2]=r_ij[1] | |
703 | + | |
704 | +# print 'pos_0 is,', pos_0 | |
705 | +# print 'pos_1 is,', pos_1 | |
706 | +# print 'pos_2 is,', pos_2 | |
707 | + | |
708 | +# #now I can calculate the radius of gyration! | |
709 | + | |
710 | +# x_test[0]=pos_0[0] | |
711 | +# x_test[1]=pos_1[0] | |
712 | +# x_test[2]=pos_2[0] | |
713 | + | |
714 | + | |
715 | +# y_test[0]=pos_0[1] | |
716 | +# y_test[1]=pos_1[1] | |
717 | +# y_test[2]=pos_2[1] | |
718 | + | |
719 | + | |
720 | +# z_test[0]=pos_0[2] | |
721 | +# z_test[1]=pos_1[2] | |
722 | +# z_test[2]=pos_2[2] | |
723 | + | |
724 | + | |
725 | +# R_g=s.sqrt(s.var(x_test)+s.var(y_test)+s.var(z_test)) | |
726 | + | |
727 | +# print "the correct radius of gyration is, ", R_g | |
728 | + | |
729 | +############################################################# | |
730 | +############################################################# | |
731 | + #Now I start counting the number of aggregates in each saved configuration | |
732 | + #First I re-build the configuration of the system with the "correct" x_arr,y_arr,z_arr | |
733 | + | |
734 | + # I turned the following bit into a comment since I am using the original coord as | |
735 | + #returned by Espresso to start with | |
736 | + | |
737 | + | |
738 | + | |
739 | + | |
740 | +# #I re-use the tot_config array for this purpose | |
741 | + | |
742 | +# for i in xrange(0,n_part): | |
743 | +# tot_config[:,3*i]=x_arr[:,i] | |
744 | +# tot_config[:,(3*i+1)]=y_arr[:,i] | |
745 | +# tot_config[:,(3*i+2)]=z_arr[:,i] | |
746 | + | |
747 | + #p.save("test_calculating_graph.dat",tot_config[71:73,:]) | |
748 | + | |
749 | + | |
750 | + | |
751 | + | |
752 | +#Now a function to count the collisions. | |
753 | +#Despite its name, kernel_calc only counts the collisions which took place; it knows nothing | |
754 | +#about the direct calculation of the kernel. | |
755 | + | |
756 | + | |
757 | + def kernel_calc(A): | |
758 | + | |
759 | + d = {} | |
760 | + for r in A: | |
761 | + t = tuple(r) | |
762 | + d[t] = d.get(t,0) + 1 | |
763 | + | |
764 | + # The dict d now has the counts of the unique rows of A. | |
765 | + | |
766 | + B = n.array(d.keys()) # The unique rows of A | |
767 | + C = n.array(d.values()) # The counts of the unique rows | |
768 | + | |
769 | + return B,C | |
770 | + | |
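| + #A minimal sketch of kernel_calc on made-up collision pairs (the demo | |
| + #names below are hypothetical; dict ordering makes the row order of B | |
| + #arbitrary): | |
| + if 0: #flip to 1 for a quick self-check | |
| +     A_demo=n.array([[1,1],[1,2],[1,1]]) | |
| +     B_demo,C_demo=kernel_calc(A_demo) | |
| +     print "unique collision pairs, ", B_demo #[[1,1],[1,2]] in some order | |
| +     print "counts of each pair, ", C_demo #[2,1], aligned with B_demo rows | |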
771 | + | |
772 | + #The following function actually takes care of calculating the elements of the kernel matrix from the | |
773 | + #statistics on the collisions. | |
774 | + | |
775 | + | |
776 | + | |
777 | + def kernel_entries_normalized(B2, dist, C2, box_vol, delta_t): | |
778 | + dim=s.shape(B2) | |
779 | + print "dim is, ", dim | |
780 | + | |
781 | + n_i=s.zeros(dim[0]) | |
782 | + n_j=s.zeros(dim[0]) | |
783 | + | |
784 | + | |
785 | + | |
786 | + for i in xrange(len(B2[:,0])): | |
787 | + | |
788 | + n_i[i]=len(s.where(dist==B2[i,0])[0]) | |
789 | + n_j[i]=len(s.where(dist==B2[i,1])[0]) | |
790 | + | |
791 | + n_i=n_i/box_vol | |
792 | + n_j=n_j/box_vol | |
793 | + | |
794 | + | |
795 | + | |
796 | + kernel_list=C2/(n_i*n_j*delta_t*box_vol) #I do not get the whole kernel matrix, | |
797 | + #but only the matrix elements for the collisions which actually took place | |
798 | + | |
799 | + return kernel_list | |
800 | + | |
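| + #A sketch of the normalization above: each entry is estimated as | |
| + #K_ij=C_ij/(n_i*n_j*delta_t*box_vol), with n_i,n_j number concentrations. | |
| + #Made-up example (hypothetical names): three monomers, one dimer, and | |
| + #three observed monomer-monomer collisions: | |
| + if 0: | |
| +     dist_demo=s.array([1.,1.,1.,2.]) #cluster sizes in the box | |
| +     B_demo=s.array([[1.,1.]]) #one collision type: monomer-monomer | |
| +     C_demo=s.array([3.]) | |
| +     print kernel_entries_normalized(B_demo,dist_demo,C_demo,box_vol,delta_t) | |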
801 | + | |
802 | + | |
803 | + | |
804 | + #Now a function to calculate the radius of gyration | |
805 | + def calc_radius(x_arr,y_arr,z_arr,Len): | |
806 | + #here x_arr is one-dimensional corresponding to a single configuration | |
807 | + r_0j=s.zeros((len(x_arr)-1)) | |
808 | + for j in xrange(1,len(x_arr)): #so, particle zero is now the reference particle | |
809 | + r_0j[j-1]=x_arr[0]-x_arr[j] | |
810 | + r_0j[j-1]=-(r_0j[j-1]-Len*n.round(r_0j[j-1]/Len)) | |
811 | + #r_ij[count_int]=-r_ij[count_int] #I'd better reverse the signs now. | |
812 | + #print 'i and j are, ', (i+1), (j+1) | |
813 | + | |
814 | + | |
815 | + #Now I re-define x_arr in order to take the variance correctly | |
816 | + x_arr[0]=0. | |
817 | + x_arr[1:n_part]=r_0j | |
818 | + | |
819 | + #var_x_arr[:]=s.var(r_0j, axis=1) | |
820 | + var_x_arr=s.var(x_arr) | |
821 | + | |
822 | + for j in xrange(1,len(y_arr)): #so, particle zero is now the reference particle | |
823 | + r_0j[j-1]=y_arr[0]-y_arr[j] | |
824 | + r_0j[j-1]=-(r_0j[j-1]-Len*n.round(r_0j[j-1]/Len)) | |
825 | + #r_ij[count_int]=-r_ij[count_int] #I'd better reverse the signs now. | |
826 | + #print 'i and j are, ', (i+1), (j+1) | |
827 | + | |
828 | + | |
829 | + #Now I re-define y_arr in order to take the variance correctly | |
830 | + y_arr[0]=0. | |
831 | + y_arr[1:n_part]=r_0j | |
832 | + | |
833 | + #var_x_arr[:]=s.var(r_0j, axis=1) | |
834 | + var_y_arr=s.var(y_arr) | |
835 | + | |
836 | + | |
837 | + | |
838 | + for j in xrange(1,len(z_arr)): #so, particle zero is now the reference particle | |
839 | + r_0j[j-1]=z_arr[0]-z_arr[j] | |
840 | + r_0j[j-1]=-(r_0j[j-1]-Len*n.round(r_0j[j-1]/Len)) | |
841 | + #r_ij[count_int]=-r_ij[count_int] #I'd better reverse the signs now. | |
842 | + #print 'i and j are, ', (i+1), (j+1) | |
843 | + | |
844 | + | |
845 | + #Now I re-define z_arr in order to take the variance correctly | |
846 | + z_arr[0]=0. | |
847 | + z_arr[1:n_part]=r_0j | |
848 | + | |
849 | + #var_x_arr[:]=s.var(r_0j, axis=1) | |
850 | + var_z_arr=s.var(z_arr) | |
851 | + | |
852 | + radius=s.sqrt(var_x_arr+var_y_arr+var_z_arr) | |
853 | + return radius | |
854 | + | |
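| + #The loops above implement the minimum-image convention | |
| + #r -> r-Len*round(r/Len); a vectorized sketch of the same folding for | |
| + #one axis (made-up coordinates, one particle across the boundary): | |
| + if 0: | |
| +     x_demo=s.array([0.2,0.9,Len-0.1]) | |
| +     dx=x_demo-x_demo[0] | |
| +     dx=dx-Len*n.round(dx/Len) #fold into [-Len/2,Len/2] | |
| +     print dx, s.var(dx) #-> [0. 0.7 -0.3]; s.var(dx) is the x-term in the radius | |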
855 | + | |
856 | + | |
857 | + | |
858 | + #Now a function to calculate the positions of the particles in a single cluster | |
859 | + def calc_coord_single(x_arr,y_arr,z_arr,Len): | |
860 | + | |
861 | + position_array=s.zeros((len(x_arr),3)) | |
862 | + | |
863 | + #here x_arr is one-dimensional corresponding to a single configuration | |
864 | + r_0j=s.zeros((len(x_arr)-1)) | |
865 | + for j in xrange(1,len(x_arr)): #so, particle zero is now the reference particle | |
866 | + r_0j[j-1]=x_arr[0]-x_arr[j] | |
867 | + r_0j[j-1]=-(r_0j[j-1]-Len*n.round(r_0j[j-1]/Len)) | |
868 | + #r_ij[count_int]=-r_ij[count_int] #I'd better reverse the signs now. | |
869 | + #print 'i and j are, ', (i+1), (j+1) | |
870 | + | |
871 | + | |
872 | + #Now I re-define x_arr to hold the folded coordinates relative to particle 0 | |
873 | + x_arr[0]=0. | |
874 | + x_arr[1:n_part]=r_0j | |
875 | + | |
876 | + #var_x_arr[:]=s.var(r_0j, axis=1) | |
877 | + #var_x_arr=s.var(x_arr) | |
878 | + | |
879 | + position_array[:,0]=x_arr | |
880 | + | |
881 | + for j in xrange(1,len(y_arr)): #so, particle zero is now the reference particle | |
882 | + r_0j[j-1]=y_arr[0]-y_arr[j] | |
883 | + r_0j[j-1]=-(r_0j[j-1]-Len*n.round(r_0j[j-1]/Len)) | |
884 | + #r_ij[count_int]=-r_ij[count_int] #I'd better reverse the signs now. | |
885 | + #print 'i and j are, ', (i+1), (j+1) | |
886 | + | |
887 | + | |
888 | + #Now I re-define y_arr to hold the folded coordinates relative to particle 0 | |
889 | + y_arr[0]=0. | |
890 | + y_arr[1:n_part]=r_0j | |
891 | + | |
892 | + #var_x_arr[:]=s.var(r_0j, axis=1) | |
893 | + #var_y_arr=s.var(y_arr) | |
894 | + | |
895 | + position_array[:,1]=y_arr | |
896 | + | |
897 | + | |
898 | + for j in xrange(1,len(z_arr)): #so, particle zero is now the reference particle | |
899 | + r_0j[j-1]=z_arr[0]-z_arr[j] | |
900 | + r_0j[j-1]=-(r_0j[j-1]-Len*n.round(r_0j[j-1]/Len)) | |
901 | + #r_ij[count_int]=-r_ij[count_int] #I'd better reverse the signs now. | |
902 | + #print 'i and j are, ', (i+1), (j+1) | |
903 | + | |
904 | + | |
905 | + #Now I re-define z_arr to hold the folded coordinates relative to particle 0 | |
906 | + z_arr[0]=0. | |
907 | + z_arr[1:n_part]=r_0j | |
908 | + | |
909 | + #var_x_arr[:]=s.var(r_0j, axis=1) | |
910 | + #var_z_arr=s.var(z_arr) | |
911 | + | |
912 | + position_array[:,2]=z_arr | |
913 | + | |
914 | + | |
915 | +# radius=s.sqrt(var_x_arr+var_y_arr+var_z_arr) | |
916 | + | |
917 | + return position_array | |
918 | + | |
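| + #A sketch of the intended use: rebuild a cluster's geometry around its | |
| + #first particle (made-up positions; the output file name is hypothetical): | |
| + if 0: | |
| +     pos_demo=calc_coord_single(s.array([0.2,0.9,Len-0.1]),s.zeros(3),s.zeros(3),Len) | |
| +     p.save("cluster_coords_demo.dat",pos_demo) | |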
919 | + | |
920 | + | |
921 | + | |
922 | + | |
923 | + #Now I try loading the R script | |
924 | + | |
925 | + #r.source("cluster_functions.R") | |
926 | + | |
927 | + #I now calculate the number of clusters in each configuration | |
928 | + | |
929 | + n_clusters=s.zeros(n_config) | |
930 | + mean_dist_part_single_cluster=s.zeros(n_config) | |
931 | + | |
932 | + overall_coord_number=s.zeros(n_config) | |
933 | + v_aver=s.zeros(n_config) | |
934 | + size_single_cluster=s.zeros(n_config) | |
935 | + r_gyr_single_cluster=s.zeros(n_config) | |
936 | + coord_number_single_cluster=s.zeros(n_config) | |
937 | + | |
938 | + correct_coord_number=s.zeros(n_config) | |
939 | + | |
940 | + | |
941 | +# min_dist=s.zeros(n_config) | |
942 | + dist_mat=s.zeros((n_part,n_part)) | |
943 | + for i in xrange(0,n_config): | |
944 | + read_pos="../1/read_pos_%1d"%my_selection[i] | |
945 | + print "read_pos is, ", read_pos | |
946 | + #cluster_name="cluster_dist%05d"%my_selection[i] | |
947 | + test_arr=p.load(read_pos) | |
948 | + #test_arr=s.reshape(test_arr,(n_part,3)) | |
949 | +# if (i==14): | |
950 | +# p.save("test_14.dat",test_arr) | |
951 | + #dist_mat=r.distance(test_arr) | |
952 | + x_pos=test_arr[:,0] | |
953 | + y_pos=test_arr[:,1] | |
954 | + z_pos=test_arr[:,2] | |
955 | + dist_mat=d_calc.distance_calc(x_pos,y_pos,z_pos, box_size) | |
956 | +# if (i==71): | |
957 | +# p.save("distance_matrix_71.dat",dist_mat) | |
958 | +# p.save("x_pos_71.dat",x_pos) | |
959 | + | |
960 | + | |
961 | + | |
962 | + #clust_struc= (r.mycluster2(dist_mat,threshold)) #a cumbersome | |
963 | + #way to make sure I have an array even when clust_struct is a single | |
964 | + #number (i.e. I have only a cluster) | |
965 | +# print'clust_struct is, ', clust_struc | |
966 | +# n_clusters[i]=r.mycluster(dist_mat,threshold) | |
967 | + | |
968 | + | |
969 | +# > Lorenzo, | |
970 | +# > > if your graph is `g` then | |
971 | +# > > degree(g) | |
972 | +# > > gives the number of direct neighbors of each vertex (or particle). | |
973 | +# Just to translate it to Python: g.degree() gives the number of direct | |
974 | +# neighbors of each vertex. If your graph is directed, you may want | |
975 | +# to count only the outgoing or the incoming edges: g.degree(igraph.OUT) | |
976 | +# or g.degree(igraph.IN) | |
977 | + | |
978 | +# > > So | |
979 | +# > > mean(degree(g)) | |
980 | +# In Python: sum(g.degree()) / float(g.vcount()) | |
981 | + | |
982 | +# > > (and to turn an adjacency matrix `am` into an igraph object `g` just | |
983 | +# > > use "g <- graph.adjacency(am)") | |
984 | +# In Python: g = Graph.Adjacency(matrix) | |
985 | +# e.g. g = Graph.Adjacency([[0,1,0],[1,0,1],[0,1,0]]) | |
986 | + | |
987 | + | |
988 | + cluster_obj=ig.Graph.Adjacency((dist_mat <= threshold).tolist(),\ | |
989 | + ig.ADJ_UNDIRECTED) | |
990 | + | |
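| + #A sketch of what the construction above yields for three collinear | |
| + #particles spaced 1.0 apart (a path graph 0-1-2). The zero diagonal of | |
| + #dist_mat passes the threshold, so every vertex also gets a self-loop, | |
| + #and igraph counts a loop twice in the degree (hence the -2 below): | |
| + if 0: | |
| +     dm=s.array([[0.,1.,2.],[1.,0.,1.],[2.,1.,0.]]) | |
| +     g=ig.Graph.Adjacency((dm<=threshold).tolist(),ig.ADJ_UNDIRECTED) | |
| +     print g.degree() #-> [3,4,3]; subtracting 2 gives [1,2,1] | |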
991 | + # Now I start the calculation of the coordination number | |
992 | + | |
993 | + coord_list=cluster_obj.degree() #Now I have a list with the number of 1st | |
994 | + #neighbors of each particle | |
995 | + | |
996 | + #coord_arr=s.zeros(len(coord_list)) | |
997 | + | |
998 | + | |
999 | + | |
1000 | +# Lorenzo, I'm not sure why you would like to use adjacency matrices, | |
1001 | +# igraph graphs are much better in most cases, especially if the graph | |
1002 | +# is large. Here is how to calculate the mean degree per connected | |
1003 | +# component in R, assuming your graph is 'g': | |
1004 | + | |
1005 | +# comps <- decompose.graph(g) | |
1006 | +# sapply(comps, function(x) mean(degree(x))) | |
1007 | + | |
1008 | +# Gabor | |
1009 | + | |
1010 | + | |
1011 | + | |
1012 | +# In Python: starting with your graph, you obtain a VertexClustering | |
1013 | +# object somehow (I assume you already have it). Now, cl[0] gives the | |
1014 | +# vertex IDs in the first component, cl[1] gives the vertex IDs in the | |
1015 | +# second and so on. If the original graph is called g, then | |
1016 | +# g.degree(cl[0]) gives the degrees of those vertices, so the average | |
1017 | +# degrees in each connected component are as follows (I'm simply | |
1018 | +# iterating over the components in a for loop): | |
1019 | + | |
1020 | +# cl=g.components() | |
1021 | +# for idx, component in enumerate(cl): | |
1022 | +# degrees = g.degree(component) | |
1023 | +# print "Component #%d: %s" % (idx, sum(degrees)/float(len(degrees))) | |
1024 | + | |
1025 | +# -- T. | |
1026 | + | |
1027 | + | |
1028 | + | |
1029 | + | |
1030 | + #I need to get an array out of the list of 1st-neighbor counts | |
1031 | + | |
1032 | + #for l in xrange(len(coord_list)): | |
1033 | + #coord_arr[l]=coord_list[l] | |
1034 | + | |
1035 | + #print "coord_list is, ", coord_list | |
1036 | + coord_arr=s.asarray(coord_list)-2 #Important! The diagonal of dist_mat is zero, so every vertex carries a self-loop, which igraph counts twice in the degree: subtract 2. | |
1037 | + | |
1038 | + cluster_obj.simplify() | |
1039 | + clustering=cluster_obj.clusters() | |
1040 | + #n_clusters[i]=p.load("nc_temp.dat") | |
1041 | + n_clusters[i]=len(clustering) | |
1042 | + print "the total number of clusters and my_config are,", n_clusters[i], my_selection[i] | |
1043 | + | |
1044 | + my_cluster=clustering.sizes() | |
1045 | + | |
1046 | + #Now I create the array which will contain all the cluster sizes by changing the previous | |
1047 | + #list into a scipy array. | |
1048 | + | |
1049 | + | |
1050 | + | |
1051 | + my_cluster=s.asarray(my_cluster) | |
1052 | + | |
1053 | + | |
1054 | + | |
1055 | + #Now I re-organize the particles in my configuration | |
1056 | + #by putting together those which are in the same | |
1057 | + #cluster | |
1058 | + clust_struc=clustering.membership | |
1059 | + clust_struc=s.asarray(clust_struc) | |
1060 | + #if (i==0): | |
1061 | +# print "clust_struc[4727] and my_selection[i] are, ", clust_struc[4727], my_selection[i] | |
1062 | +# print "clust_struc[29] and my_selection[i] are, ", clust_struc[29], my_selection[i] | |
1063 | + | |
1064 | + | |
1065 | + part_in_clust=s.argsort(clust_struc) | |
1066 | + if (i ==0 ): | |
1067 | + #cluster_record_old=part_in_clust | |
1068 | + part_in_clust_old=s.copy(part_in_clust) | |
1069 | + len_my_cluster_old=len(my_cluster) | |
1070 | + my_cluster_old=s.copy(my_cluster) | |
1071 | + | |
1072 | + | |
1073 | + | |
1074 | + coord_number_dist=s.zeros(len(my_cluster)) #this will contain the distribution of | |
1075 | + #the coordination numbers | |
1076 | + | |
1077 | + | |
1078 | + my_sum=s.cumsum(my_cluster) | |
1079 | + f=s.arange(1) #simply 0 but as an array! | |
1080 | + my_lim=s.concatenate((f,my_sum)) #my_lim[m]:my_lim[m+1] slices cluster m out of part_in_clust | |
1081 | + if (i==0): | |
1082 | + my_lim_old=my_lim | |
1083 | + | |
1084 | + | |
1085 | + | |
1086 | + for m in xrange(0,len(my_cluster)): | |
1087 | + #if (abs(my_lim[m]-my_lim[m+1])<2): | |
1088 | + # r_gyr_dist[m]=0.0 # r_gyr_dist has already been initialized to zero! | |
1089 | + if (abs(my_lim[m]-my_lim[m+1])>=2): | |
1090 | + | |
1091 | + x_pos2=x_pos[part_in_clust[my_lim[m]:my_lim[m+1]]] | |
1092 | + y_pos2=y_pos[part_in_clust[my_lim[m]:my_lim[m+1]]] | |
1093 | + z_pos2=z_pos[part_in_clust[my_lim[m]:my_lim[m+1]]] | |
1094 | + | |
1095 | + | |
1096 | + coord_clust=coord_arr[part_in_clust[my_lim[m]:my_lim[m+1]]] | |
1097 | + #print "coord_clust is, ", coord_clust | |
1098 | + coord_number_dist[m]=s.mean(coord_clust) | |
1099 | + | |
1100 | + correct_coord_number[i]=coord_number_dist.mean() #i.e.: associate a mean coordination number to each cluster and | |
1101 | + #then take the average over the set of coordination numbers | |
1102 | + #(one per cluster) | |
1103 | + | |
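| +#Why averaging per cluster matters (an illustration, with made-up degrees): | |
| +#a dimer (degrees 1,1) plus a chain of four (degrees 1,2,2,1) gives a | |
| +#per-particle mean of 8/6=1.33, but a per-cluster mean of (1.0+1.5)/2=1.25, | |
| +#since every cluster is weighted equally regardless of its size. | |
| +if 0: | |
| +    print s.mean(s.array([1.,1.,1.,2.,2.,1.])) #per-particle: 1.333... | |
| +    print s.mean(s.array([(1.+1.)/2.,(1.+2.+2.+1.)/4.])) #per-cluster: 1.25 | |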
1104 | +p.save("coordination_numbers_averaged.dat", correct_coord_number ) | |
1105 | + | |
1106 | + | |
1107 | +print "So far so good" |