/*
 * partialator.c
 *
 * Scaling and post refinement for coherent nanocrystallography
 *
 * Copyright © 2012 Deutsches Elektronen-Synchrotron DESY,
 *                  a research centre of the Helmholtz Association.
 *
 * Authors:
 *   2010-2012 Thomas White
 *
 * This file is part of CrystFEL.
 *
 * CrystFEL is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CrystFEL is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CrystFEL.  If not, see <http://www.gnu.org/licenses/>.
 *
 */


#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <getopt.h>
#include <assert.h>
#include <pthread.h>

#include "image.h"
#include "utils.h"
#include "detector.h"
#include "symmetry.h"
#include "stream.h"
#include "geometry.h"
#include "peaks.h"
#include "thread-pool.h"
#include "beam-parameters.h"
#include "reflist.h"
#include "reflist-utils.h"

#include "post-refinement.h"
#include "hrs-scaling.h"
#include "scaling-report.h"


static void show_help(const char *s)
{
	printf("Syntax: %s [options]\n\n", s);
	printf(
"Scaling and post refinement for coherent nanocrystallography.\n"
"\n"
"  -h, --help                 Display this help message.\n"
"\n"
"  -i, --input=<filename>     Specify the name of the input 'stream'.\n"
"                              (must be a file, not e.g. stdin)\n"
"  -o, --output=<filename>    Output filename.  Default: partialator.hkl.\n"
"  -g, --geometry=<file>      Get detector geometry from file.\n"
"  -b, --beam=<file>          Get beam parameters from file, which provides\n"
"                              initial values for parameters, and nominal\n"
"                              wavelengths if no per-shot value is found in\n"
"                              an HDF5 file.\n"
"  -y, --symmetry=<sym>       Merge according to symmetry <sym>.\n"
"  -n, --iterations=<n>       Run <n> cycles of scaling and post-refinement.\n"
"      --no-scale             Fix all the scaling factors at unity.\n"
"  -r, --reference=<file>     Refine images against reflections in <file>,\n"
"                              instead of taking the mean of the intensity\n"
"                              estimates.\n"
"\n"
"  -j <n>                     Run <n> analyses in parallel.\n");
}


/* Per-task arguments for one refinement job */
struct refine_args
{
	RefList *full;
	struct image *image;
};


/* Shared state for the refinement thread pool */
struct queue_args
{
	int n;
	int n_done;
	int n_total_patterns;
	struct image *images;
	struct refine_args task_defaults;
};


/* Thread-pool work function: post-refine one image against "full" */
static void refine_image(void *task, int id)
{
	struct refine_args *pargs = task;
	struct image *image = pargs->image;
	image->id = id;

	pr_refine(image, pargs->full);
}


/* Thread-pool callback: hand out the next image to be refined */
static void *get_image(void *vqargs)
{
	struct refine_args *task;
	struct queue_args *qargs = vqargs;

	task = malloc(sizeof(struct refine_args));
	memcpy(task, &qargs->task_defaults, sizeof(struct refine_args));

	task->image = &qargs->images[qargs->n];

	qargs->n++;

	return task;
}


/* Thread-pool callback: tidy up after a refinement job */
static void done_image(void *vqargs, void *task)
{
	struct queue_args *qargs = vqargs;

	qargs->n_done++;

	progress_bar(qargs->n_done, qargs->n_total_patterns, "Refining");
	free(task);
}


/* Post-refine all images in parallel using the thread pool */
static void refine_all(struct image *images, int n_total_patterns,
                       struct detector *det, RefList *full, int nthreads)
{
	struct refine_args task_defaults;
	struct queue_args qargs;

	task_defaults.full = full;
	task_defaults.image = NULL;

	qargs.task_defaults = task_defaults;
	qargs.n = 0;
	qargs.n_done = 0;
	qargs.n_total_patterns = n_total_patterns;
	qargs.images = images;

	/* Don't have threads which are doing nothing */
	if ( n_total_patterns < nthreads ) nthreads = n_total_patterns;

	run_threads(nthreads, refine_image, get_image, done_image,
	            &qargs, n_total_patterns, 0, 0, 0);
}


/* Decide which reflections can be scaled */
static int
select_scalable_reflections(RefList *list, RefList *reference)
{
	Reflection *refl;
	RefListIterator *iter;
	int nobs = 0;

	for ( refl = first_refl(list, &iter);
	      refl != NULL;
	      refl = next_refl(refl, iter) )
	{
		int sc = 1;
		double v, esd;

		/* This means the reflection was not found on the last check */
		if ( get_redundancy(refl) == 0 ) sc = 0;

		if ( get_partiality(refl) < 0.1 ) sc = 0;

		v = fabs(get_intensity(refl));
		esd = get_esd_intensity(refl);
		if ( v < 0.5*esd ) sc = 0;

		/* If we are scaling against a reference set, we additionally
		 * require that this reflection is in the reference list. */
		if ( reference != NULL ) {
			signed int h, k, l;
			get_indices(refl, &h, &k, &l);
			if ( find_refl(reference, h, k, l) == NULL ) sc = 0;
		}

		set_scalable(refl, sc);
		if ( sc ) nobs++;
	}

	return nobs;
}


static void select_reflections_for_refinement(struct image *images, int n,
                                              RefList *full,
                                              int have_reference)
{
	int i;

	for ( i=0; i<n; i++ ) {

		Reflection *refl;
		RefListIterator *iter;
		int n_acc = 0;
		int n_noscale = 0;
		int n_fewmatch = 0;
		int n_ref = 0;
		int n_nomatch = 0;

		for ( refl = first_refl(images[i].reflections, &iter);
		      refl != NULL;
		      refl = next_refl(refl, iter) )
		{
			signed int h, k, l;

			n_ref++;

			/* Use this reflection as a refinement guide only if it
			 * is itself scalable and its "full" equivalent was
			 * merged from at least two patterns (or a reference
			 * set is in use). */
			get_indices(refl, &h, &k, &l);
			if ( !get_scalable(refl) ) {

				n_noscale++;
				set_refinable(refl, 0);

			} else {

				Reflection *f = find_refl(full, h, k, l);

				if ( f != NULL ) {

					if ( (get_redundancy(f) >= 2)
					  || have_reference )
					{
						set_refinable(refl, 1);
						n_acc++;
					} else {
						n_fewmatch++;
					}

				} else {
					n_nomatch++;
					set_refinable(refl, 0);
				}

			}

		}

		//STATUS("Image %4i: %i guide reflections accepted "
		//       "(%i not scalable, %i few matches, %i total)\n",
		//       i, n_acc, n_noscale, n_fewmatch, n_ref);

		/* This would be a silly situation, since there must be a match
		 * if THIS pattern has a scalable part of the reflection! */
		assert(n_nomatch == 0);

	}
}


int main(int argc, char *argv[])
{
	int c;
	char *infile = NULL;
	char *outfile = NULL;
	char *geomfile = NULL;
	char *sym_str = NULL;
	SymOpList *sym;
	FILE *fh;
	int nthreads = 1;
	struct detector *det;
	int i;
	int n_total_patterns;
	struct image *images;
	int n_iter = 10;
	struct beam_params *beam = NULL;
	RefList *full;
	int n_usable_patterns = 0;
	int nobs;
	char *reference_file = NULL;
	RefList *reference = NULL;
	int n_dud;
	int have_reference = 0;
	char cmdline[1024];
	SRContext *sr;
	int noscale = 0;

	/* Long options */
	const struct option longopts[] = {
		{"help",       0, NULL, 'h'},
		{"input",      1, NULL, 'i'},
		{"output",     1, NULL, 'o'},
		{"geometry",   1, NULL, 'g'},
		{"beam",       1, NULL, 'b'},
		{"symmetry",   1, NULL, 'y'},
		{"iterations", 1, NULL, 'n'},
		{"no-scale",   0, &noscale, 1},
		{"reference",  1, NULL, 'r'},
		{0, 0, NULL, 0}
	};

	/* Record the command line for the scaling report */
	cmdline[0] = '\0';
	for ( i=1; i<argc; i++ ) {
		strncat(cmdline, argv[i], 1023-strlen(cmdline));
		strncat(cmdline, " ", 1023-strlen(cmdline));
	}

	/* Short options */
	while ((c = getopt_long(argc, argv, "hi:o:g:b:y:n:r:j:",
	                        longopts, NULL)) != -1)
	{
		switch (c) {

		case 'h' :
			show_help(argv[0]);
			return 0;

		case 'i' :
			infile = strdup(optarg);
			break;

		case 'o' :
			outfile = strdup(optarg);
			break;

		case 'g' :
			geomfile = strdup(optarg);
			break;

		case 'b' :
			beam = get_beam_parameters(optarg);
			if ( beam == NULL ) {
				ERROR("Failed to load beam parameters"
				      " from '%s'\n", optarg);
				return 1;
			}
			break;

		case 'y' :
			sym_str = strdup(optarg);
			break;

		case 'n' :
			n_iter = atoi(optarg);
			break;

		case 'r' :
			reference_file = strdup(optarg);
			break;

		case 'j' :
			nthreads = atoi(optarg);
			break;

		case 0 :
			break;

		default :
			return 1;

		}
	}

	/* Sanitise input filename and open.  The stream must be a real file
	 * because it gets read twice (once to count patterns, once to load
	 * them). */
	if ( infile == NULL ) {
		ERROR("Please give the filename of the input stream.\n");
		return 1;
	}
	fh = fopen(infile, "r");
	if ( fh == NULL ) {
		ERROR("Failed to open input file '%s'\n", infile);
		return 1;
	}

	if ( outfile == NULL ) outfile = strdup("partialator.hkl");

	if ( sym_str == NULL ) sym_str = strdup("1");
	sym = get_pointgroup(sym_str);
	free(sym_str);

	/* Get detector geometry */
	if ( geomfile == NULL ) {
		ERROR("You need to specify a geometry file with --geometry\n");
		return 1;
	}
	det = get_detector_geometry(geomfile);
	if ( det == NULL ) {
		ERROR("Failed to read detector geometry from '%s'\n",
		      geomfile);
		return 1;
	}
	free(geomfile);

	if ( beam == NULL ) {
		ERROR("You must provide a beam parameters file.\n");
		return 1;
	}

	/* Read the reference reflections, if a reference was given */
	if ( reference_file != NULL ) {

		RefList *list;

		list = read_reflections(reference_file);
		free(reference_file);
		if ( list == NULL ) {
			ERROR("Failed to read reference reflections.\n");
			return 1;
		}
		reference = asymmetric_indices(list, sym);
		reflist_free(list);
		have_reference = 1;

	}

	/* Count the patterns in the file */
	n_total_patterns = count_patterns(fh);
	if ( n_total_patterns == 0 ) {
		ERROR("No patterns to process.\n");
		return 1;
	}
	STATUS("There are %i patterns to process.\n", n_total_patterns);
	rewind(fh);

	images = malloc(n_total_patterns * sizeof(struct image));
	if ( images == NULL ) {
		ERROR("Couldn't allocate memory for the images.\n");
		return 1;
	}

	/* Read in all the patterns, filling in what we know so far */
	nobs = 0;
	for ( i=0; i<n_total_patterns; i++ ) {

		RefList *as;
		struct image *cur = &images[n_usable_patterns];

		cur->det = det;

		if ( read_chunk(fh, cur) != 0 ) {
			/* Should not happen, because we counted the patterns
			 * earlier.
*/ ERROR("Failed to read chunk from the input stream.\n"); return 1; } /* Won't be needing this, if it exists */ image_feature_list_free(cur->features); cur->features = NULL; /* "n_usable_patterns" will not be incremented in this case */ if ( cur->indexed_cell == NULL ) continue; /* Fill in initial estimates of stuff */ cur->div = beam->divergence; cur->bw = beam->bandwidth; cur->width = det->max_fs; cur->height = det->max_ss; cur->osf = 1.0; cur->profile_radius = 0.003e9; cur->pr_dud = 0; /* Muppet proofing */ cur->data = NULL; cur->flags = NULL; cur->beam = NULL; /* This is the raw list of reflections */ as = asymmetric_indices(cur->reflections, sym); reflist_free(cur->reflections); cur->reflections = as; update_partialities(cur); nobs += select_scalable_reflections(cur->reflections, reference); progress_bar(i, n_total_patterns-1, "Loading pattern data"); n_usable_patterns++; } fclose(fh); /* Make initial estimates */ STATUS("Performing initial scaling.\n"); if ( noscale ) STATUS("Scale factors fixed at 1.\n"); full = scale_intensities(images, n_usable_patterns, reference, nthreads, noscale); sr = sr_titlepage(images, n_usable_patterns, "scaling-report.pdf", infile, cmdline); sr_iteration(sr, 0, images, n_usable_patterns, full); /* Iterate */ for ( i=0; ireflections, reference); } /* Re-estimate all the full intensities */ reflist_free(full); full = scale_intensities(images, n_usable_patterns, reference, nthreads, noscale); sr_iteration(sr, i+1, images, n_usable_patterns, full); } sr_finish(sr); n_dud = 0; for ( i=0; i