// SPDX-License-Identifier: GPL-2.0
/*
 * kernel/lockdep_proc.c
 *
 * Runtime locking correctness validator
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * Code for /proc/lockdep and /proc/lockdep_stats:
 *
 */
#include <linux/export.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <linux/uaccess.h>
#include <asm/div64.h>

#include "lockdep_internals.h"

/*
 * Since iteration of lock_classes is done without holding the lockdep lock,
 * it is not safe to iterate all_lock_classes list directly as the iteration
 * may branch off to free_lock_classes or the zapped list. Iteration is done
 * directly on the lock_classes array by checking the lock_classes_in_use
 * bitmap and max_lock_class_idx.
 */
#define iterate_lock_classes(idx, class)                                \
        for (idx = 0, class = lock_classes; idx <= max_lock_class_idx; \
             idx++, class++)

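/*
 * seq_file iterator for /proc/lockdep: the position is an index into the
 * lock_classes[] array, bounded by max_lock_class_idx.
 */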
static void *l_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct lock_class *class = v;

        ++class;
        *pos = class - lock_classes;
        return (*pos > max_lock_class_idx) ? NULL : class;
}

static void *l_start(struct seq_file *m, loff_t *pos)
{
        unsigned long idx = *pos;

        if (idx > max_lock_class_idx)
                return NULL;
        return lock_classes + idx;
}

static void l_stop(struct seq_file *m, void *v)
{
}

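/*
 * Print a lock class name; fall back to the symbolized key when the class
 * has no name, and append "#<name_version>" and "/<subclass>" when set.
 */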
static void print_name(struct seq_file *m, struct lock_class *class)
{
        char str[KSYM_NAME_LEN];
        const char *name = class->name;

        if (!name) {
                name = __get_key_name(class->key, str);
                seq_printf(m, "%s", name);
        } else {
                seq_printf(m, "%s", name);
                if (class->name_version > 1)
                        seq_printf(m, "#%d", class->name_version);
                if (class->subclass)
                        seq_printf(m, "/%d", class->subclass);
        }
}

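/*
 * Emit one lock class: key, optional OPS/FD/BD counters, usage characters
 * and name, followed by its distance-1 forward dependencies when
 * CONFIG_PROVE_LOCKING is enabled.
 */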
static int l_show(struct seq_file *m, void *v)
{
        struct lock_class *class = v;
        struct lock_list *entry;
        char usage[LOCK_USAGE_CHARS];
        int idx = class - lock_classes;

        if (v == lock_classes)
                seq_printf(m, "all lock classes:\n");

        if (!test_bit(idx, lock_classes_in_use))
                return 0;

        seq_printf(m, "%p", class->key);
#ifdef CONFIG_DEBUG_LOCKDEP
        seq_printf(m, " OPS:%8ld", debug_class_ops_read(class));
#endif
        if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
                seq_printf(m, " FD:%5ld", lockdep_count_forward_deps(class));
                seq_printf(m, " BD:%5ld", lockdep_count_backward_deps(class));

                get_usage_chars(class, usage);
                seq_printf(m, " %s", usage);
        }

        seq_printf(m, ": ");
        print_name(m, class);
        seq_puts(m, "\n");

        if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
                list_for_each_entry(entry, &class->locks_after, entry) {
                        if (entry->distance == 1) {
                                seq_printf(m, " -> [%p] ", entry->class->key);
                                print_name(m, entry->class);
                                seq_puts(m, "\n");
                        }
                }
                seq_puts(m, "\n");
        }

        return 0;
}

static const struct seq_operations lockdep_ops = {
        .start  = l_start,
        .next   = l_next,
        .stop   = l_stop,
        .show   = l_show,
};

#ifdef CONFIG_PROVE_LOCKING
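/*
 * seq_file iterator for /proc/lockdep_chains: position 0 is the header
 * (SEQ_START_TOKEN), position n maps to lock_chains[n - 1].
 */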
static void *lc_start(struct seq_file *m, loff_t *pos)
{
        if (*pos < 0)
                return NULL;

        if (*pos == 0)
                return SEQ_START_TOKEN;

        return lock_chains + (*pos - 1);
}

static void *lc_next(struct seq_file *m, void *v, loff_t *pos)
{
        *pos = lockdep_next_lockchain(*pos - 1) + 1;
        return lc_start(m, pos);
}

static void lc_stop(struct seq_file *m, void *v)
{
}

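/* Emit one lock chain: its irq context and the classes it holds, in order. */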
static int lc_show(struct seq_file *m, void *v)
{
        struct lock_chain *chain = v;
        struct lock_class *class;
        int i;
        static const char * const irq_strs[] = {
                [0]                          = "0",
                [LOCK_CHAIN_HARDIRQ_CONTEXT] = "hardirq",
                [LOCK_CHAIN_SOFTIRQ_CONTEXT] = "softirq",
                [LOCK_CHAIN_SOFTIRQ_CONTEXT|
                 LOCK_CHAIN_HARDIRQ_CONTEXT] = "hardirq|softirq",
        };

        if (v == SEQ_START_TOKEN) {
                if (!nr_free_chain_hlocks)
                        seq_printf(m, "(buggered) ");
                seq_printf(m, "all lock chains:\n");
                return 0;
        }

        seq_printf(m, "irq_context: %s\n", irq_strs[chain->irq_context]);

        for (i = 0; i < chain->depth; i++) {
                class = lock_chain_get_class(chain, i);
                if (!class->key)
                        continue;

                seq_printf(m, "[%p] ", class->key);
                print_name(m, class);
                seq_puts(m, "\n");
        }
        seq_puts(m, "\n");

        return 0;
}

static const struct seq_operations lockdep_chains_ops = {
        .start  = lc_start,
        .next   = lc_next,
        .stop   = lc_stop,
        .show   = lc_show,
};
#endif /* CONFIG_PROVE_LOCKING */

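/* Print the CONFIG_DEBUG_LOCKDEP internal counters; a no-op otherwise. */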
static void lockdep_stats_debug_show(struct seq_file *m)
{
#ifdef CONFIG_DEBUG_LOCKDEP
        unsigned long long hi1 = debug_atomic_read(hardirqs_on_events),
                           hi2 = debug_atomic_read(hardirqs_off_events),
                           hr1 = debug_atomic_read(redundant_hardirqs_on),
                           hr2 = debug_atomic_read(redundant_hardirqs_off),
                           si1 = debug_atomic_read(softirqs_on_events),
                           si2 = debug_atomic_read(softirqs_off_events),
                           sr1 = debug_atomic_read(redundant_softirqs_on),
                           sr2 = debug_atomic_read(redundant_softirqs_off);

        seq_printf(m, " chain lookup misses:           %11llu\n",
                debug_atomic_read(chain_lookup_misses));
        seq_printf(m, " chain lookup hits:             %11llu\n",
                debug_atomic_read(chain_lookup_hits));
        seq_printf(m, " cyclic checks:                 %11llu\n",
                debug_atomic_read(nr_cyclic_checks));
        seq_printf(m, " redundant checks:              %11llu\n",
                debug_atomic_read(nr_redundant_checks));
        seq_printf(m, " redundant links:               %11llu\n",
                debug_atomic_read(nr_redundant));
        seq_printf(m, " find-mask forwards checks:     %11llu\n",
                debug_atomic_read(nr_find_usage_forwards_checks));
        seq_printf(m, " find-mask backwards checks:    %11llu\n",
                debug_atomic_read(nr_find_usage_backwards_checks));

        seq_printf(m, " hardirq on events:             %11llu\n", hi1);
        seq_printf(m, " hardirq off events:            %11llu\n", hi2);
        seq_printf(m, " redundant hardirq ons:         %11llu\n", hr1);
        seq_printf(m, " redundant hardirq offs:        %11llu\n", hr2);
        seq_printf(m, " softirq on events:             %11llu\n", si1);
        seq_printf(m, " softirq off events:            %11llu\n", si2);
        seq_printf(m, " redundant softirq ons:         %11llu\n", sr1);
        seq_printf(m, " redundant softirq offs:        %11llu\n", sr2);
#endif
}

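/*
 * Single show function for /proc/lockdep_stats: walk all in-use lock
 * classes, classify their usage bits, then print the global lockdep
 * counters and limits.
 */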
static int lockdep_stats_show(struct seq_file *m, void *v)
{
        unsigned long nr_unused = 0, nr_uncategorized = 0,
                      nr_irq_safe = 0, nr_irq_unsafe = 0,
                      nr_softirq_safe = 0, nr_softirq_unsafe = 0,
                      nr_hardirq_safe = 0, nr_hardirq_unsafe = 0,
                      nr_irq_read_safe = 0, nr_irq_read_unsafe = 0,
                      nr_softirq_read_safe = 0, nr_softirq_read_unsafe = 0,
                      nr_hardirq_read_safe = 0, nr_hardirq_read_unsafe = 0,
                      sum_forward_deps = 0;

#ifdef CONFIG_PROVE_LOCKING
        struct lock_class *class;
        unsigned long idx;

        iterate_lock_classes(idx, class) {
                if (!test_bit(idx, lock_classes_in_use))
                        continue;

                if (class->usage_mask == 0)
                        nr_unused++;
                if (class->usage_mask == LOCKF_USED)
                        nr_uncategorized++;
                if (class->usage_mask & LOCKF_USED_IN_IRQ)
                        nr_irq_safe++;
                if (class->usage_mask & LOCKF_ENABLED_IRQ)
                        nr_irq_unsafe++;
                if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
                        nr_softirq_safe++;
                if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ)
                        nr_softirq_unsafe++;
                if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
                        nr_hardirq_safe++;
                if (class->usage_mask & LOCKF_ENABLED_HARDIRQ)
                        nr_hardirq_unsafe++;
                if (class->usage_mask & LOCKF_USED_IN_IRQ_READ)
                        nr_irq_read_safe++;
                if (class->usage_mask & LOCKF_ENABLED_IRQ_READ)
                        nr_irq_read_unsafe++;
                if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ)
                        nr_softirq_read_safe++;
                if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ_READ)
                        nr_softirq_read_unsafe++;
                if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ)
                        nr_hardirq_read_safe++;
                if (class->usage_mask & LOCKF_ENABLED_HARDIRQ_READ)
                        nr_hardirq_read_unsafe++;

                sum_forward_deps += lockdep_count_forward_deps(class);
        }

#ifdef CONFIG_DEBUG_LOCKDEP
        DEBUG_LOCKS_WARN_ON(debug_atomic_read(nr_unused_locks) != nr_unused);
#endif

#endif
|  | seq_printf(m, " lock-classes:                  %11lu [max: %lu]\n", | 
|  | nr_lock_classes, MAX_LOCKDEP_KEYS); | 
|  | seq_printf(m, " direct dependencies:           %11lu [max: %lu]\n", | 
|  | nr_list_entries, MAX_LOCKDEP_ENTRIES); | 
|  | seq_printf(m, " indirect dependencies:         %11lu\n", | 
|  | sum_forward_deps); | 
|  |  | 
|  | /* | 
|  | * Total number of dependencies: | 
|  | * | 
|  | * All irq-safe locks may nest inside irq-unsafe locks, | 
|  | * plus all the other known dependencies: | 
|  | */ | 
|  | seq_printf(m, " all direct dependencies:       %11lu\n", | 
|  | nr_irq_unsafe * nr_irq_safe + | 
|  | nr_hardirq_unsafe * nr_hardirq_safe + | 
|  | nr_list_entries); | 
|  |  | 
|  | #ifdef CONFIG_PROVE_LOCKING | 
|  | seq_printf(m, " dependency chains:             %11lu [max: %lu]\n", | 
|  | lock_chain_count(), MAX_LOCKDEP_CHAINS); | 
|  | seq_printf(m, " dependency chain hlocks used:  %11lu [max: %lu]\n", | 
|  | MAX_LOCKDEP_CHAIN_HLOCKS - | 
|  | (nr_free_chain_hlocks + nr_lost_chain_hlocks), | 
|  | MAX_LOCKDEP_CHAIN_HLOCKS); | 
|  | seq_printf(m, " dependency chain hlocks lost:  %11u\n", | 
|  | nr_lost_chain_hlocks); | 
|  | #endif | 
|  |  | 
|  | #ifdef CONFIG_TRACE_IRQFLAGS | 
|  | seq_printf(m, " in-hardirq chains:             %11u\n", | 
|  | nr_hardirq_chains); | 
|  | seq_printf(m, " in-softirq chains:             %11u\n", | 
|  | nr_softirq_chains); | 
|  | #endif | 
|  | seq_printf(m, " in-process chains:             %11u\n", | 
|  | nr_process_chains); | 
|  | seq_printf(m, " stack-trace entries:           %11lu [max: %lu]\n", | 
|  | nr_stack_trace_entries, MAX_STACK_TRACE_ENTRIES); | 
|  | #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) | 
|  | seq_printf(m, " number of stack traces:        %11llu\n", | 
|  | lockdep_stack_trace_count()); | 
|  | seq_printf(m, " number of stack hash chains:   %11llu\n", | 
|  | lockdep_stack_hash_count()); | 
|  | #endif | 
|  | seq_printf(m, " combined max dependencies:     %11u\n", | 
|  | (nr_hardirq_chains + 1) * | 
|  | (nr_softirq_chains + 1) * | 
|  | (nr_process_chains + 1) | 
|  | ); | 
|  | seq_printf(m, " hardirq-safe locks:            %11lu\n", | 
|  | nr_hardirq_safe); | 
|  | seq_printf(m, " hardirq-unsafe locks:          %11lu\n", | 
|  | nr_hardirq_unsafe); | 
|  | seq_printf(m, " softirq-safe locks:            %11lu\n", | 
|  | nr_softirq_safe); | 
|  | seq_printf(m, " softirq-unsafe locks:          %11lu\n", | 
|  | nr_softirq_unsafe); | 
|  | seq_printf(m, " irq-safe locks:                %11lu\n", | 
|  | nr_irq_safe); | 
|  | seq_printf(m, " irq-unsafe locks:              %11lu\n", | 
|  | nr_irq_unsafe); | 
|  |  | 
|  | seq_printf(m, " hardirq-read-safe locks:       %11lu\n", | 
|  | nr_hardirq_read_safe); | 
|  | seq_printf(m, " hardirq-read-unsafe locks:     %11lu\n", | 
|  | nr_hardirq_read_unsafe); | 
|  | seq_printf(m, " softirq-read-safe locks:       %11lu\n", | 
|  | nr_softirq_read_safe); | 
|  | seq_printf(m, " softirq-read-unsafe locks:     %11lu\n", | 
|  | nr_softirq_read_unsafe); | 
|  | seq_printf(m, " irq-read-safe locks:           %11lu\n", | 
|  | nr_irq_read_safe); | 
|  | seq_printf(m, " irq-read-unsafe locks:         %11lu\n", | 
|  | nr_irq_read_unsafe); | 
|  |  | 
|  | seq_printf(m, " uncategorized locks:           %11lu\n", | 
|  | nr_uncategorized); | 
|  | seq_printf(m, " unused locks:                  %11lu\n", | 
|  | nr_unused); | 
|  | seq_printf(m, " max locking depth:             %11u\n", | 
|  | max_lockdep_depth); | 
|  | #ifdef CONFIG_PROVE_LOCKING | 
|  | seq_printf(m, " max bfs queue depth:           %11u\n", | 
|  | max_bfs_queue_depth); | 
|  | #endif | 
|  | seq_printf(m, " max lock class index:          %11lu\n", | 
|  | max_lock_class_idx); | 
|  | lockdep_stats_debug_show(m); | 
|  | seq_printf(m, " debug_locks:                   %11u\n", | 
|  | debug_locks); | 
|  |  | 
|  | /* | 
|  | * Zapped classes and lockdep data buffers reuse statistics. | 
|  | */ | 
|  | seq_puts(m, "\n"); | 
|  | seq_printf(m, " zapped classes:                %11lu\n", | 
|  | nr_zapped_classes); | 
|  | #ifdef CONFIG_PROVE_LOCKING | 
|  | seq_printf(m, " zapped lock chains:            %11lu\n", | 
|  | nr_zapped_lock_chains); | 
|  | seq_printf(m, " large chain blocks:            %11u\n", | 
|  | nr_large_chain_blocks); | 
|  | #endif | 
|  | return 0; | 
|  | } | 
|  |  | 
#ifdef CONFIG_LOCK_STAT

struct lock_stat_data {
        struct lock_class *class;
        struct lock_class_stats stats;
};

struct lock_stat_seq {
        struct lock_stat_data *iter_end;
        struct lock_stat_data stats[MAX_LOCKDEP_KEYS];
};

/*
 * sort on absolute number of contentions
 */
static int lock_stat_cmp(const void *l, const void *r)
{
        const struct lock_stat_data *dl = l, *dr = r;
        unsigned long nl, nr;

        nl = dl->stats.read_waittime.nr + dl->stats.write_waittime.nr;
        nr = dr->stats.read_waittime.nr + dr->stats.write_waittime.nr;

        return nr - nl;
}

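/* Print 'length' copies of character 'c', indented by 'offset' spaces. */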
static void seq_line(struct seq_file *m, char c, int offset, int length)
{
        int i;

        for (i = 0; i < offset; i++)
                seq_puts(m, " ");
        for (i = 0; i < length; i++)
                seq_putc(m, c);
        seq_puts(m, "\n");
}

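/*
 * Format nr as nr/1000 with two decimal places; the +5 pre-rounds the
 * digit that gets dropped.
 */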
static void snprint_time(char *buf, size_t bufsiz, s64 nr)
{
        s64 div;
        s32 rem;

        nr += 5; /* for display rounding */
        div = div_s64_rem(nr, 1000, &rem);
        snprintf(buf, bufsiz, "%lld.%02d", (long long)div, (int)rem/10);
}

static void seq_time(struct seq_file *m, s64 time)
{
        char num[22];

        snprint_time(num, sizeof(num), time);
        seq_printf(m, " %14s", num);
}

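/* Print one lock_time record: count, min, max, total and mean. */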
static void seq_lock_time(struct seq_file *m, struct lock_time *lt)
{
        seq_printf(m, "%14lu", lt->nr);
        seq_time(m, lt->min);
        seq_time(m, lt->max);
        seq_time(m, lt->total);
        seq_time(m, lt->nr ? div64_u64(lt->total, lt->nr) : 0);
}

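/*
 * Print the statistics block for one lock class: the write ("-W") and
 * read ("-R") wait/hold time lines, followed by the recorded contention
 * and contending points.
 */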
static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
{
        const struct lockdep_subclass_key *ckey;
        struct lock_class_stats *stats;
        struct lock_class *class;
        const char *cname;
        int i, namelen;
        char name[39];

        class = data->class;
        stats = &data->stats;

        namelen = 38;
        if (class->name_version > 1)
                namelen -= 2; /* XXX truncates versions > 9 */
        if (class->subclass)
                namelen -= 2;

        rcu_read_lock_sched();
        cname = rcu_dereference_sched(class->name);
        ckey  = rcu_dereference_sched(class->key);

        if (!cname && !ckey) {
                rcu_read_unlock_sched();
                return;

        } else if (!cname) {
                char str[KSYM_NAME_LEN];
                const char *key_name;

                key_name = __get_key_name(ckey, str);
                snprintf(name, namelen, "%s", key_name);
        } else {
                snprintf(name, namelen, "%s", cname);
        }
        rcu_read_unlock_sched();

        namelen = strlen(name);
        if (class->name_version > 1) {
                snprintf(name+namelen, 3, "#%d", class->name_version);
                namelen += 2;
        }
        if (class->subclass) {
                snprintf(name+namelen, 3, "/%d", class->subclass);
                namelen += 2;
        }

        if (stats->write_holdtime.nr) {
                if (stats->read_holdtime.nr)
                        seq_printf(m, "%38s-W:", name);
                else
                        seq_printf(m, "%40s:", name);

                seq_printf(m, "%14lu ", stats->bounces[bounce_contended_write]);
                seq_lock_time(m, &stats->write_waittime);
                seq_printf(m, " %14lu ", stats->bounces[bounce_acquired_write]);
                seq_lock_time(m, &stats->write_holdtime);
                seq_puts(m, "\n");
        }

        if (stats->read_holdtime.nr) {
                seq_printf(m, "%38s-R:", name);
                seq_printf(m, "%14lu ", stats->bounces[bounce_contended_read]);
                seq_lock_time(m, &stats->read_waittime);
                seq_printf(m, " %14lu ", stats->bounces[bounce_acquired_read]);
                seq_lock_time(m, &stats->read_holdtime);
                seq_puts(m, "\n");
        }

        if (stats->read_waittime.nr + stats->write_waittime.nr == 0)
                return;

        if (stats->read_holdtime.nr)
                namelen += 2;

        for (i = 0; i < LOCKSTAT_POINTS; i++) {
                char ip[32];

                if (class->contention_point[i] == 0)
                        break;

                if (!i)
                        seq_line(m, '-', 40-namelen, namelen);

                snprintf(ip, sizeof(ip), "[<%p>]",
                                (void *)class->contention_point[i]);
                seq_printf(m, "%40s %14lu %29s %pS\n",
                                name, stats->contention_point[i],
                                ip, (void *)class->contention_point[i]);
        }
        for (i = 0; i < LOCKSTAT_POINTS; i++) {
                char ip[32];

                if (class->contending_point[i] == 0)
                        break;

                if (!i)
                        seq_line(m, '-', 40-namelen, namelen);

                snprintf(ip, sizeof(ip), "[<%p>]",
                                (void *)class->contending_point[i]);
                seq_printf(m, "%40s %14lu %29s %pS\n",
                                name, stats->contending_point[i],
                                ip, (void *)class->contending_point[i]);
        }
        if (i) {
                seq_puts(m, "\n");
                seq_line(m, '.', 0, 40 + 1 + 12 * (14 + 1));
                seq_puts(m, "\n");
        }
}

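/* Print the lock_stat version banner and the column headers. */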
static void seq_header(struct seq_file *m)
{
        seq_puts(m, "lock_stat version 0.4\n");

        if (unlikely(!debug_locks))
                seq_printf(m, "*WARNING* lock debugging disabled!! - possibly due to a lockdep warning\n");

        seq_line(m, '-', 0, 40 + 1 + 12 * (14 + 1));
        seq_printf(m, "%40s %14s %14s %14s %14s %14s %14s %14s %14s %14s %14s "
                        "%14s %14s\n",
                        "class name",
                        "con-bounces",
                        "contentions",
                        "waittime-min",
                        "waittime-max",
                        "waittime-total",
                        "waittime-avg",
                        "acq-bounces",
                        "acquisitions",
                        "holdtime-min",
                        "holdtime-max",
                        "holdtime-total",
                        "holdtime-avg");
        seq_line(m, '-', 0, 40 + 1 + 12 * (14 + 1));
        seq_printf(m, "\n");
}

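/*
 * seq_file iterator for /proc/lock_stat: position 0 is the header, any
 * other position indexes the sorted per-class snapshot in m->private.
 */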
static void *ls_start(struct seq_file *m, loff_t *pos)
{
        struct lock_stat_seq *data = m->private;
        struct lock_stat_data *iter;

        if (*pos == 0)
                return SEQ_START_TOKEN;

        iter = data->stats + (*pos - 1);
        if (iter >= data->iter_end)
                iter = NULL;

        return iter;
}

static void *ls_next(struct seq_file *m, void *v, loff_t *pos)
{
        (*pos)++;
        return ls_start(m, pos);
}

static void ls_stop(struct seq_file *m, void *v)
{
}

static int ls_show(struct seq_file *m, void *v)
{
        if (v == SEQ_START_TOKEN)
                seq_header(m);
        else
                seq_stats(m, v);

        return 0;
}

static const struct seq_operations lockstat_ops = {
        .start  = ls_start,
        .next   = ls_next,
        .stop   = ls_stop,
        .show   = ls_show,
};

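/*
 * Snapshot the statistics of every in-use lock class into a vmalloc'ed
 * buffer and sort it by contention count, so the seq_file walk sees a
 * stable, ordered view.
 */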
static int lock_stat_open(struct inode *inode, struct file *file)
{
        int res;
        struct lock_class *class;
        struct lock_stat_seq *data = vmalloc(sizeof(struct lock_stat_seq));

        if (!data)
                return -ENOMEM;

        res = seq_open(file, &lockstat_ops);
        if (!res) {
                struct lock_stat_data *iter = data->stats;
                struct seq_file *m = file->private_data;
                unsigned long idx;

                iterate_lock_classes(idx, class) {
                        if (!test_bit(idx, lock_classes_in_use))
                                continue;
                        iter->class = class;
                        iter->stats = lock_stats(class);
                        iter++;
                }

                data->iter_end = iter;

                sort(data->stats, data->iter_end - data->stats,
                                sizeof(struct lock_stat_data),
                                lock_stat_cmp, NULL);

                m->private = data;
        } else
                vfree(data);

        return res;
}

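/* Writing "0" to /proc/lock_stat clears the statistics of all in-use classes. */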
static ssize_t lock_stat_write(struct file *file, const char __user *buf,
                               size_t count, loff_t *ppos)
{
        struct lock_class *class;
        unsigned long idx;
        char c;

        if (count) {
                if (get_user(c, buf))
                        return -EFAULT;

                if (c != '0')
                        return count;

                iterate_lock_classes(idx, class) {
                        if (!test_bit(idx, lock_classes_in_use))
                                continue;
                        clear_lock_stats(class);
                }
        }
        return count;
}

static int lock_stat_release(struct inode *inode, struct file *file)
{
        struct seq_file *seq = file->private_data;

        vfree(seq->private);
        return seq_release(inode, file);
}

static const struct proc_ops lock_stat_proc_ops = {
        .proc_open      = lock_stat_open,
        .proc_write     = lock_stat_write,
        .proc_read      = seq_read,
        .proc_lseek     = seq_lseek,
        .proc_release   = lock_stat_release,
};
#endif /* CONFIG_LOCK_STAT */

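/* Create the lockdep /proc entries at boot. */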
static int __init lockdep_proc_init(void)
{
        proc_create_seq("lockdep", S_IRUSR, NULL, &lockdep_ops);
#ifdef CONFIG_PROVE_LOCKING
        proc_create_seq("lockdep_chains", S_IRUSR, NULL, &lockdep_chains_ops);
#endif
        proc_create_single("lockdep_stats", S_IRUSR, NULL, lockdep_stats_show);
#ifdef CONFIG_LOCK_STAT
        proc_create("lock_stat", S_IRUSR | S_IWUSR, NULL, &lock_stat_proc_ops);
#endif

        return 0;
}

__initcall(lockdep_proc_init);