diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index 88e746074252298ecae8f0aab9db2bd1c9f4fb99..cf88c1f982701983e197fa745b9cd0a7ed59cc4b 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -177,6 +177,15 @@ cgroup v2 currently supports the following mount options.
 	ignored on non-init namespace mounts. Please refer to the
 	Delegation section for details.
 
+  memory_localevents
+
+	Only populate memory.events with data for the current cgroup,
+	and not any subtrees. This is legacy behaviour; the default
+	behaviour without this option is to include subtree counts.
+	This option is system wide and can only be set on mount or
+	modified through remount from the init namespace. The mount
+	option is ignored on non-init namespace mounts.
+
 
 Organizing Processes and Threads
 --------------------------------
diff --git a/Documentation/vm/hmm.rst b/Documentation/vm/hmm.rst
index ec1efa32af3cafb0b309c2653ebc008cbdccd860..7cdf7282e0229fdae4d210246e8e804fb0ade06b 100644
--- a/Documentation/vm/hmm.rst
+++ b/Documentation/vm/hmm.rst
@@ -288,15 +288,17 @@ For instance if the device flags for device entries are:
     WRITE (1 << 62)
 
 Now let say that device driver wants to fault with at least read a range then
-it does set:
-    range->default_flags = (1 << 63)
+it sets::
+
+    range->default_flags = (1 << 63);
     range->pfn_flags_mask = 0;
 
 and calls hmm_range_fault() as described above. This will fill fault all page
 in the range with at least read permission.
 
 Now let say driver wants to do the same except for one page in the range for
-which its want to have write. Now driver set:
+which it wants to have write access. Now the driver sets::
+
     range->default_flags = (1 << 63);
     range->pfn_flags_mask = (1 << 62);
     range->pfns[index_of_write] = (1 << 62);
diff --git a/arch/arm/boot/compressed/decompress.c b/arch/arm/boot/compressed/decompress.c
index c16c1829a5e4f8732033ea18c5de327aa2eb2878..aa075d8372ea20bd030d43663b61acba3e758d4e 100644
--- a/arch/arm/boot/compressed/decompress.c
+++ b/arch/arm/boot/compressed/decompress.c
@@ -32,6 +32,7 @@
 extern char * strstr(const char * s1, const char *s2);
 extern size_t strlen(const char *s);
 extern int memcmp(const void *cs, const void *ct, size_t count);
+extern char * strchrnul(const char *, int);
 
 #ifdef CONFIG_KERNEL_GZIP
 #include "../../../../lib/decompress_inflate.c"
diff --git a/arch/parisc/configs/c8000_defconfig b/arch/parisc/configs/c8000_defconfig
index 088ab948a5caf8b8f7455233c1cba5295e00cc0f..900b00084953377eec52bf4c4e853964f72ff589 100644
--- a/arch/parisc/configs/c8000_defconfig
+++ b/arch/parisc/configs/c8000_defconfig
@@ -225,7 +225,6 @@ CONFIG_UNUSED_SYMBOLS=y
 CONFIG_DEBUG_FS=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_SLAB=y
-CONFIG_DEBUG_SLAB_LEAK=y
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_DEBUG_STACKOVERFLOW=y
 CONFIG_PANIC_ON_OOPS=y
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a209199f3af646054469378075ab6dae0e61b2f8..09b8ff0d856a59c017af2b39cd2203e855a456fe 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3034,7 +3034,8 @@ static int __init iommu_prepare_static_identity_mapping(int hw)
 {
 	struct pci_dev *pdev = NULL;
 	struct dmar_drhd_unit *drhd;
-	struct intel_iommu *iommu;
+	/* To avoid a -Wunused-but-set-variable warning. */
+	struct intel_iommu *iommu __maybe_unused;
 	struct device *dev;
 	int i;
 	int ret = 0;
diff --git a/fs/ocfs2/filecheck.c b/fs/ocfs2/filecheck.c
index f65f2b2f594d4f0a705faf58ab068054124d79af..1906cc962c4d935055ddc83420354573b2cb16a2 100644
--- a/fs/ocfs2/filecheck.c
+++ b/fs/ocfs2/filecheck.c
@@ -193,6 +193,7 @@ int ocfs2_filecheck_create_sysfs(struct ocfs2_super *osb)
 	ret = kobject_init_and_add(&entry->fs_kobj, &ocfs2_ktype_filecheck,
 				   NULL, "filecheck");
 	if (ret) {
+		kobject_put(&entry->fs_kobj);
 		kfree(fcheck);
 		return ret;
 	}
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 77258d276f9350c54b1aca6c713a39d826a9715d..11e215d7937e61efbf6dd229275528bb75f4b70f 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -89,6 +89,11 @@ enum {
 	 * Enable cpuset controller in v1 cgroup to use v2 behavior.
 	 */
 	CGRP_ROOT_CPUSET_V2_MODE = (1 << 4),
+
+	/*
+	 * Enable legacy local memory.events.
+	 */
+	CGRP_ROOT_MEMORY_LOCAL_EVENTS = (1 << 5),
 };
 
 /* cftype->flags */
diff --git a/include/linux/generic-radix-tree.h b/include/linux/generic-radix-tree.h
index 3a91130a4fbd54040301f7da2baf4a242b14ab67..02393c0c98f994bf827ec104fa0c5925d62eae16 100644
--- a/include/linux/generic-radix-tree.h
+++ b/include/linux/generic-radix-tree.h
@@ -2,7 +2,7 @@
 #define _LINUX_GENERIC_RADIX_TREE_H
 
 /**
- * DOC: Generic radix trees/sparse arrays:
+ * DOC: Generic radix trees/sparse arrays
  *
 * Very simple and minimalistic, supporting arbitrary size entries up to
 * PAGE_SIZE.
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index aa5efd9351ebd80a5db735184be7eb34993a174b..d5ceb2839a2ded8ccf49524d76a5eedd666b1cd1 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -54,6 +54,7 @@ struct list_lru {
 #ifdef CONFIG_MEMCG_KMEM
 	struct list_head	list;
 	int			shrinker_id;
+	bool			memcg_aware;
 #endif
 };
 
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 73fe0a700911a4627eca3a06de21d090215a1268..edf9e8f32d7025fd53412f414f4d136fb6867d1f 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -737,8 +737,14 @@ static inline void count_memcg_event_mm(struct mm_struct *mm,
 static inline void memcg_memory_event(struct mem_cgroup *memcg,
 				      enum memcg_memory_event event)
 {
-	atomic_long_inc(&memcg->memory_events[event]);
-	cgroup_file_notify(&memcg->events_file);
+	do {
+		atomic_long_inc(&memcg->memory_events[event]);
+		cgroup_file_notify(&memcg->events_file);
+
+		if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
+			break;
+	} while ((memcg = parent_mem_cgroup(memcg)) &&
+		 !mem_cgroup_is_root(memcg));
 }
 
 static inline void memcg_memory_event_mm(struct mm_struct *mm,
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 217cec4e22c68c6053b0e8406b670affd64963dc..426a0026225c29085d8e09c7f4920c6edbd216be 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -1810,11 +1810,13 @@ int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
 
 enum cgroup2_param {
 	Opt_nsdelegate,
+	Opt_memory_localevents,
 	nr__cgroup2_params
 };
 
 static const struct fs_parameter_spec cgroup2_param_specs[] = {
-	fsparam_flag ("nsdelegate", Opt_nsdelegate),
+	fsparam_flag("nsdelegate", Opt_nsdelegate),
+	fsparam_flag("memory_localevents", Opt_memory_localevents),
 	{}
 };
 
@@ -1837,6 +1839,9 @@ static int cgroup2_parse_param(struct fs_context *fc, struct fs_parameter *param
 	case Opt_nsdelegate:
 		ctx->flags |= CGRP_ROOT_NS_DELEGATE;
 		return 0;
+	case Opt_memory_localevents:
+		ctx->flags |= CGRP_ROOT_MEMORY_LOCAL_EVENTS;
+		return 0;
 	}
 	return -EINVAL;
 }
 
@@ -1848,6 +1853,11 @@ static void apply_cgroup_root_flags(unsigned int root_flags)
 			cgrp_dfl_root.flags |= CGRP_ROOT_NS_DELEGATE;
 		else
 			cgrp_dfl_root.flags &= ~CGRP_ROOT_NS_DELEGATE;
+
+		if (root_flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
+			cgrp_dfl_root.flags |= CGRP_ROOT_MEMORY_LOCAL_EVENTS;
+		else
+			cgrp_dfl_root.flags &= ~CGRP_ROOT_MEMORY_LOCAL_EVENTS;
 	}
 }
 
@@ -1855,6 +1865,8 @@ static int cgroup_show_options(struct seq_file *seq, struct kernfs_root *kf_root
 {
 	if (cgrp_dfl_root.flags & CGRP_ROOT_NS_DELEGATE)
 		seq_puts(seq, ",nsdelegate");
+	if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
+		seq_puts(seq, ",memory_localevents");
 	return 0;
 }
 
@@ -6325,7 +6337,7 @@ static struct kobj_attribute cgroup_delegate_attr = __ATTR_RO(delegate);
 static ssize_t features_show(struct kobject *kobj, struct kobj_attribute *attr,
 			     char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, "nsdelegate\n");
+	return snprintf(buf, PAGE_SIZE, "nsdelegate\nmemory_localevents\n");
 }
 
 static struct kobj_attribute cgroup_features_attr = __ATTR_RO(features);
diff --git a/kernel/fork.c b/kernel/fork.c
index b2b87d450b80b5e08899b1677848d466a643ead8..75675b9bf6dfd36066336c384e007de312f54004 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -123,7 +123,7 @@ unsigned long total_forks;	/* Handle normal Linux uptimes. */
 int nr_threads;			/* The idle threads do not count.. */
 
-int max_threads;		/* tunable limit on nr_threads */
+static int max_threads;		/* tunable limit on nr_threads */
 
 DEFINE_PER_CPU(unsigned long, process_counts) = 0;
diff --git a/kernel/signal.c b/kernel/signal.c
index d7b9d14ac80dab05a4458320337e5eb9c4a00e59..328a01e1a2f05df4b88f4752e0ed9cf277fee4d9 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2485,6 +2485,8 @@ bool get_signal(struct ksignal *ksig)
 		if (signal_group_exit(signal)) {
 			ksig->info.si_signo = signr = SIGKILL;
 			sigdelset(&current->pending.signal, SIGKILL);
+			trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
+					     &sighand->action[SIGKILL - 1]);
 			recalc_sigpending();
 			goto fatal;
 		}
diff --git a/kernel/sys.c b/kernel/sys.c
index bdbfe8d37418fd5f20ff429d15258f331cb7bf68..2969304c29fe3628ca46a2db6ca824588c12f72b 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1882,13 +1882,14 @@ static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
 }
 
 /*
+ * Check arithmetic relations of passed addresses.
+ *
  * WARNING: we don't require any capability here so be very careful
  * in what is allowed for modification from userspace.
  */
-static int validate_prctl_map(struct prctl_mm_map *prctl_map)
+static int validate_prctl_map_addr(struct prctl_mm_map *prctl_map)
 {
 	unsigned long mmap_max_addr = TASK_SIZE;
-	struct mm_struct *mm = current->mm;
 	int error = -EINVAL, i;
 
 	static const unsigned char offsets[] = {
@@ -1949,24 +1950,6 @@ static int validate_prctl_map(struct prctl_mm_map *prctl_map)
 			      prctl_map->start_data))
 		goto out;
 
-	/*
-	 * Someone is trying to cheat the auxv vector.
-	 */
-	if (prctl_map->auxv_size) {
-		if (!prctl_map->auxv || prctl_map->auxv_size > sizeof(mm->saved_auxv))
-			goto out;
-	}
-
-	/*
-	 * Finally, make sure the caller has the rights to
-	 * change /proc/pid/exe link: only local sys admin should
-	 * be allowed to.
-	 */
-	if (prctl_map->exe_fd != (u32)-1) {
-		if (!ns_capable(current_user_ns(), CAP_SYS_ADMIN))
-			goto out;
-	}
-
 	error = 0;
 out:
 	return error;
@@ -1993,11 +1976,18 @@ static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data
 	if (copy_from_user(&prctl_map, addr, sizeof(prctl_map)))
 		return -EFAULT;
 
-	error = validate_prctl_map(&prctl_map);
+	error = validate_prctl_map_addr(&prctl_map);
 	if (error)
 		return error;
 
 	if (prctl_map.auxv_size) {
+		/*
+		 * Someone is trying to cheat the auxv vector.
+		 */
+		if (!prctl_map.auxv ||
+		    prctl_map.auxv_size > sizeof(mm->saved_auxv))
+			return -EINVAL;
+
 		memset(user_auxv, 0, sizeof(user_auxv));
 		if (copy_from_user(user_auxv,
 				   (const void __user *)prctl_map.auxv,
@@ -2010,6 +2000,14 @@ static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data
 	}
 
 	if (prctl_map.exe_fd != (u32)-1) {
+		/*
+		 * Make sure the caller has the rights to
+		 * change /proc/pid/exe link: only local sys admin should
+		 * be allowed to.
+		 */
+		if (!ns_capable(current_user_ns(), CAP_SYS_ADMIN))
+			return -EINVAL;
+
 		error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd);
 		if (error)
 			return error;
@@ -2097,7 +2095,11 @@ static int prctl_set_mm(int opt, unsigned long addr,
 			unsigned long arg4, unsigned long arg5)
 {
 	struct mm_struct *mm = current->mm;
-	struct prctl_mm_map prctl_map;
+	struct prctl_mm_map prctl_map = {
+		.auxv = NULL,
+		.auxv_size = 0,
+		.exe_fd = -1,
+	};
 	struct vm_area_struct *vma;
 	int error;
 
@@ -2125,9 +2127,15 @@ static int prctl_set_mm(int opt, unsigned long addr,
 
 	error = -EINVAL;
 
-	down_write(&mm->mmap_sem);
+	/*
+	 * arg_lock protects concurrent updates of arg boundaries; we need
+	 * mmap_sem for a) concurrent sys_brk, b) finding VMA for addr
+	 * validation.
+	 */
+	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, addr);
 
+	spin_lock(&mm->arg_lock);
 	prctl_map.start_code	= mm->start_code;
 	prctl_map.end_code	= mm->end_code;
 	prctl_map.start_data	= mm->start_data;
@@ -2139,9 +2147,6 @@ static int prctl_set_mm(int opt, unsigned long addr,
 	prctl_map.arg_end	= mm->arg_end;
 	prctl_map.env_start	= mm->env_start;
 	prctl_map.env_end	= mm->env_end;
-	prctl_map.auxv = NULL;
-	prctl_map.auxv_size = 0;
-	prctl_map.exe_fd = -1;
 
 	switch (opt) {
 	case PR_SET_MM_START_CODE:
@@ -2181,7 +2186,7 @@ static int prctl_set_mm(int opt, unsigned long addr,
 		goto out;
 	}
 
-	error = validate_prctl_map(&prctl_map);
+	error = validate_prctl_map_addr(&prctl_map);
 	if (error)
 		goto out;
 
@@ -2218,7 +2223,8 @@ static int prctl_set_mm(int opt, unsigned long addr,
 	error = 0;
 out:
-	up_write(&mm->mmap_sem);
+	spin_unlock(&mm->arg_lock);
+	up_read(&mm->mmap_sem);
 	return error;
 }
diff --git a/lib/sort.c b/lib/sort.c
index 50855ea8c2622b29bc77ade1555bff15b953b7a7..cf408aec3733e2fcbe1090228c25d5ea99d35e1d 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -43,8 +43,9 @@ static bool is_aligned(const void *base, size_t size, unsigned char align)
 
 /**
  * swap_words_32 - swap two elements in 32-bit chunks
- * @a, @b: pointers to the elements
- * @size: element size (must be a multiple of 4)
+ * @a: pointer to the first element to swap
+ * @b: pointer to the second element to swap
+ * @n: element size (must be a multiple of 4)
  *
  * Exchange the two objects in memory. This exploits base+index addressing,
 * which basically all CPUs have, to minimize loop overhead computations.
@@ -65,8 +66,9 @@ static void swap_words_32(void *a, void *b, size_t n) /** * swap_words_64 - swap two elements in 64-bit chunks - * @a, @b: pointers to the elements - * @size: element size (must be a multiple of 8) + * @a: pointer to the first element to swap + * @b: pointer to the second element to swap + * @n: element size (must be a multiple of 8) * * Exchange the two objects in memory. This exploits base+index * addressing, which basically all CPUs have, to minimize loop overhead @@ -100,8 +102,9 @@ static void swap_words_64(void *a, void *b, size_t n) /** * swap_bytes - swap two elements a byte at a time - * @a, @b: pointers to the elements - * @size: element size + * @a: pointer to the first element to swap + * @b: pointer to the second element to swap + * @n: element size * * This is the fallback if alignment doesn't allow using larger chunks. */ diff --git a/mm/compaction.c b/mm/compaction.c index 9febc8cc84e727d74470dbccb9cf02acff0a6c4a..9e1b9acb116b9b3efaabd8ee02538c20456b4714 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1399,7 +1399,7 @@ fast_isolate_freepages(struct compact_control *cc) page = pfn_to_page(highest); cc->free_pfn = highest; } else { - if (cc->direct_compaction) { + if (cc->direct_compaction && pfn_valid(min_pfn)) { page = pfn_to_page(min_pfn); cc->free_pfn = min_pfn; } diff --git a/mm/gup.c b/mm/gup.c index f173fcbaf1b2c9ab40dd65a6b2f3517aa7790ab5..ddde097cf9e4106bc02ea55538926f44ed8e587c 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -1042,10 +1042,6 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk, BUG_ON(ret >= nr_pages); } - if (!pages) - /* If it's a prefault don't insist harder */ - return ret; - if (ret > 0) { nr_pages -= ret; pages_done += ret; @@ -1061,8 +1057,12 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk, pages_done = ret; break; } - /* VM_FAULT_RETRY triggered, so seek to the faulting offset */ - pages += ret; + /* + * VM_FAULT_RETRY triggered, so seek to the faulting offset. + * For the prefault case (!pages) we only update counts. + */ + if (likely(pages)) + pages += ret; start += ret << PAGE_SHIFT; /* @@ -1085,7 +1085,8 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk, pages_done++; if (!nr_pages) break; - pages++; + if (likely(pages)) + pages++; start += PAGE_SIZE; } if (lock_dropped && *locked) { diff --git a/mm/kasan/common.c b/mm/kasan/common.c index 36afcf64e016fa7ef39e3c4404f6b3c89a60917f..242fdc01aaa9650e0f9dc2d18e659458b1e21067 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -464,7 +464,7 @@ static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object, { unsigned long redzone_start; unsigned long redzone_end; - u8 tag; + u8 tag = 0xff; if (gfpflags_allow_blocking(flags)) quarantine_reduce(); diff --git a/mm/list_lru.c b/mm/list_lru.c index 0bdf3152735eb4de2135ebd69c2c68a8b1a79a08..e4709fdaa8e6af8a35114af54f5dfaa3436a5d1c 100644 --- a/mm/list_lru.c +++ b/mm/list_lru.c @@ -38,11 +38,7 @@ static int lru_shrinker_id(struct list_lru *lru) static inline bool list_lru_memcg_aware(struct list_lru *lru) { - /* - * This needs node 0 to be always present, even - * in the systems supporting sparse numa ids. 
-	 */
-	return !!lru->node[0].memcg_lrus;
+	return lru->memcg_aware;
 }
 
 static inline struct list_lru_one *
@@ -452,6 +448,8 @@ static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
 {
 	int i;
 
+	lru->memcg_aware = memcg_aware;
+
 	if (!memcg_aware)
 		return 0;
 
diff --git a/mm/util.c b/mm/util.c
index 91682a2090eeddb046761726eec1e81817c34d15..9834c4ab7d8e86e1ddb5024b28cf1ab2ff2e55a6 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -718,12 +718,12 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
 	if (!mm->arg_end)
 		goto out_mm;	/* Shh! No looking before we're done */
 
-	down_read(&mm->mmap_sem);
+	spin_lock(&mm->arg_lock);
 	arg_start = mm->arg_start;
 	arg_end = mm->arg_end;
 	env_start = mm->env_start;
 	env_end = mm->env_end;
-	up_read(&mm->mmap_sem);
+	spin_unlock(&mm->arg_lock);
 
 	len = arg_end - arg_start;
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 233af6936c93774b7dd8ebb15156d73605a45751..7350a124524bb4b203fff489c577586661cbf03d 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -815,7 +815,7 @@ find_vmap_lowest_match(unsigned long size,
 			}
 
 			/*
-			 * OK. We roll back and find the fist right sub-tree,
+			 * OK. We roll back and find the first right sub-tree,
 			 * that will satisfy the search criteria. It can happen
 			 * only once due to "vstart" restriction.
 			 */
diff --git a/mm/z3fold.c b/mm/z3fold.c
index 99be52c5ca4577bc66c8e5a1b1ee6f6a77877d90..985732c8b025eb0c4064a18243114b25b34e5e87 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -190,10 +190,11 @@ static int size_to_chunks(size_t size)
 
 static void compact_page_work(struct work_struct *w);
 
-static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool)
+static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
+						     gfp_t gfp)
 {
 	struct z3fold_buddy_slots *slots = kmem_cache_alloc(pool->c_handle,
-							    GFP_KERNEL);
+							    gfp);
 
 	if (slots) {
 		memset(slots->slot, 0, sizeof(slots->slot));
@@ -295,10 +296,10 @@ static void z3fold_unregister_migration(struct z3fold_pool *pool)
 
 /* Initializes the z3fold header of a newly allocated z3fold page */
 static struct z3fold_header *init_z3fold_page(struct page *page,
-					struct z3fold_pool *pool)
+					struct z3fold_pool *pool, gfp_t gfp)
 {
 	struct z3fold_header *zhdr = page_address(page);
-	struct z3fold_buddy_slots *slots = alloc_slots(pool);
+	struct z3fold_buddy_slots *slots = alloc_slots(pool, gfp);
 
 	if (!slots)
 		return NULL;
@@ -912,7 +913,7 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
 	if (!page)
 		return -ENOMEM;
 
-	zhdr = init_z3fold_page(page, pool);
+	zhdr = init_z3fold_page(page, pool, gfp);
 	if (!zhdr) {
 		__free_page(page);
 		return -ENOMEM;
 	}
diff --git a/scripts/gdb/linux/constants.py.in b/scripts/gdb/linux/constants.py.in
index 1d73083da6cb2f6f36d10f8ef9af350cfaef2ee1..2efbec6b6b8db2355a52432f7ab0a8bb61a236a3 100644
--- a/scripts/gdb/linux/constants.py.in
+++ b/scripts/gdb/linux/constants.py.in
@@ -40,7 +40,8 @@ import gdb
 
 /* linux/clk-provider.h */
-LX_GDBPARSED(CLK_GET_RATE_NOCACHE)
+if IS_BUILTIN(CONFIG_COMMON_CLK):
+    LX_GDBPARSED(CLK_GET_RATE_NOCACHE)
 
 /* linux/fs.h */
 LX_VALUE(SB_RDONLY)
diff --git a/scripts/spdxcheck.py b/scripts/spdxcheck.py
index 33df646618e203a08be9580f79cd1192897b5333..6374e078a5f228d66c9a05edc78547e5c26f9431 100755
--- a/scripts/spdxcheck.py
+++ b/scripts/spdxcheck.py
@@ -32,7 +32,8 @@ class SPDXdata(object):
 
 def read_spdxdata(repo):
 
     # The subdirectories of LICENSES in the kernel source
-    license_dirs = [ "preferred", "deprecated", "exceptions", "dual" ]
+    # Note: the exceptions directory needs to be parsed last.
+ license_dirs = [ "preferred", "dual", "deprecated", "exceptions" ] lictree = repo.head.commit.tree['LICENSES'] spdx = SPDXdata() @@ -58,13 +59,13 @@ def read_spdxdata(repo): elif l.startswith('SPDX-Licenses:'): for lic in l.split(':')[1].upper().strip().replace(' ', '').replace('\t', '').split(','): if not lic in spdx.licenses: - raise SPDXException(None, 'Exception %s missing license %s' %(ex, lic)) + raise SPDXException(None, 'Exception %s missing license %s' %(exception, lic)) spdx.exceptions[exception].append(lic) elif l.startswith("License-Text:"): if exception: if not len(spdx.exceptions[exception]): - raise SPDXException(el, 'Exception %s is missing SPDX-Licenses' %excid) + raise SPDXException(el, 'Exception %s is missing SPDX-Licenses' %exception) spdx.exception_files += 1 else: spdx.license_files += 1
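---

As an illustration of the behaviour change in the memcg_memory_event() hunk
above (include/linux/memcontrol.h): events are now counted at every ancestor
up to, but not including, the root, unless the new memory_localevents mount
option restores the old local-only accounting. The sketch below is a minimal
userspace model of just that control flow; struct toy_memcg and
toy_memory_event() are hypothetical stand-ins for the kernel's mem_cgroup
machinery, not kernel API.

/*
 * Minimal userspace model of the new memcg_memory_event() loop.
 * Only the control flow of the hunk above is reproduced here.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_memcg {
	struct toy_memcg *parent;	/* NULL for the root cgroup */
	long events;			/* models memory_events[event] */
};

static bool memory_localevents;	/* models CGRP_ROOT_MEMORY_LOCAL_EVENTS */

static void toy_memory_event(struct toy_memcg *memcg)
{
	do {
		memcg->events++;	/* count at the current level */

		/* legacy behaviour: do not propagate to ancestors */
		if (memory_localevents)
			break;
		/* walk up, stopping before the root like !mem_cgroup_is_root() */
	} while ((memcg = memcg->parent) && memcg->parent);
}

int main(void)
{
	struct toy_memcg root = { NULL, 0 };
	struct toy_memcg parent = { &root, 0 };
	struct toy_memcg child = { &parent, 0 };

	toy_memory_event(&child);	/* default: child and parent both count */
	printf("hierarchical: child=%ld parent=%ld\n", child.events, parent.events);

	memory_localevents = true;
	toy_memory_event(&child);	/* legacy: only the child counts */
	printf("local:        child=%ld parent=%ld\n", child.events, parent.events);
	return 0;
}

Run directly, this prints child=1 parent=1 for the default hierarchical mode
and child=2 parent=1 once the legacy flag is set, matching the
memory_localevents semantics described in the cgroup-v2.rst hunk.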
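Similarly, the hmm.rst hunk describes how range->default_flags and
range->pfn_flags_mask combine per page. A hedged sketch of that combination,
assuming the effective per-page request is (pfns[i] & pfn_flags_mask) |
default_flags as the text implies; TOY_VALID and TOY_WRITE are made-up driver
flags matching the example bit positions, not HMM API names.

/*
 * Toy model of the flag combination in the hmm.rst examples above.
 */
#include <stdint.h>
#include <stdio.h>

#define TOY_VALID (UINT64_C(1) << 63)	/* "fault with at least read" */
#define TOY_WRITE (UINT64_C(1) << 62)	/* per-page write permission */

static uint64_t effective_flags(uint64_t default_flags,
				uint64_t pfn_flags_mask, uint64_t pfn)
{
	/* defaults apply everywhere; per-pfn bits pass only through the mask */
	return (pfn & pfn_flags_mask) | default_flags;
}

int main(void)
{
	/* second example from the text: write wanted on one page only */
	uint64_t pfns[4] = { 0, 0, TOY_WRITE, 0 };

	for (int i = 0; i < 4; i++) {
		uint64_t f = effective_flags(TOY_VALID, TOY_WRITE, pfns[i]);
		printf("page %d: read=%d write=%d\n", i,
		       !!(f & TOY_VALID), !!(f & TOY_WRITE));
	}
	return 0;
}

With default_flags = TOY_VALID and pfn_flags_mask = TOY_WRITE this requests
read on every page and write only on page 2, which is the behaviour the second
hmm.rst example sets up with range->pfns[index_of_write].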