Merge tag 'locking-urgent-2021-03-14' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking fixes from Thomas Gleixner:
 "A couple of locking fixes:

  - A fix for the static_call mechanism so it handles unaligned
    addresses correctly.

  - Make u64_stats_init() a macro so every instance gets a separate
    lockdep key.

  - Make seqcount_latch_init() a macro as well to preserve the static
    variable which is used for the lockdep key"

* tag 'locking-urgent-2021-03-14' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  seqlock,lockdep: Fix seqcount_latch_init()
  u64_stats,lockdep: Fix u64_stats_init() vs lockdep
  static_call: Fix the module key fixup
commit fa509ff879
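Background for the two lockdep-related changes below: a lockdep class is identified by a static key variable, and seqcount_init()-style initializers declare that key at the macro expansion site. Wrapping the initializer in an inline function therefore collapses every caller onto the single key inside that one function, which is what both u64_stats_init() and seqcount_latch_init() had ended up doing. A minimal user-space sketch of the pattern, with made-up names (struct lock_key, INIT_WITH_KEY, register-by-printf) standing in for the real lockdep machinery:

#include <stdio.h>

/* Made-up stand-ins for illustration only -- not kernel code. */
struct lock_key { int dummy; };

/*
 * Every expansion of INIT_WITH_KEY() defines its own static __key, so two
 * textual uses end up with two distinct key addresses.  Wrapping the same
 * body in an inline function would leave exactly one __key shared by all
 * callers, which is the situation the two lockdep fixes in this pull undo.
 */
#define INIT_WITH_KEY(name)						\
	do {								\
		static struct lock_key __key;	/* one per expansion site */ \
		printf("%s: key at %p\n", (name), (void *)&__key);	\
	} while (0)

int main(void)
{
	INIT_WITH_KEY("instance_a");	/* first static key */
	INIT_WITH_KEY("instance_b");	/* a distinct, second static key */
	return 0;
}

Running it prints two different addresses; the inline-function version of the same initializer would print the same address twice, i.e. one lockdep class for every user.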
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -664,10 +664,7 @@ typedef struct {
  * seqcount_latch_init() - runtime initializer for seqcount_latch_t
  * @s: Pointer to the seqcount_latch_t instance
  */
-static inline void seqcount_latch_init(seqcount_latch_t *s)
-{
-	seqcount_init(&s->seqcount);
-}
+#define seqcount_latch_init(s) seqcount_init(&(s)->seqcount)
 
 /**
  * raw_read_seqcount_latch() - pick even/odd latch data copy
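For the surrounding context lines: a seqcount_latch protects two copies of the data, and readers pick the copy selected by the low bit of the sequence count. A hedged sketch of the usual usage, loosely following the kernel-doc example in seqlock.h (struct payload, struct latched and the helpers are invented here; the retry helper is assumed to be read_seqcount_latch_retry()):

#include <linux/seqlock.h>
#include <linux/types.h>

/* Invented example types; only the seqcount_latch_* calls are real API. */
struct payload { u64 a, b; };

struct latched {
	seqcount_latch_t	seq;
	struct payload		data[2];	/* even/odd copies */
};

static void latch_setup(struct latched *l)
{
	/* Now a macro again: the static lockdep key is declared at this site. */
	seqcount_latch_init(&l->seq);
}

/* Writer: update both copies, flipping the latch before each one. */
static void latch_modify(struct latched *l, const struct payload *src)
{
	raw_write_seqcount_latch(&l->seq);
	l->data[0] = *src;
	raw_write_seqcount_latch(&l->seq);
	l->data[1] = *src;
}

/* Reader: consume the even/odd copy selected by the sequence count's low bit. */
static struct payload latch_query(struct latched *l)
{
	struct payload val;
	unsigned int seq, idx;

	do {
		seq = raw_read_seqcount_latch(&l->seq);
		idx = seq & 0x01;
		val = l->data[idx];
	} while (read_seqcount_latch_retry(&l->seq, seq));

	return val;
}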
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -115,12 +115,13 @@ static inline void u64_stats_inc(u64_stats_t *p)
 }
 #endif
 
+#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
+#define u64_stats_init(syncp)	seqcount_init(&(syncp)->seq)
+#else
 static inline void u64_stats_init(struct u64_stats_sync *syncp)
 {
-#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
-	seqcount_init(&syncp->seq);
-#endif
 }
+#endif
 
 static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
 {
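At a call site, the effect of the u64_stats_init() change looks like this hedged sketch (struct pcpu_stats and the two helpers are invented for illustration; only the u64_stats_* calls are the real API):

#include <linux/u64_stats_sync.h>

/* Invented per-instance stats structure; not part of the patch. */
struct pcpu_stats {
	u64_stats_t		rx_packets;
	struct u64_stats_sync	syncp;
};

static void pcpu_stats_setup(struct pcpu_stats *s)
{
	/*
	 * On 32-bit SMP this now expands to seqcount_init() right here, so
	 * the syncp initialized at this site gets its own lockdep class
	 * instead of sharing the single key that lived inside the old inline
	 * function; on other configs it is the empty inline function above.
	 */
	u64_stats_init(&s->syncp);
}

static void pcpu_stats_rx(struct pcpu_stats *s)
{
	u64_stats_update_begin(&s->syncp);
	u64_stats_inc(&s->rx_packets);
	u64_stats_update_end(&s->syncp);
}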
--- a/kernel/static_call.c
+++ b/kernel/static_call.c
@@ -349,7 +349,8 @@ static int static_call_add_module(struct module *mod)
 	struct static_call_site *site;
 
 	for (site = start; site != stop; site++) {
-		unsigned long addr = (unsigned long)static_call_key(site);
+		unsigned long s_key = (long)site->key + (long)&site->key;
+		unsigned long addr = s_key & ~STATIC_CALL_SITE_FLAGS;
 		unsigned long key;
 
 		/*
@@ -373,8 +374,8 @@ static int static_call_add_module(struct module *mod)
 			return -EINVAL;
 		}
 
-		site->key = (key - (long)&site->key) |
-			    (site->key & STATIC_CALL_SITE_FLAGS);
+		key |= s_key & STATIC_CALL_SITE_FLAGS;
+		site->key = key - (long)&site->key;
 	}
 
 	return __static_call_init(mod, start, stop);
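What the static_call hunks change: site->key holds a relative reference to the key with the STATIC_CALL_SITE_FLAGS bits folded into its low bits, and the old code OR'ed those bits back onto the already rebased relative value, which only works out when the underlying address is suitably aligned (the unaligned-address case the merge message refers to). The fix keeps the flag transfer in absolute form (s_key). A small user-space model of the fixed decode/encode round trip, with stand-in names (struct site, SITE_FLAGS, the helpers) rather than the kernel's types:

#include <stdio.h>

/* Illustrative model only; SITE_FLAGS stands in for STATIC_CALL_SITE_FLAGS
 * and struct site for struct static_call_site. */
#define SITE_FLAGS	3UL

struct site { long key; };	/* stores "key address (with flags) - &site->key" */

/* Decode: rebase the relative reference, then strip the flag bits. */
static unsigned long site_key_addr(const struct site *s)
{
	unsigned long s_key = (long)s->key + (long)&s->key;

	return s_key & ~SITE_FLAGS;
}

/* Encode: transfer the flag bits in absolute form, as the fix does. */
static void site_set_key(struct site *s, unsigned long key)
{
	unsigned long s_key = (long)s->key + (long)&s->key;

	key |= s_key & SITE_FLAGS;		/* preserve the original flags */
	s->key = key - (long)&s->key;		/* back to a relative reference */
}

int main(void)
{
	static long key_a, key_b;		/* pretend static_call_key objects */
	struct site s;

	/* Record a relative reference to key_a with flag bit 0x1 set. */
	s.key = ((long)&key_a | 1L) - (long)&s.key;
	printf("decoded: %#lx (expect &key_a %p)\n", site_key_addr(&s), (void *)&key_a);

	/* Re-point the site at key_b; the flag bit must survive the rewrite. */
	site_set_key(&s, (unsigned long)&key_b);
	printf("re-decoded: %#lx (expect &key_b %p)\n", site_key_addr(&s), (void *)&key_b);
	return 0;
}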