@@ -7,7 +7,8 @@
* 4.6, outside that we can let our BPAUTO mechanism handle it.
*/
#if (LINUX_VERSION_IS_GEQ(3,18,0) && \
- LINUX_VERSION_IS_LESS(4,7,0))
+ LINUX_VERSION_IS_LESS(4,7,0)) || \
+ RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6)
static inline
void backport_dev_coredumpm(struct device *dev, struct module *owner,
void *data, size_t datalen, gfp_t gfp,
@@ -80,7 +80,8 @@ extern int eth_prepare_mac_addr_change(struct net_device *dev, void *p);
extern void eth_commit_mac_addr_change(struct net_device *dev, void *p);
#endif /* < 3.9 */
-#if LINUX_VERSION_IS_LESS(3,12,0)
+#if LINUX_VERSION_IS_LESS(3,12,0) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
/**
* eth_hw_addr_inherit - Copy dev_addr from another net_device
* @dst: pointer to net_device to copy dev_addr to
@@ -193,7 +194,8 @@ static inline int eth_skb_pad(struct sk_buff *skb)
}
#endif /* LINUX_VERSION_IS_LESS(3,19,0) */
-#if LINUX_VERSION_IS_LESS(4,11,0)
+#if LINUX_VERSION_IS_LESS(4,11,0) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
/**
* ether_addr_to_u64 - Convert an Ethernet address into a u64 value.
* @addr: Pointer to a six-byte array containing the Ethernet address
@@ -2,10 +2,12 @@
#define __BACKPORT_LINUX_FIRMWARE_H
#include_next <linux/firmware.h>
-#if LINUX_VERSION_IS_LESS(3,14,0)
+#if LINUX_VERSION_IS_LESS(3,14,0) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
#define request_firmware_direct(fw, name, device) request_firmware(fw, name, device)
#endif
-#if LINUX_VERSION_IS_LESS(4,18,0)
+#if LINUX_VERSION_IS_LESS(4,18,0) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
#define firmware_request_nowarn(fw, name, device) request_firmware(fw, name, device)
#endif
@@ -2,7 +2,8 @@
#define __BACKPORT_LINUX_FTRACE_EVENT_H
#include_next <linux/ftrace_event.h>
-#if LINUX_VERSION_IS_LESS(4,0,0)
+#if LINUX_VERSION_IS_LESS(4,0,0) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
const char *ftrace_print_array_seq(struct trace_seq *p,
const void *buf, int buf_len,
size_t el_size);
@@ -1,5 +1,10 @@
#ifndef __BACKPORT_LNIUX_JIFFIES_H
#define __BACKPORT_LNIUX_JIFFIES_H
+
+#ifndef NSEC_PER_SEC
+#define NSEC_PER_SEC 1000000000L
+#endif
+
#include_next <linux/jiffies.h>
#ifndef time_is_before_jiffies
@@ -207,7 +207,8 @@ int __must_check kstrtobool_from_user(const char __user *s, size_t count, bool *
#endif
-#if LINUX_VERSION_IS_LESS(3,14,0)
+#if LINUX_VERSION_IS_LESS(3,14,0) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
{
return (u32)(((u64) val * ep_ro) >> 32);
@@ -4,7 +4,8 @@
#include <linux/timekeeping.h>
#include <linux/version.h>
-#if LINUX_VERSION_IS_LESS(3,17,0)
+#if LINUX_VERSION_IS_LESS(3,17,0) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
#define ktime_get_raw LINUX_BACKPORT(ktime_get_raw)
extern ktime_t ktime_get_raw(void);
@@ -12,6 +12,61 @@
void kvfree(const void *addr);
#endif /* < 3.15 */
+#if LINUX_VERSION_IS_LESS(3,20,0) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
+#define get_user_pages_locked LINUX_BACKPORT(get_user_pages_locked)
+long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
+ int write, int force, struct page **pages, int *locked);
+#define get_user_pages_unlocked LINUX_BACKPORT(get_user_pages_unlocked)
+long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
+ int write, int force, struct page **pages);
+#elif LINUX_VERSION_IS_LESS(4,6,0) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
+static inline
+long backport_get_user_pages_locked(unsigned long start, unsigned long nr_pages,
+ int write, int force, struct page **pages, int *locked)
+{
+ return get_user_pages_locked(current, current->mm, start, nr_pages,
+ write, force, pages, locked);
+}
+#define get_user_pages_locked LINUX_BACKPORT(get_user_pages_locked)
+
+static inline
+long backport_get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
+ int write, int force, struct page **pages)
+{
+ return get_user_pages_unlocked(current, current->mm, start, nr_pages,
+ write, force, pages);
+}
+#define get_user_pages_unlocked LINUX_BACKPORT(get_user_pages_unlocked)
+#endif
+
+#if LINUX_VERSION_IS_LESS(4,6,0)
+static inline
+long backport_get_user_pages(unsigned long start, unsigned long nr_pages,
+ int write, int force, struct page **pages,
+ struct vm_area_struct **vmas)
+{
+ return get_user_pages(current, current->mm, start, nr_pages,
+ write, force, pages, vmas);
+}
+#define get_user_pages LINUX_BACKPORT(get_user_pages)
+#endif
+
+#ifndef FOLL_TRIED
+#define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */
+#endif
+
+#if LINUX_VERSION_IS_LESS(4,1,9) && \
+ LINUX_VERSION_IS_GEQ(3,6,0) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
+#define page_is_pfmemalloc LINUX_BACKPORT(page_is_pfmemalloc)
+static inline bool page_is_pfmemalloc(struct page *page)
+{
+ return page->pfmemalloc;
+}
+#endif /* < 4.2 */
+
#if LINUX_VERSION_IS_LESS(4,12,0)
#define kvmalloc LINUX_BACKPORT(kvmalloc)
static inline void *kvmalloc(size_t size, gfp_t flags)
@@ -29,7 +29,8 @@ static const char __UNIQUE_ID(name)[] \
#endif
#endif /* < 3.8 */
-#if LINUX_VERSION_IS_LESS(3,17,0)
+#if LINUX_VERSION_IS_LESS(3,17,0) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
extern struct kernel_param_ops param_ops_ullong;
extern int param_set_ullong(const char *val, const struct kernel_param *kp);
extern int param_get_ullong(char *buffer, const struct kernel_param *kp);
@@ -21,7 +21,8 @@ struct inet6_dev;
*/
#include <linux/hardirq.h>
-#if LINUX_VERSION_IS_LESS(3,14,0)
+#if LINUX_VERSION_IS_LESS(3,14,0) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
/*
* Backports note: if in-kernel support is provided we could then just
* take the kernel's implementation of __dev_kfree_skb_irq() as it requires
@@ -237,7 +238,8 @@ static inline void backport_unregister_netdevice_many(struct list_head *head)
#define napi_alloc_frag(fragsz) netdev_alloc_frag(fragsz)
#endif
-#if LINUX_VERSION_IS_LESS(3,19,0)
+#if LINUX_VERSION_IS_LESS(3,19,0) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
/* RSS keys are 40 or 52 bytes long */
#define NETDEV_RSS_KEY_LEN 52
#define netdev_rss_key_fill LINUX_BACKPORT(netdev_rss_key_fill)
@@ -257,7 +259,8 @@ static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
#define IFF_TX_SKB_SHARING 0
#endif
-#if LINUX_VERSION_IS_LESS(4,1,0)
+#if LINUX_VERSION_IS_LESS(4,1,0) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
netdev_features_t passthru_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);
@@ -323,7 +326,8 @@ static inline void netif_tx_napi_add(struct net_device *dev,
#define NETIF_F_CSUM_MASK NETIF_F_ALL_CSUM
#endif
-#if LINUX_VERSION_IS_LESS(4,7,0)
+#if LINUX_VERSION_IS_LESS(4,7,0) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
#define netif_trans_update LINUX_BACKPORT(netif_trans_update)
static inline void netif_trans_update(struct net_device *dev)
{
@@ -3,7 +3,8 @@
#include_next <linux/netlink.h>
#include <linux/version.h>
-#if LINUX_VERSION_IS_LESS(4,14,0)
+#if LINUX_VERSION_IS_LESS(4,14,0) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
struct nla_bitfield32 {
__u32 value;
__u32 selector;
@@ -1,7 +1,8 @@
#ifndef __BP_PAGE_REF_H
#define __BP_PAGE_REF_H
#include <linux/version.h>
-#if LINUX_VERSION_IS_GEQ(4,6,0)
+#if LINUX_VERSION_IS_GEQ(4,6,0) || \
+ RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6)
#include_next <linux/page_ref.h>
#else
static inline void page_ref_inc(struct page *page)
@@ -206,7 +206,8 @@ static inline int pci_enable_msix_exact(struct pci_dev *dev,
#if LINUX_VERSION_IS_LESS(4,9,0) && \
!LINUX_VERSION_IN_RANGE(3,12,69, 3,13,0) && \
!LINUX_VERSION_IN_RANGE(4,4,37, 4,5,0) && \
- !LINUX_VERSION_IN_RANGE(4,8,13, 4,9,0)
+ !LINUX_VERSION_IN_RANGE(4,8,13, 4,9,0) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
{
@@ -5,7 +5,8 @@
#define __BACKPORT_PERCPU_H
#include_next <linux/percpu.h>
-#if LINUX_VERSION_IS_LESS(3,18,0)
+#if LINUX_VERSION_IS_LESS(3,18,0) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
static inline void __percpu *__alloc_gfp_warn(void)
{
WARN(1, "Cannot backport alloc_percpu_gfp");
@@ -16,7 +16,8 @@ static inline bool pm_runtime_active(struct device *dev) { return true; }
#endif /* LINUX_VERSION_IS_LESS(3,9,0) */
-#if LINUX_VERSION_IS_LESS(3,15,0)
+#if LINUX_VERSION_IS_LESS(3,15,0) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
static inline int pm_runtime_force_suspend(struct device *dev)
{
#ifdef CONFIG_PM
@@ -1,7 +1,8 @@
#ifndef __BACKPORT_LINUX_PROPERTY_H_
#define __BACKPORT_LINUX_PROPERTY_H_
#include <linux/version.h>
-#if LINUX_VERSION_IS_GEQ(3,18,17)
+#if LINUX_VERSION_IS_GEQ(3,18,17) || \
+ RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6)
#include_next <linux/property.h>
#endif
@@ -19,7 +19,8 @@
#endif
#if LINUX_VERSION_IS_LESS(3,13,0) && \
- !defined(CONFIG_PROVE_LOCKING)
+ !defined(CONFIG_PROVE_LOCKING) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
static inline bool lockdep_rtnl_is_held(void)
{
return true;
@@ -309,7 +309,8 @@ int skb_ensure_writable(struct sk_buff *skb, int write_len);
#endif /* LINUX_VERSION_IS_LESS(3,19,0) */
-#if LINUX_VERSION_IS_LESS(4,2,0)
+#if LINUX_VERSION_IS_LESS(4,2,0) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
static inline void skb_free_frag(void *data)
{
put_page(virt_to_head_page(data));
@@ -338,7 +339,8 @@ static inline u32 skb_get_hash_perturb(struct sk_buff *skb, u32 key)
#endif /* LINUX_VERSION_IS_LESS(3,3,0) */
#endif /* LINUX_VERSION_IS_LESS(4,2,0) */
-#if LINUX_VERSION_IS_LESS(4,13,0)
+#if LINUX_VERSION_IS_LESS(4,13,0) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
static inline void *backport_skb_put(struct sk_buff *skb, unsigned int len)
{
return skb_put(skb, len);
@@ -33,7 +33,8 @@ ssize_t strscpy(char *dest, const char *src, size_t count);
char *strreplace(char *s, char old, char new);
#endif
-#if LINUX_VERSION_IS_LESS(4,6,0)
+#if LINUX_VERSION_IS_LESS(4,6,0) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
int match_string(const char * const *array, size_t n, const char *string);
#endif /* LINUX_VERSION_IS_LESS(4,5,0) */
@@ -1,5 +1,11 @@
#ifndef __BACKPORT_LINUX_TIME_H
#define __BACKPORT_LINUX_TIME_H
+
+#if RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6)
+#include_next <linux/time64.h>
+#include <uapi/linux/time.h>
+#endif
+
#include_next <linux/time.h>
#include <linux/time64.h>
@@ -6,7 +6,8 @@
#include <linux/time.h>
#endif
-#if LINUX_VERSION_IS_LESS(3,17,0)
+#if LINUX_VERSION_IS_LESS(3,17,0) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
#define timespec64_equal timespec_equal
#define timespec64_compare timespec_compare
#define set_normalized_timespec64 set_normalized_timespec
@@ -3,11 +3,13 @@
#include <linux/version.h>
#include <linux/types.h>
-#if LINUX_VERSION_IS_GEQ(3,17,0)
+#if LINUX_VERSION_IS_GEQ(3,17,0) || \
+ RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6)
#include_next <linux/timekeeping.h>
#endif
-#if LINUX_VERSION_IS_LESS(3,17,0)
+#if LINUX_VERSION_IS_LESS(3,17,0) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
#define ktime_get_ns LINUX_BACKPORT(ktime_get_ns)
extern ktime_t ktime_get(void);
#define ktime_get_ns LINUX_BACKPORT(ktime_get_ns)
@@ -48,14 +50,16 @@ static inline time64_t ktime_get_real_seconds(void)
}
#endif
-#if LINUX_VERSION_IS_LESS(3,17,0)
+#if LINUX_VERSION_IS_LESS(3,17,0) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
static inline void ktime_get_ts64(struct timespec64 *ts)
{
ktime_get_ts(ts);
}
#endif
-#if LINUX_VERSION_IS_LESS(3,17,0)
+#if LINUX_VERSION_IS_LESS(3,17,0) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
/* This was introduced in 4.15, but we only need it in the
* ktime_get_raw_ts64 backport() for < 3.17.
*/
@@ -77,7 +81,8 @@ static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
#endif
#endif /* < 3.17 */
-#if LINUX_VERSION_IS_LESS(4,18,0)
+#if LINUX_VERSION_IS_LESS(4,18,0) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
#define ktime_get_raw_ts64 LINUX_BACKPORT(ktime_get_raw_ts64)
static inline void ktime_get_raw_ts64(struct timespec64 *ts)
{
@@ -115,7 +115,8 @@ static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
#endif /* LINUX_VERSION_IS_GEQ(3,6,0) */
#if LINUX_VERSION_IS_LESS(3,15,0) && \
- !(LINUX_VERSION_CODE == KERNEL_VERSION(3,13,11) && UTS_UBUNTU_RELEASE_ABI > 30)
+ !(LINUX_VERSION_CODE == KERNEL_VERSION(3,13,11) && UTS_UBUNTU_RELEASE_ABI > 30) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
@@ -11,7 +11,8 @@
#define UUID_STRING_LEN 36
#endif
-#if LINUX_VERSION_IS_LESS(4,13,0)
+#if LINUX_VERSION_IS_LESS(4,13,0) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
#define guid_t uuid_le
#define uuid_t uuid_be
@@ -2,7 +2,8 @@
#define __BACKPORT_LINUX_WAIT_H
#include_next <linux/wait.h>
-#if LINUX_VERSION_IS_LESS(3,17,0)
+#if LINUX_VERSION_IS_LESS(3,17,0) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
extern int bit_wait(void *);
extern int bit_wait_io(void *);
@@ -23,7 +24,8 @@ backport_wait_on_bit_io(void *word, int bit, unsigned mode)
#endif
-#if LINUX_VERSION_IS_LESS(3,18,12)
+#if LINUX_VERSION_IS_LESS(3,18,12) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
#define WQ_FLAG_WOKEN 0x02
#define wait_woken LINUX_BACKPORT(wait_woken)
@@ -27,7 +27,8 @@ static inline int genl_err_attr(struct genl_info *info, int err,
/* this is for patches we apply */
static inline struct netlink_ext_ack *genl_info_extack(struct genl_info *info)
{
-#if LINUX_VERSION_IS_GEQ(4,12,0)
+#if LINUX_VERSION_IS_GEQ(4,12,0) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
return info->extack;
#else
return info->userhdr;
@@ -47,7 +48,8 @@ static inline void *genl_info_userhdr(struct genl_info *info)
#define genl_info_snd_portid(__genl_info) (__genl_info->snd_portid)
#endif
-#if LINUX_VERSION_IS_LESS(3,13,0)
+#if LINUX_VERSION_IS_LESS(3,13,0) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
#define __genl_const
#else /* < 3.13 */
#define __genl_const const
@@ -41,7 +41,8 @@ static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
{
atomic_add(i, &nf->mem);
}
-#elif LINUX_VERSION_IS_LESS(4,3,0)
+#elif LINUX_VERSION_IS_LESS(4,3,0) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
#define sub_frag_mem_limit LINUX_BACKPORT(sub_frag_mem_limit)
static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
{
@@ -56,7 +57,8 @@ static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
#endif /* LINUX_VERSION_IS_LESS(4,3,0) */
#if LINUX_VERSION_IS_LESS(4,4,0) && \
- LINUX_VERSION_IS_GEQ(3,9,0)
+ LINUX_VERSION_IS_GEQ(3,9,0) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
#define inet_frags_uninit_net LINUX_BACKPORT(inet_frags_uninit_net)
static inline void inet_frags_uninit_net(struct netns_frags *nf)
{
@@ -2,7 +2,8 @@
#define __BACKPORT_IW_HANDLER_H
#include_next <net/iw_handler.h>
-#if LINUX_VERSION_IS_LESS(4,1,0)
+#if LINUX_VERSION_IS_LESS(4,1,0) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
static inline char *
iwe_stream_add_event_check(struct iw_request_info *info, char *stream,
char *ends, struct iw_event *iwe, int event_len)
@@ -14,7 +14,8 @@ static inline struct net *get_net_ns_by_fd(int fd)
}
#endif
-#if LINUX_VERSION_IS_LESS(4,1,0)
+#if LINUX_VERSION_IS_LESS(4,1,0) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
typedef struct {
#ifdef CONFIG_NET_NS
struct net *net;
@@ -386,7 +386,8 @@ static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value)
#define NLA_TYPE_MAX (__NLA_TYPE_MAX - 1)
#endif
-#if LINUX_VERSION_IS_LESS(4,1,0)
+#if LINUX_VERSION_IS_LESS(4,1,0) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
#define nla_put_in_addr LINUX_BACKPORT(nla_put_in_addr)
static inline int nla_put_in_addr(struct sk_buff *skb, int attrtype,
__be32 addr)
@@ -535,7 +536,8 @@ static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value,
}
#endif /* < 4.7 */
-#if LINUX_VERSION_IS_LESS(4,10,0)
+#if LINUX_VERSION_IS_LESS(4,10,0) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
/**
* nla_memdup - duplicate attribute memory (kmemdup)
* @src: netlink attribute to duplicate from
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/scatterlist.h>
+#if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
{
if (!miter->__remaining) {
@@ -122,3 +123,4 @@ size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
return offset;
}
EXPORT_SYMBOL_GPL(sg_copy_buffer);
+#endif
@@ -118,7 +118,8 @@ bool pci_device_is_present(struct pci_dev *pdev)
EXPORT_SYMBOL_GPL(pci_device_is_present);
#endif /* CONFIG_PCI */
-#ifdef CONFIG_HWMON
+#if defined(CONFIG_HWMON) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
struct device*
hwmon_device_register_with_groups(struct device *dev, const char *name,
void *drvdata,
@@ -15,6 +15,7 @@
#include <linux/jiffies.h>
#include <linux/moduleparam.h>
+#if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
int bit_wait(void *word)
{
schedule();
@@ -28,7 +29,9 @@ int bit_wait_io(void *word)
return 0;
}
EXPORT_SYMBOL_GPL(bit_wait_io);
+#endif
+#if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
/**
* ktime_get_raw - Returns the raw monotonic time in ktime_t format
*/
@@ -40,7 +43,7 @@ ktime_t ktime_get_raw(void)
return timespec_to_ktime(ts);
}
EXPORT_SYMBOL_GPL(ktime_get_raw);
-
+#endif /* RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) */
/**
* nsecs_to_jiffies64 - Convert nsecs in u64 to jiffies64
@@ -146,6 +149,7 @@ char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...)
}
EXPORT_SYMBOL_GPL(devm_kasprintf);
+#if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
#define STANDARD_PARAM_DEF(name, type, format, strtolfn) \
int param_set_##name(const char *val, const struct kernel_param *kp) \
{ \
@@ -164,3 +168,4 @@ EXPORT_SYMBOL_GPL(devm_kasprintf);
EXPORT_SYMBOL(param_get_##name); \
EXPORT_SYMBOL(param_ops_##name)
STANDARD_PARAM_DEF(ullong, unsigned long long, "%llu", kstrtoull);
+#endif /* RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) */
@@ -16,7 +16,8 @@
#include <linux/skbuff.h>
#include <linux/debugfs.h>
-#if LINUX_VERSION_IS_LESS(3,18,12)
+#if LINUX_VERSION_IS_LESS(3,18,12) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
static inline bool is_kthread_should_stop(void)
{
return (current->flags & PF_KTHREAD) && kthread_should_stop();
@@ -83,6 +84,7 @@ int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
EXPORT_SYMBOL(woken_wake_function);
#endif
+#if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
static u8 netdev_rss_key[NETDEV_RSS_KEY_LEN];
void netdev_rss_key_fill(void *buffer, size_t len)
@@ -96,6 +98,7 @@ void netdev_rss_key_fill(void *buffer, size_t len)
#endif
}
EXPORT_SYMBOL_GPL(netdev_rss_key_fill);
+#endif
#if defined(CONFIG_DEBUG_FS)
struct debugfs_devm_entry {
@@ -17,6 +17,185 @@
#include <linux/trace_seq.h>
#include <asm/unaligned.h>
+#if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
+static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long nr_pages,
+ int write, int force,
+ struct page **pages,
+ struct vm_area_struct **vmas,
+ int *locked, bool notify_drop,
+ unsigned int flags)
+{
+ long ret, pages_done;
+ bool lock_dropped;
+
+ if (locked) {
+ /* if VM_FAULT_RETRY can be returned, vmas become invalid */
+ BUG_ON(vmas);
+ /* check caller initialized locked */
+ BUG_ON(*locked != 1);
+ }
+
+ if (pages)
+ flags |= FOLL_GET;
+ if (write)
+ flags |= FOLL_WRITE;
+ if (force)
+ flags |= FOLL_FORCE;
+
+ pages_done = 0;
+ lock_dropped = false;
+ for (;;) {
+ ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,
+ vmas, locked);
+ if (!locked)
+ /* VM_FAULT_RETRY couldn't trigger, bypass */
+ return ret;
+
+ /* VM_FAULT_RETRY cannot return errors */
+ if (!*locked) {
+ BUG_ON(ret < 0);
+ BUG_ON(ret >= nr_pages);
+ }
+
+ if (!pages)
+ /* If it's a prefault don't insist harder */
+ return ret;
+
+ if (ret > 0) {
+ nr_pages -= ret;
+ pages_done += ret;
+ if (!nr_pages)
+ break;
+ }
+ if (*locked) {
+ /* VM_FAULT_RETRY didn't trigger */
+ if (!pages_done)
+ pages_done = ret;
+ break;
+ }
+ /* VM_FAULT_RETRY triggered, so seek to the faulting offset */
+ pages += ret;
+ start += ret << PAGE_SHIFT;
+
+ /*
+ * Repeat on the address that fired VM_FAULT_RETRY
+ * without FAULT_FLAG_ALLOW_RETRY but with
+ * FAULT_FLAG_TRIED.
+ */
+ *locked = 1;
+ lock_dropped = true;
+ down_read(&mm->mmap_sem);
+ ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
+ pages, NULL, NULL);
+ if (ret != 1) {
+ BUG_ON(ret > 1);
+ if (!pages_done)
+ pages_done = ret;
+ break;
+ }
+ nr_pages--;
+ pages_done++;
+ if (!nr_pages)
+ break;
+ pages++;
+ start += PAGE_SIZE;
+ }
+ if (notify_drop && lock_dropped && *locked) {
+ /*
+ * We must let the caller know we temporarily dropped the lock
+ * and so the critical section protected by it was lost.
+ */
+ up_read(&mm->mmap_sem);
+ *locked = 0;
+ }
+ return pages_done;
+}
+
+/*
+ * We can leverage the VM_FAULT_RETRY functionality in the page fault
+ * paths better by using either get_user_pages_locked() or
+ * get_user_pages_unlocked().
+ *
+ * get_user_pages_locked() is suitable to replace the form:
+ *
+ * down_read(&mm->mmap_sem);
+ * do_something()
+ * get_user_pages(tsk, mm, ..., pages, NULL);
+ * up_read(&mm->mmap_sem);
+ *
+ * to:
+ *
+ * int locked = 1;
+ * down_read(&mm->mmap_sem);
+ * do_something()
+ * get_user_pages_locked(tsk, mm, ..., pages, &locked);
+ * if (locked)
+ * up_read(&mm->mmap_sem);
+ */
+long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
+ int write, int force, struct page **pages,
+ int *locked)
+{
+ return __get_user_pages_locked(current, current->mm, start, nr_pages,
+ write, force, pages, NULL, locked, true,
+ FOLL_TOUCH);
+}
+EXPORT_SYMBOL_GPL(get_user_pages_locked);
+
+/*
+ * Same as get_user_pages_unlocked(...., FOLL_TOUCH) but it allows to
+ * pass additional gup_flags as last parameter (like FOLL_HWPOISON).
+ *
+ * NOTE: here FOLL_TOUCH is not set implicitly and must be set by the
+ * caller if required (just like with __get_user_pages). "FOLL_GET",
+ * "FOLL_WRITE" and "FOLL_FORCE" are set implicitly as needed
+ * according to the parameters "pages", "write", "force"
+ * respectively.
+ */
+static __always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+ int write, int force, struct page **pages,
+ unsigned int gup_flags)
+{
+ long ret;
+ int locked = 1;
+ down_read(&mm->mmap_sem);
+ ret = __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
+ pages, NULL, &locked, false, gup_flags);
+ if (locked)
+ up_read(&mm->mmap_sem);
+ return ret;
+}
+
+/*
+ * get_user_pages_unlocked() is suitable to replace the form:
+ *
+ * down_read(&mm->mmap_sem);
+ * get_user_pages(tsk, mm, ..., pages, NULL);
+ * up_read(&mm->mmap_sem);
+ *
+ * with:
+ *
+ * get_user_pages_unlocked(tsk, mm, ..., pages);
+ *
+ * It is functionally equivalent to get_user_pages_fast so
+ * get_user_pages_fast should be used instead, if the two parameters
+ * "tsk" and "mm" are respectively equal to current and current->mm,
+ * or if "force" shall be set to 1 (get_user_pages_fast misses the
+ * "force" parameter).
+ */
+long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
+ int write, int force, struct page **pages)
+{
+ return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
+ write, force, pages, FOLL_TOUCH);
+}
+EXPORT_SYMBOL_GPL(get_user_pages_unlocked);
+#endif
+
/**
* hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory
* @buf: data blob to dump
@@ -145,7 +324,8 @@ overflow1:
}
EXPORT_SYMBOL_GPL(hex_dump_to_buffer);
-#if LINUX_VERSION_IS_LESS(3,17,0)
+#if LINUX_VERSION_IS_LESS(3,17,0) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
static inline unsigned char *
trace_seq_buffer_ptr(struct trace_seq *s)
{
@@ -153,6 +333,7 @@ trace_seq_buffer_ptr(struct trace_seq *s)
}
#endif
+#if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
const char *
ftrace_print_array_seq(struct trace_seq *p, const void *buf, int buf_len,
size_t el_size)
@@ -196,3 +377,4 @@ ftrace_print_array_seq(struct trace_seq *p, const void *buf, int buf_len,
return ret;
}
EXPORT_SYMBOL(ftrace_print_array_seq);
+#endif /* RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) */
@@ -12,6 +12,7 @@
#include <linux/netdevice.h>
#include <linux/tty.h>
+#if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
netdev_features_t passthru_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
@@ -19,6 +20,7 @@ netdev_features_t passthru_features_check(struct sk_buff *skb,
return features;
}
EXPORT_SYMBOL_GPL(passthru_features_check);
+#endif /* RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) */
#ifdef CONFIG_TTY
#if LINUX_VERSION_IS_GEQ(4,0,0)
@@ -11,7 +11,8 @@
#include <crypto/scatterwalk.h>
#include <crypto/aead.h>
-static struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
+#if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
+static struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
struct scatterlist *src,
unsigned int len)
{
@@ -32,6 +33,7 @@ static struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
return dst;
}
+#endif
struct aead_old_request {
struct scatterlist srcbuf[2];
@@ -21,7 +21,8 @@
#include <asm/unaligned.h>
#ifdef CONFIG_DEBUG_FS
-#if LINUX_VERSION_IS_LESS(4,3,0)
+#if LINUX_VERSION_IS_LESS(4,3,0) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
static ssize_t debugfs_read_file_bool(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -76,7 +76,8 @@ int kstrtobool_from_user(const char __user *s, size_t count, bool *res)
}
EXPORT_SYMBOL_GPL(kstrtobool_from_user);
-/**
+#if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
+/**
* match_string - matches given string in an array
* @array: array of strings
* @n: number of strings in the array or -1 for NULL terminated arrays
@@ -101,3 +102,4 @@ int match_string(const char * const *array, size_t n, const char *string)
return -EINVAL;
}
EXPORT_SYMBOL(match_string);
+#endif /* RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6) */
@@ -120,7 +120,8 @@ EXPORT_SYMBOL_GPL(nla_put_64bit);
* Below 3.18 or if the kernel has devcoredump disabled, we copied the
* entire devcoredump, so no need to define these functions.
*/
-#if LINUX_VERSION_IS_GEQ(3,18,0) && \
+#if (LINUX_VERSION_IS_GEQ(3,18,0) || \
+ RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6)) && \
!defined(CPTCFG_BPAUTO_BUILD_WANT_DEV_COREDUMP)
#include <linux/devcoredump.h>
#include <linux/scatterlist.h>
@@ -251,7 +251,8 @@ int backport_genl_register_family(struct genl_family *family)
family->family.post_doit = backport_post_doit;
/* attrbuf is output only */
family->copy_ops = ops;
-#if LINUX_VERSION_IS_GEQ(3,13,0)
+#if LINUX_VERSION_IS_GEQ(3,13,0) || \
+ RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6)
family->family.ops = ops;
COPY(mcgrps);
COPY(n_ops);
@@ -306,7 +307,8 @@ static u32 __backport_genl_group(const struct genl_family *family,
{
if (WARN_ON_ONCE(group >= family->n_mcgrps))
return INVALID_GROUP;
-#if LINUX_VERSION_IS_LESS(3,13,0)
+#if LINUX_VERSION_IS_LESS(3,13,0) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
return family->mcgrps[group].id;
#else
return family->family.mcgrp_offset + group;
@@ -1,6 +1,6 @@
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
-@@ -1131,10 +1131,25 @@ static void ieee80211_uninit(struct net_
+@@ -1128,10 +1128,26 @@ static void ieee80211_uninit(struct net_
ieee80211_teardown_sdata(IEEE80211_DEV_TO_SUB_IF(dev));
}
@@ -10,7 +10,8 @@
struct net_device *sb_dev,
select_queue_fallback_t fallback)
+#elif LINUX_VERSION_IS_GEQ(3,14,0) || \
-+ (LINUX_VERSION_CODE == KERNEL_VERSION(3,13,11) && UTS_UBUNTU_RELEASE_ABI > 30)
++ (LINUX_VERSION_CODE == KERNEL_VERSION(3,13,11) && UTS_UBUNTU_RELEASE_ABI > 30) || \
++ RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6)
+static u16 ieee80211_netdev_select_queue(struct net_device *dev,
+ struct sk_buff *skb,
+ void *accel_priv,
@@ -26,7 +27,7 @@
{
return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
}
-@@ -1177,10 +1192,25 @@ static const struct net_device_ops ieee8
+@@ -1174,10 +1189,26 @@ static const struct net_device_ops ieee8
.ndo_get_stats64 = ieee80211_get_stats64,
};
@@ -36,7 +37,8 @@
struct net_device *sb_dev,
select_queue_fallback_t fallback)
+#elif LINUX_VERSION_IS_GEQ(3,14,0) || \
-+ (LINUX_VERSION_CODE == KERNEL_VERSION(3,13,11) && UTS_UBUNTU_RELEASE_ABI > 30)
++ (LINUX_VERSION_CODE == KERNEL_VERSION(3,13,11) && UTS_UBUNTU_RELEASE_ABI > 30) || \
++ RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6)
+static u16 ieee80211_monitor_select_queue(struct net_device *dev,
+ struct sk_buff *skb,
+ void *accel_priv,
@@ -73,7 +73,7 @@ func(...) {
constant r1.e1,r1.e2;
identifier r.OPS;
@@
-+#if LINUX_VERSION_IS_LESS(4,10,0)
++#if LINUX_VERSION_IS_LESS(4,10,0) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
+ static int __change_mtu(struct net_device *ndev, int new_mtu)
+ {
+ if (new_mtu < e1 || new_mtu > e2)
@@ -92,7 +92,7 @@ identifier OPS;
@@
struct net_device_ops OPS = {
-+#if LINUX_VERSION_IS_LESS(4,10,0)
++#if LINUX_VERSION_IS_LESS(4,10,0) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
+ .ndo_change_mtu = __change_mtu,
+#endif
...
@@ -5,7 +5,7 @@ fresh identifier stats64_fn_wrap = "bp_" ## stats64_fn;
position p;
@@
struct net_device_ops OPS@p = {
-+#if LINUX_VERSION_IS_GEQ(4,11,0)
++#if LINUX_VERSION_IS_GEQ(4,11,0) || RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6)
.ndo_get_stats64 = stats64_fn,
+#else
+ .ndo_get_stats64 = stats64_fn_wrap,
@@ -17,7 +17,7 @@ identifier r.stats64_fn_wrap;
identifier r.stats64_fn;
@@
void stats64_fn(...) {...}
-+#if LINUX_VERSION_IS_LESS(4,11,0)
++#if LINUX_VERSION_IS_LESS(4,11,0) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,6)
+/* Just declare it here to keep sparse happy */
+struct rtnl_link_stats64 *
+stats64_fn_wrap(struct net_device *dev,